| column | type | details |
|---|---|---|
| hexsha | string | length 40 |
| repo | string | length 5–121 |
| path | string | length 4–227 |
| license | list | |
| language | string | 1 class, value `Python` |
| identifier | string | length 1–107 |
| return_type | string | length 2–237, nullable (⌀) |
| original_string | string | length 75–13.4k |
| original_docstring | string | length 13–12.9k |
| docstring | string | length 13–2.57k |
| docstring_tokens | list | |
| code | string | length 23–1.88k |
| code_tokens | list | |
| short_docstring | string | length 1–1.32k |
| short_docstring_tokens | list | |
| comment | list | |
| parameters | list | |
| docstring_params | dict | |
| code_with_imports | string | length 23–1.88k |
| idxs | int64 | 0–611k |
| cluster | int64 | 0–1.02k |
5ef64797b729515bafe519c1dbbf55f5ac1bc6c5
|
miguelverissimo/pychain
|
backend/util/crypto_hash.py
|
[
"MIT"
] |
Python
|
crypto_hash
|
<not_specific>
|
def crypto_hash(*args):
"""
crypto_hash encrypts the provided data into a sha256 hash
"""
args_string = sorted(map(lambda data: json.dumps(data), args))
concatenated = "".join(args_string)
return hashlib.sha256(concatenated.encode("utf-8")).hexdigest()
|
crypto_hash encrypts the provided data into a sha256 hash
|
crypto_hash encrypts the provided data into a sha256 hash
|
[
"crypto_hash",
"encrypts",
"the",
"provided",
"data",
"into",
"a",
"sha256",
"hash"
] |
def crypto_hash(*args):
args_string = sorted(map(lambda data: json.dumps(data), args))
concatenated = "".join(args_string)
return hashlib.sha256(concatenated.encode("utf-8")).hexdigest()
|
[
"def",
"crypto_hash",
"(",
"*",
"args",
")",
":",
"args_string",
"=",
"sorted",
"(",
"map",
"(",
"lambda",
"data",
":",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"args",
")",
")",
"concatenated",
"=",
"\"\"",
".",
"join",
"(",
"args_string",
")",
"return",
"hashlib",
".",
"sha256",
"(",
"concatenated",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")"
] |
crypto_hash encrypts the provided data into a sha256 hash
|
[
"crypto_hash",
"encrypts",
"the",
"provided",
"data",
"into",
"a",
"sha256",
"hash"
] |
[
"\"\"\"\n crypto_hash encrypts the provided data into a sha256 hash\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import hashlib
import json
def crypto_hash(*args):
args_string = sorted(map(lambda data: json.dumps(data), args))
concatenated = "".join(args_string)
return hashlib.sha256(concatenated.encode("utf-8")).hexdigest()
| 684 | 252 |
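A minimal usage sketch of the `crypto_hash` row above (function body copied from the row; the sample arguments are hypothetical). It shows that the digest is order-independent, since the JSON-serialized arguments are sorted before hashing. Note also that, despite the docstring's wording, this is hashing rather than encryption:

```python
import hashlib
import json

def crypto_hash(*args):
    # Sort the JSON-serialized arguments so ordering does not change the digest.
    args_string = sorted(map(lambda data: json.dumps(data), args))
    concatenated = "".join(args_string)
    return hashlib.sha256(concatenated.encode("utf-8")).hexdigest()

# Same inputs in a different order produce the same 64-character hex digest.
assert crypto_hash(1, [2], "three") == crypto_hash("three", 1, [2])
print(crypto_hash("foo"))
```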
349103f607e2efd1c81ea83c34a01ab0ef4c89be
|
CSIRT-MU/stream4flow
|
web-interface/Stream4Flow/modules/global_functions.py
|
[
"MIT"
] |
Python
|
escape
|
<not_specific>
|
def escape(html):
"""
Replaces ampresands, quotes and carets in the given HTML with their safe versions.
:return: Escaped HTML
"""
    return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
|
Replaces ampresands, quotes and carets in the given HTML with their safe versions.
:return: Escaped HTML
|
Replaces ampresands, quotes and carets in the given HTML with their safe versions.
|
[
"Replaces",
"ampresands",
"quotes",
"and",
"carets",
"in",
"the",
"given",
"HTML",
"with",
"their",
"safe",
"versions",
"."
] |
def escape(html):
    return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
|
[
"def",
"escape",
"(",
"html",
")",
":",
"return",
"html",
".",
"replace",
"(",
"'&'",
",",
"'&amp;'",
")",
".",
"replace",
"(",
"'<'",
",",
"'&lt;'",
")",
".",
"replace",
"(",
"'>'",
",",
"'&gt;'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'&quot;'",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'&#39;'",
")"
] |
Replaces ampresands, quotes and carets in the given HTML with their safe versions.
|
[
"Replaces",
"ampresands",
"quotes",
"and",
"carets",
"in",
"the",
"given",
"HTML",
"with",
"their",
"safe",
"versions",
"."
] |
[
"\"\"\"\n Replaces ampresands, quotes and carets in the given HTML with their safe versions.\n\n :return: Escaped HTML\n \"\"\""
] |
[
{
"param": "html",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "html",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def escape(html):
    return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
| 685 | 735 |
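A quick check of the `escape` helper above, using the entity strings restored from the HTML-mangled rendering of this row (the sample markup is hypothetical):

```python
def escape(html):
    # Replace each unsafe character with its HTML entity; '&' must go first
    # so the entities introduced by later passes are not double-escaped.
    return (html.replace('&', '&amp;').replace('<', '&lt;')
                .replace('>', '&gt;').replace('"', '&quot;')
                .replace("'", '&#39;'))

print(escape('<a href="x">Tom & Jerry\'s</a>'))
# &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&#39;s&lt;/a&gt;
```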
b3edda74832fe56cf1ecf4d8a75efae383ce8f01
|
sirtaj/skytool
|
skytool/util/qutil.py
|
[
"FSFAP"
] |
Python
|
widget_update
| null |
def widget_update(widget):
'''Context manager for widget updating.
'''
widget.setUpdatesEnabled(False)
yield
widget.setUpdatesEnabled(True)
|
Context manager for widget updating.
|
Context manager for widget updating.
|
[
"Context",
"manager",
"for",
"widget",
"updating",
"."
] |
def widget_update(widget):
widget.setUpdatesEnabled(False)
yield
widget.setUpdatesEnabled(True)
|
[
"def",
"widget_update",
"(",
"widget",
")",
":",
"widget",
".",
"setUpdatesEnabled",
"(",
"False",
")",
"yield",
"widget",
".",
"setUpdatesEnabled",
"(",
"True",
")"
] |
Context manager for widget updating.
|
[
"Context",
"manager",
"for",
"widget",
"updating",
"."
] |
[
"'''Context manager for widget updating.\n '''"
] |
[
{
"param": "widget",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "widget",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def widget_update(widget):
widget.setUpdatesEnabled(False)
yield
widget.setUpdatesEnabled(True)
| 686 | 326 |
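The `widget_update` row above is a bare generator; to use it in a `with` statement it has to be wrapped in `contextlib.contextmanager`, which the surrounding module presumably applies (an assumption: the decorator is not captured in this row). A hedged sketch:

```python
from contextlib import contextmanager

@contextmanager  # assumption: the decorator is not shown in the row itself
def widget_update(widget):
    widget.setUpdatesEnabled(False)  # suspend repaints while the widget is mutated
    yield
    widget.setUpdatesEnabled(True)   # resume repaints afterwards

# Hypothetical usage with a Qt widget:
# with widget_update(table):
#     for row in rows:
#         table.addRow(row)
```

A production version would re-enable updates in a `finally` block so that an exception inside the block cannot leave the widget frozen.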
9ed941decee421e320ab353f4f818c777cd1c399
|
CompCogNeuro/sims
|
ch6/objrec/objrec.py
|
[
"BSD-3-Clause"
] |
Python
|
Config
| null |
def Config(ss):
"""
Config configures all the elements using the standard functions
"""
ss.InitParams()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigTrnEpcLog(ss.TrnEpcLog)
ss.ConfigTstEpcLog(ss.TstEpcLog)
ss.ConfigTstTrlLog(ss.TstTrlLog)
ss.ConfigRunLog(ss.RunLog)
|
Config configures all the elements using the standard functions
|
Config configures all the elements using the standard functions
|
[
"Config",
"configures",
"all",
"the",
"elements",
"using",
"the",
"standard",
"functions"
] |
def Config(ss):
ss.InitParams()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigTrnEpcLog(ss.TrnEpcLog)
ss.ConfigTstEpcLog(ss.TstEpcLog)
ss.ConfigTstTrlLog(ss.TstTrlLog)
ss.ConfigRunLog(ss.RunLog)
|
[
"def",
"Config",
"(",
"ss",
")",
":",
"ss",
".",
"InitParams",
"(",
")",
"ss",
".",
"ConfigEnv",
"(",
")",
"ss",
".",
"ConfigNet",
"(",
"ss",
".",
"Net",
")",
"ss",
".",
"ConfigTrnEpcLog",
"(",
"ss",
".",
"TrnEpcLog",
")",
"ss",
".",
"ConfigTstEpcLog",
"(",
"ss",
".",
"TstEpcLog",
")",
"ss",
".",
"ConfigTstTrlLog",
"(",
"ss",
".",
"TstTrlLog",
")",
"ss",
".",
"ConfigRunLog",
"(",
"ss",
".",
"RunLog",
")"
] |
Config configures all the elements using the standard functions
|
[
"Config",
"configures",
"all",
"the",
"elements",
"using",
"the",
"standard",
"functions"
] |
[
"\"\"\"\n Config configures all the elements using the standard functions\n \"\"\""
] |
[
{
"param": "ss",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "ss",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def Config(ss):
ss.InitParams()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigTrnEpcLog(ss.TrnEpcLog)
ss.ConfigTstEpcLog(ss.TstEpcLog)
ss.ConfigTstTrlLog(ss.TstTrlLog)
ss.ConfigRunLog(ss.RunLog)
| 687 | 823 |
5ab7fa33d11f5e8facb2fc94bbe6f9d11aa3fd70
|
MFSJMenger/qctools
|
qctools/fileio.py
|
[
"Apache-2.0"
] |
Python
|
file_reading_iterator_raw
| null |
def file_reading_iterator_raw(filename, options='r'):
"""generates an iterator to loop over the lines of a file"""
# actual loop
with open(filename, options) as f:
while True:
line = f.readline()
if not line:
break
# return line
yield line
|
generates an iterator to loop over the lines of a file
|
generates an iterator to loop over the lines of a file
|
[
"generates",
"an",
"iterator",
"to",
"loop",
"over",
"the",
"lines",
"of",
"a",
"file"
] |
def file_reading_iterator_raw(filename, options='r'):
with open(filename, options) as f:
while True:
line = f.readline()
if not line:
break
yield line
|
[
"def",
"file_reading_iterator_raw",
"(",
"filename",
",",
"options",
"=",
"'r'",
")",
":",
"with",
"open",
"(",
"filename",
",",
"options",
")",
"as",
"f",
":",
"while",
"True",
":",
"line",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"yield",
"line"
] |
generates an iterator to loop over the lines of a file
|
[
"generates",
"an",
"iterator",
"to",
"loop",
"over",
"the",
"lines",
"of",
"a",
"file"
] |
[
"\"\"\"generates an iterator to loop over the lines of a file\"\"\"",
"# actual loop",
"# return line"
] |
[
{
"param": "filename",
"type": null
},
{
"param": "options",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "options",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def file_reading_iterator_raw(filename, options='r'):
with open(filename, options) as f:
while True:
line = f.readline()
if not line:
break
yield line
| 688 | 194 |
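Usage sketch for `file_reading_iterator_raw` ('notes.txt' is a hypothetical file); functionally this matches iterating the file object directly, which is the more idiomatic modern form:

```python
def file_reading_iterator_raw(filename, options='r'):
    with open(filename, options) as f:
        while True:
            line = f.readline()
            if not line:   # readline() returns '' at end of file
                break
            yield line

for line in file_reading_iterator_raw('notes.txt'):
    print(line.rstrip('\n'))
```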
cd6dfc5f2788d53198aec0b87c394cdf52a0fe0c
|
edgar-merino/infaxmlutil
|
infa/format.py
|
[
"MIT"
] |
Python
|
headers
|
<not_specific>
|
def headers(node):
'''Given a node, get a list with the tags of its predecessors'''
result=[]
curnode=node
while curnode is not None:
result.insert(0, curnode.tag )
parent=curnode.getparent()
if parent is not None and parent.tag == 'REPOSITORY':
break
curnode=parent
return result
|
Given a node, get a list with the tags of its predecessors
|
Given a node, get a list with the tags of its predecessors
|
[
"Given",
"a",
"node",
"get",
"a",
"list",
"with",
"the",
"tags",
"of",
"its",
"predecessors"
] |
def headers(node):
result=[]
curnode=node
while curnode is not None:
result.insert(0, curnode.tag )
parent=curnode.getparent()
if parent is not None and parent.tag == 'REPOSITORY':
break
curnode=parent
return result
|
[
"def",
"headers",
"(",
"node",
")",
":",
"result",
"=",
"[",
"]",
"curnode",
"=",
"node",
"while",
"curnode",
"is",
"not",
"None",
":",
"result",
".",
"insert",
"(",
"0",
",",
"curnode",
".",
"tag",
")",
"parent",
"=",
"curnode",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"not",
"None",
"and",
"parent",
".",
"tag",
"==",
"'REPOSITORY'",
":",
"break",
"curnode",
"=",
"parent",
"return",
"result"
] |
Given a node, get a list with the tags of its predecessors
|
[
"Given",
"a",
"node",
"get",
"a",
"list",
"with",
"the",
"tags",
"of",
"its",
"predecessors"
] |
[
"'''Given a node, get a list with the tags of its predecessors'''"
] |
[
{
"param": "node",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "node",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def headers(node):
result=[]
curnode=node
while curnode is not None:
result.insert(0, curnode.tag )
parent=curnode.getparent()
if parent is not None and parent.tag == 'REPOSITORY':
break
curnode=parent
return result
| 689 | 46 |
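`curnode.getparent()` implies `lxml.etree` elements (the standard-library `ElementTree` has no `getparent()`), so here is a sketch under that assumption, with a hypothetical Informatica-style XML snippet:

```python
from lxml import etree

def headers(node):
    result = []
    curnode = node
    while curnode is not None:
        result.insert(0, curnode.tag)   # prepend, so the outermost tag ends up first
        parent = curnode.getparent()
        if parent is not None and parent.tag == 'REPOSITORY':
            break                       # stop just below the REPOSITORY root
        curnode = parent
    return result

root = etree.fromstring(
    '<REPOSITORY><FOLDER><MAPPING><TRANSFORMATION/></MAPPING></FOLDER></REPOSITORY>')
node = root.find('.//TRANSFORMATION')
print(headers(node))  # ['FOLDER', 'MAPPING', 'TRANSFORMATION']
```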
aae9f248aa70e69db67614341563a776a4e1ec36
|
DougMHu/SCTA
|
src/SCTA/utils/misc.py
|
[
"MIT"
] |
Python
|
dict_float2str
|
<not_specific>
|
def dict_float2str(d):
"""Input: (nested) dictionary with values that are floats
Output: dictionary where all float values are replaced by strings"""
adict = {}
for key, value in d.items():
if inspect.isclass(value):
value = str(value)
if type(value) == dict:
value = dict_float2str(value)
adict[key] = value
return adict
|
Input: (nested) dictionary with values that are floats
Output: dictionary where all float values are replaced by strings
|
(nested) dictionary with values that are floats
Output: dictionary where all float values are replaced by strings
|
[
"(",
"nested",
")",
"dictionary",
"with",
"values",
"that",
"are",
"floats",
"Output",
":",
"dictionary",
"where",
"all",
"float",
"values",
"are",
"replaced",
"by",
"strings"
] |
def dict_float2str(d):
adict = {}
for key, value in d.items():
if inspect.isclass(value):
value = str(value)
if type(value) == dict:
value = dict_float2str(value)
adict[key] = value
return adict
|
[
"def",
"dict_float2str",
"(",
"d",
")",
":",
"adict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"value",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"type",
"(",
"value",
")",
"==",
"dict",
":",
"value",
"=",
"dict_float2str",
"(",
"value",
")",
"adict",
"[",
"key",
"]",
"=",
"value",
"return",
"adict"
] |
Input: (nested) dictionary with values that are floats
Output: dictionary where all float values are replaced by strings
|
[
"Input",
":",
"(",
"nested",
")",
"dictionary",
"with",
"values",
"that",
"are",
"floats",
"Output",
":",
"dictionary",
"where",
"all",
"float",
"values",
"are",
"replaced",
"by",
"strings"
] |
[
"\"\"\"Input: (nested) dictionary with values that are floats\n\tOutput: dictionary where all float values are replaced by strings\"\"\""
] |
[
{
"param": "d",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "d",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import inspect
def dict_float2str(d):
adict = {}
for key, value in d.items():
if inspect.isclass(value):
value = str(value)
if type(value) == dict:
value = dict_float2str(value)
adict[key] = value
return adict
| 690 | 226 |
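Worth noting: despite its name and docstring, `dict_float2str` only stringifies class objects and recurses into nested dicts; float values pass through unchanged. A small sketch demonstrating that behavior:

```python
import inspect

def dict_float2str(d):
    adict = {}
    for key, value in d.items():
        if inspect.isclass(value):
            value = str(value)             # classes become their repr string
        if type(value) == dict:
            value = dict_float2str(value)  # recurse into nested dicts
        adict[key] = value
    return adict

print(dict_float2str({'a': 1.5, 'b': {'c': float}}))
# {'a': 1.5, 'b': {'c': "<class 'float'>"}}  (the float 1.5 is NOT converted)
```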
22e3eef2b683565add03eb7f62d29d41c5457ce9
|
MichaelDiers/TabletopGameAdmin
|
backend/Md.Tga.IntegrationTest/environment.py
|
[
"MIT"
] |
Python
|
__get_value
|
str
|
def __get_value(key: str) -> str:
'''
Reads an environment variable by the given key.
Args:
key (str): The name of the environment variable.
Raises:
Exception: Raised if the environment variable does not exist.
Returns:
str: The value of the environment variable.
'''
value = os.environ.get(key, None)
if value:
return value
raise Exception(f'{key} is not set')
|
Reads an environment variable by the given key.
Args:
key (str): The name of the environment variable.
Raises:
Exception: Raised if the environment variable does not exist.
Returns:
str: The value of the environment variable.
|
Reads an environment variable by the given key.
|
[
"Reads",
"an",
"environment",
"variable",
"by",
"the",
"given",
"key",
"."
] |
def __get_value(key: str) -> str:
value = os.environ.get(key, None)
if value:
return value
raise Exception(f'{key} is not set')
|
[
"def",
"__get_value",
"(",
"key",
":",
"str",
")",
"->",
"str",
":",
"value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"value",
":",
"return",
"value",
"raise",
"Exception",
"(",
"f'{key} is not set'",
")"
] |
Reads an environment variable by the given key.
|
[
"Reads",
"an",
"environment",
"variable",
"by",
"the",
"given",
"key",
"."
] |
[
"'''\r\n Reads an environment variable by the given key.\r\n\r\n Args:\r\n key (str): The name of the environment variable.\r\n\r\n Raises:\r\n Exception: Raised if the environment variable does not exist.\r\n\r\n Returns:\r\n str: The value of the environment variable.\r\n '''"
] |
[
{
"param": "key",
"type": "str"
}
] |
{
"returns": [
{
"docstring": "The value of the environment variable.",
"docstring_tokens": [
"The",
"value",
"of",
"the",
"environment",
"variable",
"."
],
"type": "str"
}
],
"raises": [
{
"docstring": "Raised if the environment variable does not exist.",
"docstring_tokens": [
"Raised",
"if",
"the",
"environment",
"variable",
"does",
"not",
"exist",
"."
],
"type": "Exception"
}
],
"params": [
{
"identifier": "key",
"type": "str",
"docstring": "The name of the environment variable.",
"docstring_tokens": [
"The",
"name",
"of",
"the",
"environment",
"variable",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import os
def __get_value(key: str) -> str:
value = os.environ.get(key, None)
if value:
return value
raise Exception(f'{key} is not set')
| 691 | 856 |
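Usage sketch for `__get_value` ('API_URL' is a hypothetical variable name):

```python
import os

def __get_value(key: str) -> str:
    value = os.environ.get(key, None)
    if value:
        return value
    raise Exception(f'{key} is not set')

os.environ['API_URL'] = 'https://example.test'
print(__get_value('API_URL'))   # https://example.test
# __get_value('MISSING') would raise Exception('MISSING is not set')
```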
5c877eb8daf1cbb464d719704bcc59e8f50f34c1
|
ianepperson/filestorage
|
filestorage/handler_base.py
|
[
"MIT"
] |
Python
|
sanitize_filename
|
str
|
def sanitize_filename(cls, filename: str) -> str:
"""Perform a quick pass to sanitize the filename"""
# Strip out any . prefix - which should eliminate attempts to write
# special Unix files
filename = filename.lstrip('.')
# Strip out any non-alpha, . or _ characters.
def clean_char(c: str) -> str:
if c.isalnum() or c in ('.', '_'):
return c
return '_'
filename = ''.join(clean_char(c) for c in filename)
return filename
|
Perform a quick pass to sanitize the filename
|
Perform a quick pass to sanitize the filename
|
[
"Perform",
"a",
"quick",
"pass",
"to",
"sanitize",
"the",
"filename"
] |
def sanitize_filename(cls, filename: str) -> str:
filename = filename.lstrip('.')
def clean_char(c: str) -> str:
if c.isalnum() or c in ('.', '_'):
return c
return '_'
filename = ''.join(clean_char(c) for c in filename)
return filename
|
[
"def",
"sanitize_filename",
"(",
"cls",
",",
"filename",
":",
"str",
")",
"->",
"str",
":",
"filename",
"=",
"filename",
".",
"lstrip",
"(",
"'.'",
")",
"def",
"clean_char",
"(",
"c",
":",
"str",
")",
"->",
"str",
":",
"if",
"c",
".",
"isalnum",
"(",
")",
"or",
"c",
"in",
"(",
"'.'",
",",
"'_'",
")",
":",
"return",
"c",
"return",
"'_'",
"filename",
"=",
"''",
".",
"join",
"(",
"clean_char",
"(",
"c",
")",
"for",
"c",
"in",
"filename",
")",
"return",
"filename"
] |
Perform a quick pass to sanitize the filename
|
[
"Perform",
"a",
"quick",
"pass",
"to",
"sanitize",
"the",
"filename"
] |
[
"\"\"\"Perform a quick pass to sanitize the filename\"\"\"",
"# Strip out any . prefix - which should eliminate attempts to write",
"# special Unix files",
"# Strip out any non-alpha, . or _ characters."
] |
[
{
"param": "cls",
"type": null
},
{
"param": "filename",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def sanitize_filename(cls, filename: str) -> str:
filename = filename.lstrip('.')
def clean_char(c: str) -> str:
if c.isalnum() or c in ('.', '_'):
return c
return '_'
filename = ''.join(clean_char(c) for c in filename)
return filename
| 692 | 150 |
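`sanitize_filename` is written as a classmethod body but never uses `cls`, so it can be exercised with a placeholder first argument (a sketch; the sample paths are hypothetical):

```python
def sanitize_filename(cls, filename: str) -> str:
    filename = filename.lstrip('.')   # drop leading dots (hidden/special files)
    def clean_char(c: str) -> str:
        if c.isalnum() or c in ('.', '_'):
            return c
        return '_'                    # anything else becomes an underscore
    filename = ''.join(clean_char(c) for c in filename)
    return filename

print(sanitize_filename(None, '../etc/passwd'))    # _etc_passwd
print(sanitize_filename(None, 'report 2021.pdf'))  # report_2021.pdf
```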
2c19a5de7fc52ab838c0d984ed151dc7ee33579f
|
sudhamayi/rde-dictionary-builder
|
rde_schema_dictionary_gen.py
|
[
"BSD-3-Clause"
] |
Python
|
to_redfish_version
|
<not_specific>
|
def to_redfish_version(ver32):
"""
Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata
"""
if ver32 == 0xFFFFFFFF: # un-versioned
return ''
else:
return 'v'+str((ver32 >> 24) & 0x0F)+'_'+str((ver32 >> 16) & 0x0F)+'_'+str((ver32 >> 8) & 0x0F)
|
Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata
|
Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata
|
[
"Converts",
"a",
"PLDM",
"ver32",
"number",
"to",
"a",
"Redfish",
"version",
"in",
"the",
"format",
"vMajor_Minor_Errata"
] |
def to_redfish_version(ver32):
if ver32 == 0xFFFFFFFF:
return ''
else:
return 'v'+str((ver32 >> 24) & 0x0F)+'_'+str((ver32 >> 16) & 0x0F)+'_'+str((ver32 >> 8) & 0x0F)
|
[
"def",
"to_redfish_version",
"(",
"ver32",
")",
":",
"if",
"ver32",
"==",
"0xFFFFFFFF",
":",
"return",
"''",
"else",
":",
"return",
"'v'",
"+",
"str",
"(",
"(",
"ver32",
">>",
"24",
")",
"&",
"0x0F",
")",
"+",
"'_'",
"+",
"str",
"(",
"(",
"ver32",
">>",
"16",
")",
"&",
"0x0F",
")",
"+",
"'_'",
"+",
"str",
"(",
"(",
"ver32",
">>",
"8",
")",
"&",
"0x0F",
")"
] |
Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata
|
[
"Converts",
"a",
"PLDM",
"ver32",
"number",
"to",
"a",
"Redfish",
"version",
"in",
"the",
"format",
"vMajor_Minor_Errata"
] |
[
"\"\"\"\n Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata\n \"\"\"",
"# un-versioned"
] |
[
{
"param": "ver32",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "ver32",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def to_redfish_version(ver32):
if ver32 == 0xFFFFFFFF:
return ''
else:
return 'v'+str((ver32 >> 24) & 0x0F)+'_'+str((ver32 >> 16) & 0x0F)+'_'+str((ver32 >> 8) & 0x0F)
| 693 | 724 |
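A worked example for `to_redfish_version`: each field occupies one byte of the PLDM ver32, and the low nibble of the top three bytes carries the digit (the sample values are illustrative):

```python
def to_redfish_version(ver32):
    if ver32 == 0xFFFFFFFF:   # sentinel for "un-versioned"
        return ''
    return ('v' + str((ver32 >> 24) & 0x0F)    # major
            + '_' + str((ver32 >> 16) & 0x0F)  # minor
            + '_' + str((ver32 >> 8) & 0x0F))  # errata

print(to_redfish_version(0x01070000))  # v1_7_0
print(to_redfish_version(0xFFFFFFFF))  # '' (un-versioned)
```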
9f5597d2c203d47c1b95f072348c96e8e1f964bb
|
jnsebgosselin/h5py
|
setup_configure.py
|
[
"BSD-3-Clause"
] |
Python
|
validate_version
| null |
def validate_version(s):
"""Ensure that s contains an X.Y.Z format version string, or ValueError."""
try:
tpl = tuple(int(x) for x in s.split('.'))
if len(tpl) != 3:
raise ValueError
except Exception:
raise ValueError("HDF5 version string must be in X.Y.Z format")
|
Ensure that s contains an X.Y.Z format version string, or ValueError.
|
Ensure that s contains an X.Y.Z format version string, or ValueError.
|
[
"Ensure",
"that",
"s",
"contains",
"an",
"X",
".",
"Y",
".",
"Z",
"format",
"version",
"string",
"or",
"ValueError",
"."
] |
def validate_version(s):
try:
tpl = tuple(int(x) for x in s.split('.'))
if len(tpl) != 3:
raise ValueError
except Exception:
raise ValueError("HDF5 version string must be in X.Y.Z format")
|
[
"def",
"validate_version",
"(",
"s",
")",
":",
"try",
":",
"tpl",
"=",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"s",
".",
"split",
"(",
"'.'",
")",
")",
"if",
"len",
"(",
"tpl",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"\"HDF5 version string must be in X.Y.Z format\"",
")"
] |
Ensure that s contains an X.Y.Z format version string, or ValueError.
|
[
"Ensure",
"that",
"s",
"contains",
"an",
"X",
".",
"Y",
".",
"Z",
"format",
"version",
"string",
"or",
"ValueError",
"."
] |
[
"\"\"\"Ensure that s contains an X.Y.Z format version string, or ValueError.\"\"\""
] |
[
{
"param": "s",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def validate_version(s):
try:
tpl = tuple(int(x) for x in s.split('.'))
if len(tpl) != 3:
raise ValueError
except Exception:
raise ValueError("HDF5 version string must be in X.Y.Z format")
| 694 | 556 |
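Quick sanity check of `validate_version`: any shape other than three dot-separated integers collapses into the single `ValueError`:

```python
def validate_version(s):
    try:
        tpl = tuple(int(x) for x in s.split('.'))
        if len(tpl) != 3:
            raise ValueError
    except Exception:
        raise ValueError("HDF5 version string must be in X.Y.Z format")

validate_version('1.12.2')      # passes silently
try:
    validate_version('1.12')    # wrong arity
except ValueError as exc:
    print(exc)                  # HDF5 version string must be in X.Y.Z format
```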
262f7368253f1910ac4f218c57fa0d460abd794c
|
yc015/KALMUS
|
kalmus/utils/artist.py
|
[
"MIT"
] |
Python
|
flatten_image
|
<not_specific>
|
def flatten_image(image):
"""
Flat the input 2D image into an 1D image while preserve the channels of the input image
with shape==[height x width, channels]
:param image: Input 2D image (either multi-channel color image or greyscale image)
:type image: numpy.ndarray
:return: The flatten 1D image. shape==(height x width, channels)
:rtype: numpy.ndarry
"""
assert len(image.shape) >= 2, "The input image must be a 2 Dimensional image"
if len(image.shape) == 3:
image = image.reshape((-1, image.shape[2]))
elif len(image.shape) == 2:
image = image.reshape((-1, 1))
return image
|
Flat the input 2D image into an 1D image while preserve the channels of the input image
with shape==[height x width, channels]
:param image: Input 2D image (either multi-channel color image or greyscale image)
:type image: numpy.ndarray
:return: The flatten 1D image. shape==(height x width, channels)
:rtype: numpy.ndarry
|
Flat the input 2D image into an 1D image while preserve the channels of the input image
with shape==[height x width, channels]
|
[
"Flat",
"the",
"input",
"2D",
"image",
"into",
"an",
"1D",
"image",
"while",
"preserve",
"the",
"channels",
"of",
"the",
"input",
"image",
"with",
"shape",
"==",
"[",
"height",
"x",
"width",
"channels",
"]"
] |
def flatten_image(image):
assert len(image.shape) >= 2, "The input image must be a 2 Dimensional image"
if len(image.shape) == 3:
image = image.reshape((-1, image.shape[2]))
elif len(image.shape) == 2:
image = image.reshape((-1, 1))
return image
|
[
"def",
"flatten_image",
"(",
"image",
")",
":",
"assert",
"len",
"(",
"image",
".",
"shape",
")",
">=",
"2",
",",
"\"The input image must be a 2 Dimensional image\"",
"if",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"3",
":",
"image",
"=",
"image",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"image",
".",
"shape",
"[",
"2",
"]",
")",
")",
"elif",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"2",
":",
"image",
"=",
"image",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"return",
"image"
] |
Flat the input 2D image into an 1D image while preserve the channels of the input image
with shape==[height x width, channels]
|
[
"Flat",
"the",
"input",
"2D",
"image",
"into",
"an",
"1D",
"image",
"while",
"preserve",
"the",
"channels",
"of",
"the",
"input",
"image",
"with",
"shape",
"==",
"[",
"height",
"x",
"width",
"channels",
"]"
] |
[
"\"\"\"\n Flat the input 2D image into an 1D image while preserve the channels of the input image\n with shape==[height x width, channels]\n\n :param image: Input 2D image (either multi-channel color image or greyscale image)\n :type image: numpy.ndarray\n :return: The flatten 1D image. shape==(height x width, channels)\n :rtype: numpy.ndarry\n \"\"\""
] |
[
{
"param": "image",
"type": null
}
] |
{
"returns": [
{
"docstring": "The flatten 1D image. shape==(height x width, channels)",
"docstring_tokens": [
"The",
"flatten",
"1D",
"image",
".",
"shape",
"==",
"(",
"height",
"x",
"width",
"channels",
")"
],
"type": "numpy.ndarry"
}
],
"raises": [],
"params": [
{
"identifier": "image",
"type": null,
"docstring": "Input 2D image (either multi-channel color image or greyscale image)",
"docstring_tokens": [
"Input",
"2D",
"image",
"(",
"either",
"multi",
"-",
"channel",
"color",
"image",
"or",
"greyscale",
"image",
")"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def flatten_image(image):
assert len(image.shape) >= 2, "The input image must be a 2 Dimensional image"
if len(image.shape) == 3:
image = image.reshape((-1, image.shape[2]))
elif len(image.shape) == 2:
image = image.reshape((-1, 1))
return image
| 695 | 958 |
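Shape behavior of `flatten_image` on color and greyscale inputs, sketched with NumPy:

```python
import numpy as np

def flatten_image(image):
    assert len(image.shape) >= 2, "The input image must be a 2 Dimensional image"
    if len(image.shape) == 3:
        image = image.reshape((-1, image.shape[2]))  # (H, W, C) -> (H*W, C)
    elif len(image.shape) == 2:
        image = image.reshape((-1, 1))               # (H, W) -> (H*W, 1)
    return image

print(flatten_image(np.zeros((4, 5, 3))).shape)  # (20, 3)
print(flatten_image(np.zeros((4, 5))).shape)     # (20, 1)
```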
856dd3bfae90306fe06397fb9ba1ee8598054a14
|
APFDev/actc
|
roxe.cdt/libraries/libc++/libcxx/utils/merge_archives.py
|
[
"MIT"
] |
Python
|
execute_command
|
<not_specific>
|
def execute_command(cmd, cwd=None):
"""
Execute a command, capture and return its output.
"""
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
|
Execute a command, capture and return its output.
|
Execute a command, capture and return its output.
|
[
"Execute",
"a",
"command",
"capture",
"and",
"return",
"its",
"output",
"."
] |
def execute_command(cmd, cwd=None):
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
|
[
"def",
"execute_command",
"(",
"cmd",
",",
"cwd",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"'stdin'",
":",
"subprocess",
".",
"PIPE",
",",
"'stdout'",
":",
"subprocess",
".",
"PIPE",
",",
"'stderr'",
":",
"subprocess",
".",
"PIPE",
",",
"'cwd'",
":",
"cwd",
"}",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"**",
"kwargs",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"exitCode",
"=",
"p",
".",
"wait",
"(",
")",
"if",
"exitCode",
"==",
"-",
"signal",
".",
"SIGINT",
":",
"raise",
"KeyboardInterrupt",
"return",
"out",
",",
"err",
",",
"exitCode"
] |
Execute a command, capture and return its output.
|
[
"Execute",
"a",
"command",
"capture",
"and",
"return",
"its",
"output",
"."
] |
[
"\"\"\"\n Execute a command, capture and return its output.\n \"\"\""
] |
[
{
"param": "cmd",
"type": null
},
{
"param": "cwd",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cmd",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "cwd",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import signal
import subprocess
def execute_command(cmd, cwd=None):
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
| 696 | 22 |
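Usage sketch for `execute_command` (assumes a POSIX `echo` on PATH; the output is bytes because the pipes are not opened in text mode):

```python
import signal
import subprocess

def execute_command(cmd, cwd=None):
    kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'cwd': cwd,
    }
    p = subprocess.Popen(cmd, **kwargs)
    out, err = p.communicate()
    exitCode = p.wait()
    if exitCode == -signal.SIGINT:   # child was killed by Ctrl-C: propagate it
        raise KeyboardInterrupt
    return out, err, exitCode

out, err, code = execute_command(['echo', 'hello'])
print(code, out.decode().strip())  # 0 hello
```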
2dab4c04a0979c008857c12fcaaa0ab0e5094f31
|
jonatanvm/nlp-project
|
code/get-speeches.py
|
[
"MIT"
] |
Python
|
cleanText
|
<not_specific>
|
def cleanText(text):
'''
Remove line breaks and replace all whitespace chars with space
input: string
output: string
'''
stripwords = ["PMPVUORO", "KESKUST", "KYSKESK", "ASIAKOHTA", "EDPVUORO"]
text = text.replace(u'\xa0', u' ')
text = text.replace('\n', ' ')
words = text.split()
words = [w for w in words if w not in stripwords]
text = " ".join(words)
return text
|
Remove line breaks and replace all whitespace chars with space
input: string
output: string
|
Remove line breaks and replace all whitespace chars with space
input: string
output: string
|
[
"Remove",
"line",
"breaks",
"and",
"replace",
"all",
"whitespace",
"chars",
"with",
"space",
"input",
":",
"string",
"output",
":",
"string"
] |
def cleanText(text):
stripwords = ["PMPVUORO", "KESKUST", "KYSKESK", "ASIAKOHTA", "EDPVUORO"]
text = text.replace(u'\xa0', u' ')
text = text.replace('\n', ' ')
words = text.split()
words = [w for w in words if w not in stripwords]
text = " ".join(words)
return text
|
[
"def",
"cleanText",
"(",
"text",
")",
":",
"stripwords",
"=",
"[",
"\"PMPVUORO\"",
",",
"\"KESKUST\"",
",",
"\"KYSKESK\"",
",",
"\"ASIAKOHTA\"",
",",
"\"EDPVUORO\"",
"]",
"text",
"=",
"text",
".",
"replace",
"(",
"u'\\xa0'",
",",
"u' '",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"words",
"=",
"text",
".",
"split",
"(",
")",
"words",
"=",
"[",
"w",
"for",
"w",
"in",
"words",
"if",
"w",
"not",
"in",
"stripwords",
"]",
"text",
"=",
"\" \"",
".",
"join",
"(",
"words",
")",
"return",
"text"
] |
Remove line breaks and replace all whitespace chars with space
input: string
output: string
|
[
"Remove",
"line",
"breaks",
"and",
"replace",
"all",
"whitespace",
"chars",
"with",
"space",
"input",
":",
"string",
"output",
":",
"string"
] |
[
"'''\n Remove line breaks and replace all whitespace chars with space\n\n input: string\n output: string\n '''"
] |
[
{
"param": "text",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "text",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def cleanText(text):
stripwords = ["PMPVUORO", "KESKUST", "KYSKESK", "ASIAKOHTA", "EDPVUORO"]
text = text.replace(u'\xa0', u' ')
text = text.replace('\n', ' ')
words = text.split()
words = [w for w in words if w not in stripwords]
text = " ".join(words)
return text
| 697 | 312 |
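A worked example for `cleanText`: non-breaking spaces and newlines are normalized, then the Finnish parliament markers in `stripwords` are dropped (the sample string is hypothetical):

```python
def cleanText(text):
    stripwords = ["PMPVUORO", "KESKUST", "KYSKESK", "ASIAKOHTA", "EDPVUORO"]
    text = text.replace(u'\xa0', u' ')   # non-breaking space -> space
    text = text.replace('\n', ' ')       # newline -> space
    words = text.split()                 # split() also collapses runs of whitespace
    words = [w for w in words if w not in stripwords]
    return " ".join(words)

print(cleanText('PMPVUORO Hyvä\xa0puhe\njatkuu'))  # Hyvä puhe jatkuu
```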
5b514520dd7148c9583edbe558a79ec72f4b7c56
|
ebuntel/BrainExTemp
|
brainex/utils/spark_utils.py
|
[
"MIT"
] |
Python
|
_broadcast_kwargs
|
<not_specific>
|
def _broadcast_kwargs(sc: SparkContext, kwargs_dict):
"""
return the broadcast version of the kwargs values
:param kwargs_dict:
"""
rtn = dict(((key, sc.broadcast(value=value)) for key, value in kwargs_dict))
return rtn
|
return the broadcast version of the kwargs values
:param kwargs_dict:
|
return the broadcast version of the kwargs values
|
[
"return",
"the",
"broadcast",
"version",
"of",
"the",
"kwargs",
"values"
] |
def _broadcast_kwargs(sc: SparkContext, kwargs_dict):
rtn = dict(((key, sc.broadcast(value=value)) for key, value in kwargs_dict))
return rtn
|
[
"def",
"_broadcast_kwargs",
"(",
"sc",
":",
"SparkContext",
",",
"kwargs_dict",
")",
":",
"rtn",
"=",
"dict",
"(",
"(",
"(",
"key",
",",
"sc",
".",
"broadcast",
"(",
"value",
"=",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"kwargs_dict",
")",
")",
"return",
"rtn"
] |
return the broadcast version of the kwargs values
|
[
"return",
"the",
"broadcast",
"version",
"of",
"the",
"kwargs",
"values"
] |
[
"\"\"\"\n return the broadcast version of the kwargs values\n :param kwargs_dict:\n \"\"\""
] |
[
{
"param": "sc",
"type": "SparkContext"
},
{
"param": "kwargs_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "sc",
"type": "SparkContext",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "kwargs_dict",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _broadcast_kwargs(sc: SparkContext, kwargs_dict):
rtn = dict(((key, sc.broadcast(value=value)) for key, value in kwargs_dict))
return rtn
| 700 | 397 |
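As written, `for key, value in kwargs_dict` unpacks two values per element, so `kwargs_dict` must already be an iterable of `(key, value)` pairs; iterating a plain dict yields keys only, which would raise for most keys (or silently mis-split two-character string keys). A hedged corrected sketch for the plain-dict case:

```python
def _broadcast_kwargs(sc, kwargs_dict):
    # .items() is needed when kwargs_dict is a plain dict; the row's version
    # only works if the caller passes (key, value) pairs directly.
    return {key: sc.broadcast(value=value) for key, value in kwargs_dict.items()}
```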
09cc2260f0642e3b8992abc51bf33b4d5408c819
|
hkotaro1215/invest
|
src/natcap/invest/coastal_blue_carbon/preprocessor.py
|
[
"BSD-3-Clause"
] |
Python
|
_create_carbon_pool_initial_table_template
| null |
def _create_carbon_pool_initial_table_template(filepath, code_to_lulc_dict):
"""Create carbon pool initial values table.
Args:
filepath (str): filepath to carbon pool initial conditions
code_to_lulc_dict (dict): map lulc codes to lulc classes
"""
with open(filepath, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['code', 'lulc-class', 'biomass', 'soil', 'litter'])
for code in code_to_lulc_dict.keys():
row = [code, code_to_lulc_dict[code]] + ['', '', '']
writer.writerow(row)
|
Create carbon pool initial values table.
Args:
filepath (str): filepath to carbon pool initial conditions
code_to_lulc_dict (dict): map lulc codes to lulc classes
|
Create carbon pool initial values table.
|
[
"Create",
"carbon",
"pool",
"initial",
"values",
"table",
"."
] |
def _create_carbon_pool_initial_table_template(filepath, code_to_lulc_dict):
with open(filepath, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['code', 'lulc-class', 'biomass', 'soil', 'litter'])
for code in code_to_lulc_dict.keys():
row = [code, code_to_lulc_dict[code]] + ['', '', '']
writer.writerow(row)
|
[
"def",
"_create_carbon_pool_initial_table_template",
"(",
"filepath",
",",
"code_to_lulc_dict",
")",
":",
"with",
"open",
"(",
"filepath",
",",
"'w'",
")",
"as",
"csv_file",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"csv_file",
")",
"writer",
".",
"writerow",
"(",
"[",
"'code'",
",",
"'lulc-class'",
",",
"'biomass'",
",",
"'soil'",
",",
"'litter'",
"]",
")",
"for",
"code",
"in",
"code_to_lulc_dict",
".",
"keys",
"(",
")",
":",
"row",
"=",
"[",
"code",
",",
"code_to_lulc_dict",
"[",
"code",
"]",
"]",
"+",
"[",
"''",
",",
"''",
",",
"''",
"]",
"writer",
".",
"writerow",
"(",
"row",
")"
] |
Create carbon pool initial values table.
|
[
"Create",
"carbon",
"pool",
"initial",
"values",
"table",
"."
] |
[
"\"\"\"Create carbon pool initial values table.\n\n Args:\n filepath (str): filepath to carbon pool initial conditions\n code_to_lulc_dict (dict): map lulc codes to lulc classes\n \"\"\""
] |
[
{
"param": "filepath",
"type": null
},
{
"param": "code_to_lulc_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filepath",
"type": null,
"docstring": "filepath to carbon pool initial conditions",
"docstring_tokens": [
"filepath",
"to",
"carbon",
"pool",
"initial",
"conditions"
],
"default": null,
"is_optional": false
},
{
"identifier": "code_to_lulc_dict",
"type": null,
"docstring": "map lulc codes to lulc classes",
"docstring_tokens": [
"map",
"lulc",
"codes",
"to",
"lulc",
"classes"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import csv
def _create_carbon_pool_initial_table_template(filepath, code_to_lulc_dict):
with open(filepath, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['code', 'lulc-class', 'biomass', 'soil', 'litter'])
for code in code_to_lulc_dict.keys():
row = [code, code_to_lulc_dict[code]] + ['', '', '']
writer.writerow(row)
| 703 | 720 |
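A small demonstration of the CSV template writer (the file name and LULC mapping are hypothetical; on Windows, `open(..., newline='')` would avoid blank interleaved rows):

```python
import csv

def _create_carbon_pool_initial_table_template(filepath, code_to_lulc_dict):
    with open(filepath, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['code', 'lulc-class', 'biomass', 'soil', 'litter'])
        for code in code_to_lulc_dict.keys():
            writer.writerow([code, code_to_lulc_dict[code]] + ['', '', ''])

_create_carbon_pool_initial_table_template('pools.csv', {0: 'water', 1: 'forest'})
# pools.csv now contains:
# code,lulc-class,biomass,soil,litter
# 0,water,,,
# 1,forest,,,
```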
5e02f6708247e1936dde3a9db58ce9b055abcc43
|
educational-technology-collective/slicing-analysis
|
gardner/extraction/quiz_feature_extractor.py
|
[
"MIT"
] |
Python
|
generate_appended_csv
|
<not_specific>
|
def generate_appended_csv(df_in, week):
"""
Helper function to generate 'wide' appended dataframe from 'long' feature set.
:param df_in: Full pandas.DataFrame of userID, week, and additional features.
:param week: Week to create appended feature set for (starting at zero, inclusive)
:return: pandas.DataFrame of appended ('wide') features for weeks in interval [0, week].
"""
#initialize output data using week 0; additional weeks will be merged on session_user_id
df_app = df_in[df_in.week == 0].set_index(['session_user_id', 'week']).rename(columns = lambda x: 'week_0_{0}'.format(str(x))).reset_index()
if week == 0: # nothing to append; first week of course
return df_app
for i in range(1, week+1): # append data from weeks 0-current week
df_to_append = df_in[df_in.week == i].drop('week', axis=1).set_index('session_user_id')
df_to_append = df_to_append\
.rename(columns = lambda x: 'week_{0}_{1}'.format(str(i), str(x)))\
.reset_index()
# merge with df_app
df_app = df_app.merge(df_to_append)
return df_app
|
Helper function to generate 'wide' appended dataframe from 'long' feature set.
:param df_in: Full pandas.DataFrame of userID, week, and additional features.
:param week: Week to create appended feature set for (starting at zero, inclusive)
:return: pandas.DataFrame of appended ('wide') features for weeks in interval [0, week].
|
Helper function to generate 'wide' appended dataframe from 'long' feature set.
|
[
"Helper",
"function",
"to",
"generate",
"'",
"wide",
"'",
"appended",
"dataframe",
"from",
"'",
"long",
"'",
"feature",
"set",
"."
] |
def generate_appended_csv(df_in, week):
df_app = df_in[df_in.week == 0].set_index(['session_user_id', 'week']).rename(columns = lambda x: 'week_0_{0}'.format(str(x))).reset_index()
if week == 0:
return df_app
for i in range(1, week+1):
df_to_append = df_in[df_in.week == i].drop('week', axis=1).set_index('session_user_id')
df_to_append = df_to_append\
.rename(columns = lambda x: 'week_{0}_{1}'.format(str(i), str(x)))\
.reset_index()
df_app = df_app.merge(df_to_append)
return df_app
|
[
"def",
"generate_appended_csv",
"(",
"df_in",
",",
"week",
")",
":",
"df_app",
"=",
"df_in",
"[",
"df_in",
".",
"week",
"==",
"0",
"]",
".",
"set_index",
"(",
"[",
"'session_user_id'",
",",
"'week'",
"]",
")",
".",
"rename",
"(",
"columns",
"=",
"lambda",
"x",
":",
"'week_0_{0}'",
".",
"format",
"(",
"str",
"(",
"x",
")",
")",
")",
".",
"reset_index",
"(",
")",
"if",
"week",
"==",
"0",
":",
"return",
"df_app",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"week",
"+",
"1",
")",
":",
"df_to_append",
"=",
"df_in",
"[",
"df_in",
".",
"week",
"==",
"i",
"]",
".",
"drop",
"(",
"'week'",
",",
"axis",
"=",
"1",
")",
".",
"set_index",
"(",
"'session_user_id'",
")",
"df_to_append",
"=",
"df_to_append",
".",
"rename",
"(",
"columns",
"=",
"lambda",
"x",
":",
"'week_{0}_{1}'",
".",
"format",
"(",
"str",
"(",
"i",
")",
",",
"str",
"(",
"x",
")",
")",
")",
".",
"reset_index",
"(",
")",
"df_app",
"=",
"df_app",
".",
"merge",
"(",
"df_to_append",
")",
"return",
"df_app"
] |
Helper function to generate 'wide' appended dataframe from 'long' feature set.
|
[
"Helper",
"function",
"to",
"generate",
"'",
"wide",
"'",
"appended",
"dataframe",
"from",
"'",
"long",
"'",
"feature",
"set",
"."
] |
[
"\"\"\"\n Helper function to generate 'wide' appended dataframe from 'long' feature set.\n :param df_in: Full pandas.DataFrame of userID, week, and additional features.\n :param week: Week to create appended feature set for (starting at zero, inclusive)\n :return: pandas.DataFrame of appended ('wide') features for weeks in interval [0, week].\n \"\"\"",
"#initialize output data using week 0; additional weeks will be merged on session_user_id",
"# nothing to append; first week of course",
"# append data from weeks 0-current week",
"# merge with df_app"
] |
[
{
"param": "df_in",
"type": null
},
{
"param": "week",
"type": null
}
] |
{
"returns": [
{
"docstring": "pandas.DataFrame of appended ('wide') features for weeks in interval [0, week].",
"docstring_tokens": [
"pandas",
".",
"DataFrame",
"of",
"appended",
"(",
"'",
"wide",
"'",
")",
"features",
"for",
"weeks",
"in",
"interval",
"[",
"0",
"week",
"]",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "df_in",
"type": null,
"docstring": "Full pandas.DataFrame of userID, week, and additional features.",
"docstring_tokens": [
"Full",
"pandas",
".",
"DataFrame",
"of",
"userID",
"week",
"and",
"additional",
"features",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "week",
"type": null,
"docstring": "Week to create appended feature set for (starting at zero, inclusive)",
"docstring_tokens": [
"Week",
"to",
"create",
"appended",
"feature",
"set",
"for",
"(",
"starting",
"at",
"zero",
"inclusive",
")"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def generate_appended_csv(df_in, week):
df_app = df_in[df_in.week == 0].set_index(['session_user_id', 'week']).rename(columns = lambda x: 'week_0_{0}'.format(str(x))).reset_index()
if week == 0:
return df_app
for i in range(1, week+1):
df_to_append = df_in[df_in.week == i].drop('week', axis=1).set_index('session_user_id')
df_to_append = df_to_append\
.rename(columns = lambda x: 'week_{0}_{1}'.format(str(i), str(x)))\
.reset_index()
df_app = df_app.merge(df_to_append)
return df_app
| 704 | 954 |
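A minimal long-to-wide illustration for `generate_appended_csv` with a two-user, two-week frame (the data is hypothetical):

```python
import pandas as pd

def generate_appended_csv(df_in, week):
    df_app = (df_in[df_in.week == 0]
              .set_index(['session_user_id', 'week'])
              .rename(columns=lambda x: 'week_0_{0}'.format(x))
              .reset_index())
    if week == 0:
        return df_app
    for i in range(1, week + 1):
        df_to_append = (df_in[df_in.week == i]
                        .drop('week', axis=1)
                        .set_index('session_user_id')
                        .rename(columns=lambda x: 'week_{0}_{1}'.format(i, x))
                        .reset_index())
        df_app = df_app.merge(df_to_append)   # joins on session_user_id
    return df_app

df = pd.DataFrame({'session_user_id': ['u1', 'u1', 'u2', 'u2'],
                   'week': [0, 1, 0, 1],
                   'score': [3, 5, 2, 4]})
print(list(generate_appended_csv(df, week=1).columns))
# ['session_user_id', 'week', 'week_0_score', 'week_1_score']
```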
fc4504f269503e5cb3aaac9cabda7b08462dd9a6
|
greck2908/language
|
language/serene/util.py
|
[
"Apache-2.0"
] |
Python
|
normalize
|
<not_specific>
|
def normalize(wikipedia_url):
"""Unicode normalize the wikipedia title.
Args:
wikipedia_url: The original title
Returns:
The unicode normalized title
"""
return unicodedata.normalize('NFC', wikipedia_url)
|
Unicode normalize the wikipedia title.
Args:
wikipedia_url: The original title
Returns:
The unicode normalized title
|
Unicode normalize the wikipedia title.
|
[
"Unicode",
"normalize",
"the",
"wikipedia",
"title",
"."
] |
def normalize(wikipedia_url):
return unicodedata.normalize('NFC', wikipedia_url)
|
[
"def",
"normalize",
"(",
"wikipedia_url",
")",
":",
"return",
"unicodedata",
".",
"normalize",
"(",
"'NFC'",
",",
"wikipedia_url",
")"
] |
Unicode normalize the wikipedia title.
|
[
"Unicode",
"normalize",
"the",
"wikipedia",
"title",
"."
] |
[
"\"\"\"Unicode normalize the wikipedia title.\n\n Args:\n wikipedia_url: The original title\n\n Returns:\n The unicode normalized title\n \"\"\""
] |
[
{
"param": "wikipedia_url",
"type": null
}
] |
{
"returns": [
{
"docstring": "The unicode normalized title",
"docstring_tokens": [
"The",
"unicode",
"normalized",
"title"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "wikipedia_url",
"type": null,
"docstring": "The original title",
"docstring_tokens": [
"The",
"original",
"title"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import unicodedata
def normalize(wikipedia_url):
return unicodedata.normalize('NFC', wikipedia_url)
| 706 | 1,009 |
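NFC composition in one line: a decomposed 'e' plus combining acute becomes the single precomposed character, so visually identical titles compare equal:

```python
import unicodedata

def normalize(wikipedia_url):
    return unicodedata.normalize('NFC', wikipedia_url)

decomposed = 'Be\u0301zier'               # 'e' followed by U+0301 COMBINING ACUTE ACCENT
print(decomposed == 'Bézier')             # False: different code points
print(normalize(decomposed) == 'Bézier')  # True after NFC normalization
```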
fe48869150eae8006970aa491d6de9f996a598fa
|
wangjksjtu/autoassist-exp
|
machine_translation/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
|
[
"BSD-3-Clause"
] |
Python
|
add_args
| null |
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
help='max learning rate, must be more than args.lr')
parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
help='factor to grow the length of each period')
parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
help='initial number of updates per period')
|
Add arguments to the parser for this LR scheduler.
|
Add arguments to the parser for this LR scheduler.
|
[
"Add",
"arguments",
"to",
"the",
"parser",
"for",
"this",
"LR",
"scheduler",
"."
] |
def add_args(parser):
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
help='max learning rate, must be more than args.lr')
parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
help='factor to grow the length of each period')
parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
help='initial number of updates per period')
|
[
"def",
"add_args",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'--warmup-updates'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
",",
"metavar",
"=",
"'N'",
",",
"help",
"=",
"'warmup the learning rate linearly for the first N updates'",
")",
"parser",
".",
"add_argument",
"(",
"'--warmup-init-lr'",
",",
"default",
"=",
"-",
"1",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'LR'",
",",
"help",
"=",
"'initial learning rate during warmup phase; default is args.lr'",
")",
"parser",
".",
"add_argument",
"(",
"'--max-lr'",
",",
"required",
"=",
"True",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'LR'",
",",
"help",
"=",
"'max learning rate, must be more than args.lr'",
")",
"parser",
".",
"add_argument",
"(",
"'--t-mult'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'LR'",
",",
"help",
"=",
"'factor to grow the length of each period'",
")",
"parser",
".",
"add_argument",
"(",
"'--lr-period-updates'",
",",
"default",
"=",
"5000",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'LR'",
",",
"help",
"=",
"'initial number of updates per period'",
")"
] |
Add arguments to the parser for this LR scheduler.
|
[
"Add",
"arguments",
"to",
"the",
"parser",
"for",
"this",
"LR",
"scheduler",
"."
] |
[
"\"\"\"Add arguments to the parser for this LR scheduler.\"\"\""
] |
[
{
"param": "parser",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "parser",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_args(parser):
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
help='max learning rate, must be more than args.lr')
parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
help='factor to grow the length of each period')
parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
help='initial number of updates per period')
| 708 | 849 |
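Wiring sketch for `add_args`: only `--max-lr` is required, the rest fall back to their defaults:

```python
import argparse

def add_args(parser):
    parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                        help='warmup the learning rate linearly for the first N updates')
    parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                        help='initial learning rate during warmup phase; default is args.lr')
    parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                        help='max learning rate, must be more than args.lr')
    parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
                        help='factor to grow the length of each period')
    parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                        help='initial number of updates per period')

parser = argparse.ArgumentParser()
add_args(parser)
args = parser.parse_args(['--max-lr', '0.5'])
print(args.max_lr, args.warmup_updates, args.lr_period_updates)
# 0.5 0 5000  (argparse applies type= only to parsed strings, not to defaults)
```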
77589517f65e9720dbb8ec447b9081a0a7f0482d
|
flupzor/newsdiffs
|
news/views.py
|
[
"MIT"
] |
Python
|
prepend_http
|
<not_specific>
|
def prepend_http(url):
"""Return a version of the url that starts with the proper scheme.
url may look like
www.nytimes.com
https:/www.nytimes.com <- because double slashes get stripped
http://www.nytimes.com
"""
components = url.split('/', 2)
if len(components) <= 2 or '.' in components[0]:
components = ['http:', '']+components
elif components[1]:
components[1:1] = ['']
return '/'.join(components)
|
Return a version of the url that starts with the proper scheme.
url may look like
www.nytimes.com
https:/www.nytimes.com <- because double slashes get stripped
http://www.nytimes.com
|
Return a version of the url that starts with the proper scheme.
url may look like
|
[
"Return",
"a",
"version",
"of",
"the",
"url",
"that",
"starts",
"with",
"the",
"proper",
"scheme",
".",
"url",
"may",
"look",
"like"
] |
def prepend_http(url):
components = url.split('/', 2)
if len(components) <= 2 or '.' in components[0]:
components = ['http:', '']+components
elif components[1]:
components[1:1] = ['']
return '/'.join(components)
|
[
"def",
"prepend_http",
"(",
"url",
")",
":",
"components",
"=",
"url",
".",
"split",
"(",
"'/'",
",",
"2",
")",
"if",
"len",
"(",
"components",
")",
"<=",
"2",
"or",
"'.'",
"in",
"components",
"[",
"0",
"]",
":",
"components",
"=",
"[",
"'http:'",
",",
"''",
"]",
"+",
"components",
"elif",
"components",
"[",
"1",
"]",
":",
"components",
"[",
"1",
":",
"1",
"]",
"=",
"[",
"''",
"]",
"return",
"'/'",
".",
"join",
"(",
"components",
")"
] |
Return a version of the url that starts with the proper scheme.
|
[
"Return",
"a",
"version",
"of",
"the",
"url",
"that",
"starts",
"with",
"the",
"proper",
"scheme",
"."
] |
[
"\"\"\"Return a version of the url that starts with the proper scheme.\n\n url may look like\n\n www.nytimes.com\n https:/www.nytimes.com <- because double slashes get stripped\n http://www.nytimes.com\n \"\"\""
] |
[
{
"param": "url",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "url",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def prepend_http(url):
components = url.split('/', 2)
if len(components) <= 2 or '.' in components[0]:
components = ['http:', '']+components
elif components[1]:
components[1:1] = ['']
return '/'.join(components)
| 709 | 929 |
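Worked cases for `prepend_http`. Note that the single-slash repair in the `elif` branch only fires when a path segment follows the host, so the split yields three parts:

```python
def prepend_http(url):
    components = url.split('/', 2)
    if len(components) <= 2 or '.' in components[0]:
        components = ['http:', ''] + components   # bare host: prepend a scheme
    elif components[1]:
        components[1:1] = ['']                    # 'https:/host/...': restore the slash
    return '/'.join(components)

print(prepend_http('www.nytimes.com'))              # http://www.nytimes.com
print(prepend_http('http://www.nytimes.com'))       # http://www.nytimes.com
print(prepend_http('https:/www.nytimes.com/page'))  # https://www.nytimes.com/page
```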
2755317405c44da906cf675f5a68886806710d55
|
pansapiens/inmembrane
|
inmembrane/helpers.py
|
[
"BSD-2-Clause"
] |
Python
|
parse_fasta_header
|
<not_specific>
|
def parse_fasta_header(header):
"""
Parses a FASTA format header (with our without the initial '>') and returns a
tuple of sequence id and sequence name/description.
If NCBI SeqID format (gi|gi-number|gb|accession etc, is detected
the first id in the list is used as the canonical id (see see
http://www.ncbi.nlm.nih.gov/books/NBK21097/#A631 ).
"""
if header[0] == '>':
header = header[1:]
tokens = header.split('|')
# check to see if we have an NCBI-style header
if header.find("|") != -1 and len(tokens[0]) <= 3:
# "gi|ginumber|gb|accession bla bla" becomes "gi|ginumber"
seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])
name = tokens[-1:][0].strip()
# otherwise just split on spaces & hope for the best
else:
tokens = header.split()
seqid = tokens[0]
name = header[0:-1].strip()
return seqid, name
|
Parses a FASTA format header (with our without the initial '>') and returns a
tuple of sequence id and sequence name/description.
If NCBI SeqID format (gi|gi-number|gb|accession etc, is detected
the first id in the list is used as the canonical id (see see
http://www.ncbi.nlm.nih.gov/books/NBK21097/#A631 ).
|
Parses a FASTA format header (with our without the initial '>') and returns a
tuple of sequence id and sequence name/description.
|
[
"Parses",
"a",
"FASTA",
"format",
"header",
"(",
"with",
"our",
"without",
"the",
"initial",
"'",
">",
"'",
")",
"and",
"returns",
"a",
"tuple",
"of",
"sequence",
"id",
"and",
"sequence",
"name",
"/",
"description",
"."
] |
def parse_fasta_header(header):
if header[0] == '>':
header = header[1:]
tokens = header.split('|')
if header.find("|") != -1 and len(tokens[0]) <= 3:
seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])
name = tokens[-1:][0].strip()
else:
tokens = header.split()
seqid = tokens[0]
name = header[0:-1].strip()
return seqid, name
|
[
"def",
"parse_fasta_header",
"(",
"header",
")",
":",
"if",
"header",
"[",
"0",
"]",
"==",
"'>'",
":",
"header",
"=",
"header",
"[",
"1",
":",
"]",
"tokens",
"=",
"header",
".",
"split",
"(",
"'|'",
")",
"if",
"header",
".",
"find",
"(",
"\"|\"",
")",
"!=",
"-",
"1",
"and",
"len",
"(",
"tokens",
"[",
"0",
"]",
")",
"<=",
"3",
":",
"seqid",
"=",
"\"%s|%s\"",
"%",
"(",
"tokens",
"[",
"0",
"]",
",",
"tokens",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"name",
"=",
"tokens",
"[",
"-",
"1",
":",
"]",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"else",
":",
"tokens",
"=",
"header",
".",
"split",
"(",
")",
"seqid",
"=",
"tokens",
"[",
"0",
"]",
"name",
"=",
"header",
"[",
"0",
":",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"seqid",
",",
"name"
] |
Parses a FASTA format header (with our without the initial '>') and returns a
tuple of sequence id and sequence name/description.
|
[
"Parses",
"a",
"FASTA",
"format",
"header",
"(",
"with",
"our",
"without",
"the",
"initial",
"'",
">",
"'",
")",
"and",
"returns",
"a",
"tuple",
"of",
"sequence",
"id",
"and",
"sequence",
"name",
"/",
"description",
"."
] |
[
"\"\"\"\n Parses a FASTA format header (with our without the initial '>') and returns a\n tuple of sequence id and sequence name/description.\n\n If NCBI SeqID format (gi|gi-number|gb|accession etc, is detected\n the first id in the list is used as the canonical id (see see\n http://www.ncbi.nlm.nih.gov/books/NBK21097/#A631 ).\n \"\"\"",
"# check to see if we have an NCBI-style header",
"# \"gi|ginumber|gb|accession bla bla\" becomes \"gi|ginumber\"",
"# otherwise just split on spaces & hope for the best"
] |
[
{
"param": "header",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "header",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def parse_fasta_header(header):
if header[0] == '>':
header = header[1:]
tokens = header.split('|')
if header.find("|") != -1 and len(tokens[0]) <= 3:
seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])
name = tokens[-1:][0].strip()
else:
tokens = header.split()
seqid = tokens[0]
name = header[0:-1].strip()
return seqid, name
| 710 | 591 |
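Two traced cases for `parse_fasta_header`: an NCBI-style header and a plain one. The plain branch takes `header[0:-1]`, which drops the final character, so it evidently expects the trailing newline still attached (the accession below is illustrative):

```python
def parse_fasta_header(header):
    if header[0] == '>':
        header = header[1:]
    tokens = header.split('|')
    if header.find("|") != -1 and len(tokens[0]) <= 3:
        seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])  # e.g. 'gi|ginumber'
        name = tokens[-1:][0].strip()
    else:
        tokens = header.split()
        seqid = tokens[0]
        name = header[0:-1].strip()   # assumes a trailing newline to discard
    return seqid, name

print(parse_fasta_header('>gi|129295|gb|AAB02057.1| serpin B1'))
# ('gi|129295', 'serpin B1')
print(parse_fasta_header('>seq1 test protein\n'))
# ('seq1', 'seq1 test protein')
```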
25bcd15ff07b07877b3c9ef2b34c64c3a474ba0e
|
ronhogue/Avere-CLFSLoad
|
clfsload/stypes.py
|
[
"Apache-2.0"
] |
Python
|
from_string
|
<not_specific>
|
def from_string(cls, txt):
'Convert a string to a compression type constant'
if txt is None:
return None
t = txt.upper()
return getattr(cls, t, None)
|
Convert a string to a compression type constant
|
Convert a string to a compression type constant
|
[
"Convert",
"a",
"string",
"to",
"a",
"compression",
"type",
"constant"
] |
def from_string(cls, txt):
if txt is None:
return None
t = txt.upper()
return getattr(cls, t, None)
|
[
"def",
"from_string",
"(",
"cls",
",",
"txt",
")",
":",
"if",
"txt",
"is",
"None",
":",
"return",
"None",
"t",
"=",
"txt",
".",
"upper",
"(",
")",
"return",
"getattr",
"(",
"cls",
",",
"t",
",",
"None",
")"
] |
Convert a string to a compression type constant
|
[
"Convert",
"a",
"string",
"to",
"a",
"compression",
"type",
"constant"
] |
[
"'Convert a string to a compression type constant'"
] |
[
{
"param": "cls",
"type": null
},
{
"param": "txt",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "txt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def from_string(cls, txt):
if txt is None:
return None
t = txt.upper()
return getattr(cls, t, None)
| 711 | 127 |
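`from_string` reads as a classmethod body that maps a case-insensitive name to a class attribute; a sketch with a hypothetical constants class:

```python
def from_string(cls, txt):
    if txt is None:
        return None
    t = txt.upper()
    return getattr(cls, t, None)   # None for unknown names

class CompressionType:             # hypothetical constants holder
    GZIP = 1
    LZ4 = 2

print(from_string(CompressionType, 'gzip'))  # 1
print(from_string(CompressionType, 'zstd'))  # None
```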
746faa788f5bdabe56e61121a03c072330b2f245
|
zhwycsz/edd
|
server/edd_file_importer/utilities.py
|
[
"BSD-3-Clause-LBNL"
] |
Python
|
build_err_payload
|
<not_specific>
|
def build_err_payload(aggregator, import_):
"""
Builds a JSON error response to return as a WS client notification.
"""
# flatten errors & warnings into a single list to send to the UI. Each ImportErrorSummary
# may optionally contain multiple related errors grouped by subcategory
errs = []
for err_type_summary in aggregator.errors.values():
errs.extend(err_type_summary.to_json())
warns = []
for warn_type_summary in aggregator.warnings.values():
warns.extend(warn_type_summary.to_json())
return {
"pk": import_.pk,
"uuid": import_.uuid,
"status": import_.status,
"errors": errs,
"warnings": warns,
}
|
Builds a JSON error response to return as a WS client notification.
|
Builds a JSON error response to return as a WS client notification.
|
[
"Builds",
"a",
"JSON",
"error",
"response",
"to",
"return",
"as",
"a",
"WS",
"client",
"notification",
"."
] |
def build_err_payload(aggregator, import_):
errs = []
for err_type_summary in aggregator.errors.values():
errs.extend(err_type_summary.to_json())
warns = []
for warn_type_summary in aggregator.warnings.values():
warns.extend(warn_type_summary.to_json())
return {
"pk": import_.pk,
"uuid": import_.uuid,
"status": import_.status,
"errors": errs,
"warnings": warns,
}
|
[
"def",
"build_err_payload",
"(",
"aggregator",
",",
"import_",
")",
":",
"errs",
"=",
"[",
"]",
"for",
"err_type_summary",
"in",
"aggregator",
".",
"errors",
".",
"values",
"(",
")",
":",
"errs",
".",
"extend",
"(",
"err_type_summary",
".",
"to_json",
"(",
")",
")",
"warns",
"=",
"[",
"]",
"for",
"warn_type_summary",
"in",
"aggregator",
".",
"warnings",
".",
"values",
"(",
")",
":",
"warns",
".",
"extend",
"(",
"warn_type_summary",
".",
"to_json",
"(",
")",
")",
"return",
"{",
"\"pk\"",
":",
"import_",
".",
"pk",
",",
"\"uuid\"",
":",
"import_",
".",
"uuid",
",",
"\"status\"",
":",
"import_",
".",
"status",
",",
"\"errors\"",
":",
"errs",
",",
"\"warnings\"",
":",
"warns",
",",
"}"
] |
Builds a JSON error response to return as a WS client notification.
|
[
"Builds",
"a",
"JSON",
"error",
"response",
"to",
"return",
"as",
"a",
"WS",
"client",
"notification",
"."
] |
[
"\"\"\"\n Builds a JSON error response to return as a WS client notification.\n \"\"\"",
"# flatten errors & warnings into a single list to send to the UI. Each ImportErrorSummary",
"# may optionally contain multiple related errors grouped by subcategory"
] |
[
{
"param": "aggregator",
"type": null
},
{
"param": "import_",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "aggregator",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "import_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def build_err_payload(aggregator, import_):
errs = []
for err_type_summary in aggregator.errors.values():
errs.extend(err_type_summary.to_json())
warns = []
for warn_type_summary in aggregator.warnings.values():
warns.extend(warn_type_summary.to_json())
return {
"pk": import_.pk,
"uuid": import_.uuid,
"status": import_.status,
"errors": errs,
"warnings": warns,
}
| 712 | 984 |
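A minimal sketch of the payload shape produced by the build_err_payload record above. The _Summary, _Aggregator, and _Import classes are invented stand-ins for EDD's real aggregator and import models.

def build_err_payload(aggregator, import_):
    errs = []
    for err_type_summary in aggregator.errors.values():
        errs.extend(err_type_summary.to_json())
    warns = []
    for warn_type_summary in aggregator.warnings.values():
        warns.extend(warn_type_summary.to_json())
    return {
        "pk": import_.pk,
        "uuid": import_.uuid,
        "status": import_.status,
        "errors": errs,
        "warnings": warns,
    }

class _Summary:
    # each summary flattens to a list of JSON-ready dicts
    def __init__(self, items):
        self._items = items
    def to_json(self):
        return self._items

class _Aggregator:
    errors = {"parse": _Summary([{"category": "parse", "detail": "bad header"}])}
    warnings = {}

class _Import:
    pk, uuid, status = 1, "00000000-0000-0000-0000-000000000000", "Failed"

print(build_err_payload(_Aggregator(), _Import()))
# a dict with pk/uuid/status plus flattened 'errors' and 'warnings' lists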
135e9f73b08d9f46519e0ae0932965c48c3cdc3e
|
olivergs/django-sitetools
|
sitetools/utils.py
|
[
"MIT"
] |
Python
|
inject_app_defaults
| null |
def inject_app_defaults(appname):
"""
Inject an application's default settings
"""
try:
# Import application settings module
__import__('%s.settings' % appname)
# Import our defaults, project defaults, and project settings
_app_settings = sys.modules['%s.settings' % appname]
_def_settings = sys.modules['django.conf.global_settings']
_settings = sys.modules['django.conf'].settings
# Add the values from the application settings module
for _k in dir(_app_settings):
if _k.isupper():
# Add the value to the default settings module
setattr(_def_settings, _k, getattr(_app_settings, _k))
# Add the value to the settings, if not already present
if not hasattr(_settings, _k):
setattr(_settings, _k, getattr(_app_settings, _k))
except ImportError:
# Silently skip failing settings modules
pass
|
Inject an application's default settings
|
Inject an application's default settings
|
[
"Inject",
"an",
"application",
"'",
"s",
"default",
"settings"
] |
def inject_app_defaults(appname):
try:
__import__('%s.settings' % appname)
_app_settings = sys.modules['%s.settings' % appname]
_def_settings = sys.modules['django.conf.global_settings']
_settings = sys.modules['django.conf'].settings
for _k in dir(_app_settings):
if _k.isupper():
setattr(_def_settings, _k, getattr(_app_settings, _k))
if not hasattr(_settings, _k):
setattr(_settings, _k, getattr(_app_settings, _k))
except ImportError:
pass
|
[
"def",
"inject_app_defaults",
"(",
"appname",
")",
":",
"try",
":",
"__import__",
"(",
"'%s.settings'",
"%",
"appname",
")",
"_app_settings",
"=",
"sys",
".",
"modules",
"[",
"'%s.settings'",
"%",
"appname",
"]",
"_def_settings",
"=",
"sys",
".",
"modules",
"[",
"'django.conf.global_settings'",
"]",
"_settings",
"=",
"sys",
".",
"modules",
"[",
"'django.conf'",
"]",
".",
"settings",
"for",
"_k",
"in",
"dir",
"(",
"_app_settings",
")",
":",
"if",
"_k",
".",
"isupper",
"(",
")",
":",
"setattr",
"(",
"_def_settings",
",",
"_k",
",",
"getattr",
"(",
"_app_settings",
",",
"_k",
")",
")",
"if",
"not",
"hasattr",
"(",
"_settings",
",",
"_k",
")",
":",
"setattr",
"(",
"_settings",
",",
"_k",
",",
"getattr",
"(",
"_app_settings",
",",
"_k",
")",
")",
"except",
"ImportError",
":",
"pass"
] |
Inject an application's default settings
|
[
"Inject",
"an",
"application",
"'",
"s",
"default",
"settings"
] |
[
"\"\"\"\n Inject an application's default settings\n \"\"\"",
"# Import application settings module",
"# Import our defaults, project defaults, and project settings",
"# Add the values from the application settings module",
"# Add the value to the default settings module",
"# Add the value to the settings, if not already present",
"# Silently skip failing settings modules"
] |
[
{
"param": "appname",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "appname",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import sys
def inject_app_defaults(appname):
try:
__import__('%s.settings' % appname)
_app_settings = sys.modules['%s.settings' % appname]
_def_settings = sys.modules['django.conf.global_settings']
_settings = sys.modules['django.conf'].settings
for _k in dir(_app_settings):
if _k.isupper():
setattr(_def_settings, _k, getattr(_app_settings, _k))
if not hasattr(_settings, _k):
setattr(_settings, _k, getattr(_app_settings, _k))
except ImportError:
pass
| 713 | 557 |
882b9016836fd88d90fd3b90629c3883b715dcc8
|
alecokas/swahili-text-gcn
|
src/shared/utils.py
|
[
"MIT"
] |
Python
|
save_cli_options
|
None
|
def save_cli_options(options, save_dir: str) -> None:
"""Save all options to JSON file.
Arguments:
options: A Namespace object from argparse
save_dir: String location to save the options
"""
opt_dict = {}
for option in vars(options):
opt_dict[option] = getattr(options, option)
os.makedirs(save_dir, exist_ok=True)
now = datetime.now()
dt_string = now.strftime("%d.%m.%Y-%H:%M:%S")
opts_file_path = os.path.join(save_dir, f"opts-{dt_string}.json")
with open(opts_file_path, "w") as opt_file:
json.dump(opt_dict, opt_file)
|
Save all options to JSON file.
Arguments:
options: A Namespace object from argparse
save_dir: String location to save the options
|
Save all options to JSON file.
|
[
"Save",
"all",
"options",
"to",
"JSON",
"file",
"."
] |
def save_cli_options(options, save_dir: str) -> None:
opt_dict = {}
for option in vars(options):
opt_dict[option] = getattr(options, option)
os.makedirs(save_dir, exist_ok=True)
now = datetime.now()
dt_string = now.strftime("%d.%m.%Y-%H:%M:%S")
opts_file_path = os.path.join(save_dir, f"opts-{dt_string}.json")
with open(opts_file_path, "w") as opt_file:
json.dump(opt_dict, opt_file)
|
[
"def",
"save_cli_options",
"(",
"options",
",",
"save_dir",
":",
"str",
")",
"->",
"None",
":",
"opt_dict",
"=",
"{",
"}",
"for",
"option",
"in",
"vars",
"(",
"options",
")",
":",
"opt_dict",
"[",
"option",
"]",
"=",
"getattr",
"(",
"options",
",",
"option",
")",
"os",
".",
"makedirs",
"(",
"save_dir",
",",
"exist_ok",
"=",
"True",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"dt_string",
"=",
"now",
".",
"strftime",
"(",
"\"%d.%m.%Y-%H:%M:%S\"",
")",
"opts_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"save_dir",
",",
"f\"opts-{dt_string}.json\"",
")",
"with",
"open",
"(",
"opts_file_path",
",",
"\"w\"",
")",
"as",
"opt_file",
":",
"json",
".",
"dump",
"(",
"opt_dict",
",",
"opt_file",
")"
] |
Save all options to JSON file.
|
[
"Save",
"all",
"options",
"to",
"JSON",
"file",
"."
] |
[
"\"\"\"Save all options to JSON file.\n Arguments:\n options: A Namespace object from argparse\n save_dir: String location to save the options\n \"\"\""
] |
[
{
"param": "options",
"type": null
},
{
"param": "save_dir",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "options",
"type": null,
"docstring": "A Namespace object from argparse",
"docstring_tokens": [
"A",
"Namespace",
"object",
"from",
"argparse"
],
"default": null,
"is_optional": null
},
{
"identifier": "save_dir",
"type": "str",
"docstring": "String location to save the options",
"docstring_tokens": [
"String",
"location",
"to",
"save",
"the",
"options"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
from datetime import datetime
import json
def save_cli_options(options, save_dir: str) -> None:
opt_dict = {}
for option in vars(options):
opt_dict[option] = getattr(options, option)
os.makedirs(save_dir, exist_ok=True)
now = datetime.now()
dt_string = now.strftime("%d.%m.%Y-%H:%M:%S")
opts_file_path = os.path.join(save_dir, f"opts-{dt_string}.json")
with open(opts_file_path, "w") as opt_file:
json.dump(opt_dict, opt_file)
| 714 | 644 |
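A usage sketch for the save_cli_options record above, writing the options of an argparse.Namespace into a temporary directory; the option names are illustrative. Note the import fix in the record's code_with_imports: the body calls datetime.now(), so it needs the datetime class, not the module.

import argparse
import json
import os
import tempfile
from datetime import datetime

def save_cli_options(options, save_dir: str) -> None:
    opt_dict = {}
    for option in vars(options):
        opt_dict[option] = getattr(options, option)
    os.makedirs(save_dir, exist_ok=True)
    now = datetime.now()
    # the ':' characters in the timestamp make the file name POSIX-only
    dt_string = now.strftime("%d.%m.%Y-%H:%M:%S")
    opts_file_path = os.path.join(save_dir, f"opts-{dt_string}.json")
    with open(opts_file_path, "w") as opt_file:
        json.dump(opt_dict, opt_file)

opts = argparse.Namespace(epochs=10, lr=0.01)
out_dir = tempfile.mkdtemp()
save_cli_options(opts, out_dir)
print(os.listdir(out_dir))  # one opts-<timestamp>.json file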
1f38c46dacaac01b9079b6b5f29a5c56f5ca6e37
|
cledouarec/jira2confluence-gantt
|
src/jira2confluencegantt/report.py
|
[
"Apache-2.0"
] |
Python
|
_tickets_from_project
|
list
|
def _tickets_from_project(
jira_client: JiraClient, project_config: dict
) -> list:
"""
Get all tickets with needed fields for a given project configuration and
sorted the list by start date and end date.
:param jira_client: Jira client to retrieve tickets information.
:param project_config: Project configuration with JQL or fields to extract.
:return: Tickets list for this project.
"""
start_date_field = project_config["Fields"]["Start date"]
end_date_field = project_config["Fields"]["End date"]
fields = [
"key",
"summary",
start_date_field,
end_date_field,
project_config["Fields"]["Progress"],
"parent",
"subtasks",
"issuelinks",
]
tickets = jira_client.tickets_from_jql(
jql=project_config["JQL"], fields=fields
)
def _has_start_and_end_dates(ticket: dict) -> bool:
"""
Check if the `ticket` has a start and end dates.
:param ticket: Ticket to test.
:return: Boolean status of the check.
"""
start_date = ticket["fields"][start_date_field]
end_date = ticket["fields"][end_date_field]
return start_date is not None and end_date is not None
return sorted(
filter(_has_start_and_end_dates, tickets),
key=lambda ticket: (
ticket["fields"][start_date_field],
ticket["fields"][end_date_field],
),
)
|
Get all tickets with needed fields for a given project configuration and
sorted the list by start date and end date.
:param jira_client: Jira client to retrieve tickets information.
:param project_config: Project configuration with JQL or fields to extract.
:return: Tickets list for this project.
|
Get all tickets with needed fields for a given project configuration and
sorted the list by start date and end date.
|
[
"Get",
"all",
"tickets",
"with",
"needed",
"fields",
"for",
"a",
"given",
"project",
"configuration",
"and",
"sorted",
"the",
"list",
"by",
"start",
"date",
"and",
"end",
"date",
"."
] |
def _tickets_from_project(
jira_client: JiraClient, project_config: dict
) -> list:
start_date_field = project_config["Fields"]["Start date"]
end_date_field = project_config["Fields"]["End date"]
fields = [
"key",
"summary",
start_date_field,
end_date_field,
project_config["Fields"]["Progress"],
"parent",
"subtasks",
"issuelinks",
]
tickets = jira_client.tickets_from_jql(
jql=project_config["JQL"], fields=fields
)
def _has_start_and_end_dates(ticket: dict) -> bool:
start_date = ticket["fields"][start_date_field]
end_date = ticket["fields"][end_date_field]
return start_date is not None and end_date is not None
return sorted(
filter(_has_start_and_end_dates, tickets),
key=lambda ticket: (
ticket["fields"][start_date_field],
ticket["fields"][end_date_field],
),
)
|
[
"def",
"_tickets_from_project",
"(",
"jira_client",
":",
"JiraClient",
",",
"project_config",
":",
"dict",
")",
"->",
"list",
":",
"start_date_field",
"=",
"project_config",
"[",
"\"Fields\"",
"]",
"[",
"\"Start date\"",
"]",
"end_date_field",
"=",
"project_config",
"[",
"\"Fields\"",
"]",
"[",
"\"End date\"",
"]",
"fields",
"=",
"[",
"\"key\"",
",",
"\"summary\"",
",",
"start_date_field",
",",
"end_date_field",
",",
"project_config",
"[",
"\"Fields\"",
"]",
"[",
"\"Progress\"",
"]",
",",
"\"parent\"",
",",
"\"subtasks\"",
",",
"\"issuelinks\"",
",",
"]",
"tickets",
"=",
"jira_client",
".",
"tickets_from_jql",
"(",
"jql",
"=",
"project_config",
"[",
"\"JQL\"",
"]",
",",
"fields",
"=",
"fields",
")",
"def",
"_has_start_and_end_dates",
"(",
"ticket",
":",
"dict",
")",
"->",
"bool",
":",
"\"\"\"\n Check if the `ticket` has a start and end dates.\n\n :param ticket: Ticket to test.\n :return: Boolean status of the check.\n \"\"\"",
"start_date",
"=",
"ticket",
"[",
"\"fields\"",
"]",
"[",
"start_date_field",
"]",
"end_date",
"=",
"ticket",
"[",
"\"fields\"",
"]",
"[",
"end_date_field",
"]",
"return",
"start_date",
"is",
"not",
"None",
"and",
"end_date",
"is",
"not",
"None",
"return",
"sorted",
"(",
"filter",
"(",
"_has_start_and_end_dates",
",",
"tickets",
")",
",",
"key",
"=",
"lambda",
"ticket",
":",
"(",
"ticket",
"[",
"\"fields\"",
"]",
"[",
"start_date_field",
"]",
",",
"ticket",
"[",
"\"fields\"",
"]",
"[",
"end_date_field",
"]",
",",
")",
",",
")"
] |
Get all tickets with needed fields for a given project configuration and
sorted the list by start date and end date.
|
[
"Get",
"all",
"tickets",
"with",
"needed",
"fields",
"for",
"a",
"given",
"project",
"configuration",
"and",
"sorted",
"the",
"list",
"by",
"start",
"date",
"and",
"end",
"date",
"."
] |
[
"\"\"\"\n Get all tickets with needed fields for a given project configuration and\n sorted the list by start date and end date.\n\n :param jira_client: Jira client to retrieve tickets information.\n :param project_config: Project configuration with JQL or fields to extract.\n :return: Tickets list for this project.\n \"\"\"",
"\"\"\"\n Check if the `ticket` has a start and end dates.\n\n :param ticket: Ticket to test.\n :return: Boolean status of the check.\n \"\"\""
] |
[
{
"param": "jira_client",
"type": "JiraClient"
},
{
"param": "project_config",
"type": "dict"
}
] |
{
"returns": [
{
"docstring": "Tickets list for this project.",
"docstring_tokens": [
"Tickets",
"list",
"for",
"this",
"project",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "jira_client",
"type": "JiraClient",
"docstring": "Jira client to retrieve tickets information.",
"docstring_tokens": [
"Jira",
"client",
"to",
"retrieve",
"tickets",
"information",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "project_config",
"type": "dict",
"docstring": "Project configuration with JQL or fields to extract.",
"docstring_tokens": [
"Project",
"configuration",
"with",
"JQL",
"or",
"fields",
"to",
"extract",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _tickets_from_project(
jira_client: JiraClient, project_config: dict
) -> list:
start_date_field = project_config["Fields"]["Start date"]
end_date_field = project_config["Fields"]["End date"]
fields = [
"key",
"summary",
start_date_field,
end_date_field,
project_config["Fields"]["Progress"],
"parent",
"subtasks",
"issuelinks",
]
tickets = jira_client.tickets_from_jql(
jql=project_config["JQL"], fields=fields
)
def _has_start_and_end_dates(ticket: dict) -> bool:
start_date = ticket["fields"][start_date_field]
end_date = ticket["fields"][end_date_field]
return start_date is not None and end_date is not None
return sorted(
filter(_has_start_and_end_dates, tickets),
key=lambda ticket: (
ticket["fields"][start_date_field],
ticket["fields"][end_date_field],
),
)
| 715 | 16 |
1de05c23a4e61704517e64e3e42980b1b207e223
|
gmos/micropthon
|
sma8266a/sma8266a.py
|
[
"Apache-2.0"
] |
Python
|
execfile
| null |
def execfile(f):
"""Run a python file from the local file system
"""
fname = f if f.endswith('.py') else f + '.py'
with open(fname) as xfile:
exec(xfile.read(),locals(),locals())
|
Run a python file from the local file system
|
Run a python file from the local file system
|
[
"Run",
"a",
"python",
"file",
"from",
"the",
"local",
"file",
"system"
] |
def execfile(f):
fname = f if f.endswith('.py') else f + '.py'
with open(fname) as xfile:
exec(xfile.read(),locals(),locals())
|
[
"def",
"execfile",
"(",
"f",
")",
":",
"fname",
"=",
"f",
"if",
"f",
".",
"endswith",
"(",
"'.py'",
")",
"else",
"f",
"+",
"'.py'",
"with",
"open",
"(",
"fname",
")",
"as",
"xfile",
":",
"exec",
"(",
"xfile",
".",
"read",
"(",
")",
",",
"locals",
"(",
")",
",",
"locals",
"(",
")",
")"
] |
Run a python file from the local file system
|
[
"Run",
"a",
"python",
"file",
"from",
"the",
"local",
"file",
"system"
] |
[
"\"\"\"Run a python file from the local file system\r\n \"\"\""
] |
[
{
"param": "f",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "f",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def execfile(f):
fname = f if f.endswith('.py') else f + '.py'
with open(fname) as xfile:
exec(xfile.read(),locals(),locals())
| 716 | 908 |
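A small sketch for the execfile record above (a MicroPython-style helper): write a throwaway script and run it. The temp-file plumbing is illustrative.

import os
import tempfile

def execfile(f):
    fname = f if f.endswith('.py') else f + '.py'
    with open(fname) as xfile:
        exec(xfile.read(), locals(), locals())

path = os.path.join(tempfile.mkdtemp(), "hello.py")
with open(path, "w") as fh:
    fh.write("x = 6 * 7\nprint('the script computed', x)")
execfile(path)        # the script computed 42
execfile(path[:-3])   # the '.py' suffix is appended when missing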
88d8bdfa2d891ec3c0666c3ba0bd1426b97ed1e7
|
ang-jason/fip_powerx_mini_projects-foxtrot
|
mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/development/automated_tests/re/basictests.py
|
[
"MIT"
] |
Python
|
checkCommentGroup
|
<not_specific>
|
def checkCommentGroup(test, flags = 0):
""" Comment Groups are only supported in Python so will
likely fail in javascript only mode
"""
r = None
try:
r = re.compile(r'a(?#foobar)b', flags)
except:
test.checkPad(None,4)
if ( r is not None ):
test.check(r.groups)
test.check(r.pattern)
test.check(r.search("ab").group())
test.check(r.search("er"))
try:
r = re.compile(r'([\d]+)(?#blarg)\[\]', flags)
except:
test.checkPad(None, 4)
return
test.check( r.groups )
test.check( r.pattern )
test.check( r.search("1234[]").group())
test.check( r.search("asdf[]"))
|
Comment Groups are only supported in Python so will
likely fail in javascript only mode
|
Comment Groups are only supported in Python so will
likely fail in javascript only mode
|
[
"Comment",
"Groups",
"are",
"only",
"supported",
"in",
"Python",
"so",
"will",
"likely",
"fail",
"in",
"javascript",
"only",
"mode"
] |
def checkCommentGroup(test, flags = 0):
r = None
try:
r = re.compile(r'a(?#foobar)b', flags)
except:
test.checkPad(None,4)
if ( r is not None ):
test.check(r.groups)
test.check(r.pattern)
test.check(r.search("ab").group())
test.check(r.search("er"))
try:
r = re.compile(r'([\d]+)(?#blarg)\[\]', flags)
except:
test.checkPad(None, 4)
return
test.check( r.groups )
test.check( r.pattern )
test.check( r.search("1234[]").group())
test.check( r.search("asdf[]"))
|
[
"def",
"checkCommentGroup",
"(",
"test",
",",
"flags",
"=",
"0",
")",
":",
"r",
"=",
"None",
"try",
":",
"r",
"=",
"re",
".",
"compile",
"(",
"r'a(?#foobar)b'",
",",
"flags",
")",
"except",
":",
"test",
".",
"checkPad",
"(",
"None",
",",
"4",
")",
"if",
"(",
"r",
"is",
"not",
"None",
")",
":",
"test",
".",
"check",
"(",
"r",
".",
"groups",
")",
"test",
".",
"check",
"(",
"r",
".",
"pattern",
")",
"test",
".",
"check",
"(",
"r",
".",
"search",
"(",
"\"ab\"",
")",
".",
"group",
"(",
")",
")",
"test",
".",
"check",
"(",
"r",
".",
"search",
"(",
"\"er\"",
")",
")",
"try",
":",
"r",
"=",
"re",
".",
"compile",
"(",
"r'([\\d]+)(?#blarg)\\[\\]'",
",",
"flags",
")",
"except",
":",
"test",
".",
"checkPad",
"(",
"None",
",",
"4",
")",
"return",
"test",
".",
"check",
"(",
"r",
".",
"groups",
")",
"test",
".",
"check",
"(",
"r",
".",
"pattern",
")",
"test",
".",
"check",
"(",
"r",
".",
"search",
"(",
"\"1234[]\"",
")",
".",
"group",
"(",
")",
")",
"test",
".",
"check",
"(",
"r",
".",
"search",
"(",
"\"asdf[]\"",
")",
")"
] |
Comment Groups are only supported in Python so will
likely fail in javascript only mode
|
[
"Comment",
"Groups",
"are",
"only",
"supported",
"in",
"Python",
"so",
"will",
"likely",
"fail",
"in",
"javascript",
"only",
"mode"
] |
[
"\"\"\" Comment Groups are only supported in Python so will\r\n likely fail in javascript only mode\r\n \"\"\""
] |
[
{
"param": "test",
"type": null
},
{
"param": "flags",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "test",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "flags",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def checkCommentGroup(test, flags = 0):
r = None
try:
r = re.compile(r'a(?#foobar)b', flags)
except:
test.checkPad(None,4)
if ( r is not None ):
test.check(r.groups)
test.check(r.pattern)
test.check(r.search("ab").group())
test.check(r.search("er"))
try:
r = re.compile(r'([\d]+)(?#blarg)\[\]', flags)
except:
test.checkPad(None, 4)
return
test.check( r.groups )
test.check( r.pattern )
test.check( r.search("1234[]").group())
test.check( r.search("asdf[]"))
| 717 | 688 |
50c7cc22b83ba31cde6f2fb7f74169557128eddd
|
dinhhuy258/chromium
|
webkit/support/setup_third_party.py
|
[
"BSD-3-Clause"
] |
Python
|
GetHeaderFilesInDir
|
<not_specific>
|
def GetHeaderFilesInDir(dir_path):
"""Return a list of all header files in dir_path."""
all_files = []
for root, dirs, files in os.walk(dir_path):
# Backslashes get shell escaped by gyp, so force forward slash for
# path separators.
all_files.extend([os.path.join(root, f).replace(os.sep, '/')
for f in files if f.endswith('.h')])
return all_files
|
Return a list of all header files in dir_path.
|
Return a list of all header files in dir_path.
|
[
"Return",
"a",
"list",
"of",
"all",
"header",
"files",
"in",
"dir_path",
"."
] |
def GetHeaderFilesInDir(dir_path):
all_files = []
for root, dirs, files in os.walk(dir_path):
all_files.extend([os.path.join(root, f).replace(os.sep, '/')
for f in files if f.endswith('.h')])
return all_files
|
[
"def",
"GetHeaderFilesInDir",
"(",
"dir_path",
")",
":",
"all_files",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"dir_path",
")",
":",
"all_files",
".",
"extend",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
"for",
"f",
"in",
"files",
"if",
"f",
".",
"endswith",
"(",
"'.h'",
")",
"]",
")",
"return",
"all_files"
] |
Return a list of all header files in dir_path.
|
[
"Return",
"a",
"list",
"of",
"all",
"header",
"files",
"in",
"dir_path",
"."
] |
[
"\"\"\"Return a list of all header files in dir_path.\"\"\"",
"# Backslashes get shell escaped by gyp, so force forward slash for",
"# path separators."
] |
[
{
"param": "dir_path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "dir_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def GetHeaderFilesInDir(dir_path):
all_files = []
for root, dirs, files in os.walk(dir_path):
all_files.extend([os.path.join(root, f).replace(os.sep, '/')
for f in files if f.endswith('.h')])
return all_files
| 718 | 822 |
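A quick sketch for the GetHeaderFilesInDir record above, run against a throwaway directory tree; the file names are invented.

import os
import tempfile

def GetHeaderFilesInDir(dir_path):
    all_files = []
    for root, dirs, files in os.walk(dir_path):
        all_files.extend([os.path.join(root, f).replace(os.sep, '/')
                          for f in files if f.endswith('.h')])
    return all_files

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
for name in ("a.h", os.path.join("sub", "b.h"), os.path.join("sub", "c.cc")):
    open(os.path.join(root, name), "w").close()
print(GetHeaderFilesInDir(root))
# forward-slash paths ending in a.h and b.h; the .cc file is filtered out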
da0ed44c6a9259669f5aee1aaf83d49f69001d49
|
cclauss/pdoc
|
pdoc/extract.py
|
[
"Unlicense"
] |
Python
|
submodules
|
typing.Sequence[str]
|
def submodules(dname: str, mname: str) -> typing.Sequence[str]:
"""
Returns a list of fully qualified submodules within a package, given a
base directory and a fully qualified module name.
"""
loc = os.path.join(dname, *mname.split("."))
ret = []
for mi in pkgutil.iter_modules([loc], prefix=mname + "."):
if isinstance(mi, tuple):
# Python 3.5 compat
ret.append(mi[1])
else:
ret.append(mi.name)
ret.sort()
return ret
|
Returns a list of fully qualified submodules within a package, given a
base directory and a fully qualified module name.
|
Returns a list of fully qualified submodules within a package, given a
base directory and a fully qualified module name.
|
[
"Returns",
"a",
"list",
"of",
"fully",
"qualified",
"submodules",
"within",
"a",
"package",
"given",
"a",
"base",
"directory",
"and",
"a",
"fully",
"qualified",
"module",
"name",
"."
] |
def submodules(dname: str, mname: str) -> typing.Sequence[str]:
loc = os.path.join(dname, *mname.split("."))
ret = []
for mi in pkgutil.iter_modules([loc], prefix=mname + "."):
if isinstance(mi, tuple):
ret.append(mi[1])
else:
ret.append(mi.name)
ret.sort()
return ret
|
[
"def",
"submodules",
"(",
"dname",
":",
"str",
",",
"mname",
":",
"str",
")",
"->",
"typing",
".",
"Sequence",
"[",
"str",
"]",
":",
"loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dname",
",",
"*",
"mname",
".",
"split",
"(",
"\".\"",
")",
")",
"ret",
"=",
"[",
"]",
"for",
"mi",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"[",
"loc",
"]",
",",
"prefix",
"=",
"mname",
"+",
"\".\"",
")",
":",
"if",
"isinstance",
"(",
"mi",
",",
"tuple",
")",
":",
"ret",
".",
"append",
"(",
"mi",
"[",
"1",
"]",
")",
"else",
":",
"ret",
".",
"append",
"(",
"mi",
".",
"name",
")",
"ret",
".",
"sort",
"(",
")",
"return",
"ret"
] |
Returns a list of fully qualified submodules within a package, given a
base directory and a fully qualified module name.
|
[
"Returns",
"a",
"list",
"of",
"fully",
"qualified",
"submodules",
"within",
"a",
"package",
"given",
"a",
"base",
"directory",
"and",
"a",
"fully",
"qualified",
"module",
"name",
"."
] |
[
"\"\"\"\n Returns a list of fully qualified submodules within a package, given a\n base directory and a fully qualified module name.\n \"\"\"",
"# Python 3.5 compat"
] |
[
{
"param": "dname",
"type": "str"
},
{
"param": "mname",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "dname",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "mname",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import typing
import os
import pkgutil
def submodules(dname: str, mname: str) -> typing.Sequence[str]:
loc = os.path.join(dname, *mname.split("."))
ret = []
for mi in pkgutil.iter_modules([loc], prefix=mname + "."):
if isinstance(mi, tuple):
ret.append(mi[1])
else:
ret.append(mi.name)
ret.sort()
return ret
| 719 | 324 |
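A runnable sketch for the submodules record above, pointed at the standard-library json package so no project layout is needed.

import typing
import os
import pkgutil

def submodules(dname: str, mname: str) -> typing.Sequence[str]:
    loc = os.path.join(dname, *mname.split("."))
    ret = []
    for mi in pkgutil.iter_modules([loc], prefix=mname + "."):
        # pkgutil yields ModuleInfo namedtuples, which are tuples,
        # so the 3.5-compat branch is still the one that runs
        if isinstance(mi, tuple):
            ret.append(mi[1])
        else:
            ret.append(mi.name)
    ret.sort()
    return ret

import json
stdlib_dir = os.path.dirname(os.path.dirname(json.__file__))
print(submodules(stdlib_dir, "json"))
# typically ['json.decoder', 'json.encoder', 'json.scanner', 'json.tool']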
7c43db642f103082011aadfa466651ea512a382b
|
jheiselman/spacewar-type-r
|
lava/physics.py
|
[
"MIT"
] |
Python
|
radial_collide
|
<not_specific>
|
def radial_collide(item1, item2):
""" item1 and item2 are 3-tuples in the format of
(x, y, radius) for each object to test
"""
x_diff = item1[0] - item2[0]
y_diff = item1[1] - item2[1]
hit_radius = item1[2] + item2[2]
return math.hypot(x_diff, y_diff) < hit_radius
|
item1 and item2 are 3-tuples in the format of
(x, y, radius) for each object to test
|
item1 and item2 are 3-tuples in the format of
(x, y, radius) for each object to test
|
[
"item1",
"and",
"item2",
"are",
"3",
"-",
"tuples",
"in",
"the",
"format",
"of",
"(",
"x",
"y",
"radius",
")",
"for",
"each",
"object",
"to",
"test"
] |
def radial_collide(item1, item2):
x_diff = item1[0] - item2[0]
y_diff = item1[1] - item2[1]
hit_radius = item1[2] + item2[2]
return math.hypot(x_diff, y_diff) < hit_radius
|
[
"def",
"radial_collide",
"(",
"item1",
",",
"item2",
")",
":",
"x_diff",
"=",
"item1",
"[",
"0",
"]",
"-",
"item2",
"[",
"0",
"]",
"y_diff",
"=",
"item1",
"[",
"1",
"]",
"-",
"item2",
"[",
"1",
"]",
"hit_radius",
"=",
"item1",
"[",
"2",
"]",
"+",
"item2",
"[",
"2",
"]",
"return",
"math",
".",
"hypot",
"(",
"x_diff",
",",
"y_diff",
")",
"<",
"hit_radius"
] |
item1 and item2 are 3-tuples in the format of
(x, y, radius) for each object to test
|
[
"item1",
"and",
"item2",
"are",
"3",
"-",
"tuples",
"in",
"the",
"format",
"of",
"(",
"x",
"y",
"radius",
")",
"for",
"each",
"object",
"to",
"test"
] |
[
"\"\"\" item1 and item2 are 3-tuples in the format of\n (x, y, radius) for each object to test\n \"\"\""
] |
[
{
"param": "item1",
"type": null
},
{
"param": "item2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "item1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "item2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import math
def radial_collide(item1, item2):
x_diff = item1[0] - item2[0]
y_diff = item1[1] - item2[1]
hit_radius = item1[2] + item2[2]
return math.hypot(x_diff, y_diff) < hit_radius
| 720 | 805 |
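A tiny check of the radial_collide record above: two circles overlap exactly when the distance between centres is below the sum of the radii. The coordinates are illustrative.

import math

def radial_collide(item1, item2):
    x_diff = item1[0] - item2[0]
    y_diff = item1[1] - item2[1]
    hit_radius = item1[2] + item2[2]
    return math.hypot(x_diff, y_diff) < hit_radius

print(radial_collide((0, 0, 5), (3, 4, 1)))  # True: distance 5 < 5 + 1
print(radial_collide((0, 0, 2), (3, 4, 3)))  # False: distance 5 == 2 + 3, and the test is strict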
c04d9d6bebea231b016975881df8f65ff102f370
|
FrederikLehn/modpy
|
modpy/proxy/_kriging.py
|
[
"MIT"
] |
Python
|
_compute_kernel
|
<not_specific>
|
def _compute_kernel(sigma, R):
"""
Computes the kernel of a Gaussian Process.
Parameters
----------
sigma : float
Standard deviation.
R : array_like
Matrix or vector of correlation coefficients.
Returns
-------
C : array_like
Kernel or Covariance Matrix of a Gaussian Process.
"""
return sigma ** 2. * R
|
Computes the kernel of a Gaussian Process.
Parameters
----------
sigma : float
Standard deviation.
R : array_like
Matrix or vector of correlation coefficients.
Returns
-------
C : array_like
Kernel or Covariance Matrix of a Gaussian Process.
|
Computes the kernel of a Gaussian Process.
|
[
"Computes",
"the",
"kernel",
"of",
"a",
"Gaussian",
"Process",
"."
] |
def _compute_kernel(sigma, R):
return sigma ** 2. * R
|
[
"def",
"_compute_kernel",
"(",
"sigma",
",",
"R",
")",
":",
"return",
"sigma",
"**",
"2.",
"*",
"R"
] |
Computes the kernel of a Gaussian Process.
|
[
"Computes",
"the",
"kernel",
"of",
"a",
"Gaussian",
"Process",
"."
] |
[
"\"\"\"\r\n Computes the kernel of a Gaussian Process.\r\n\r\n Parameters\r\n ----------\r\n sigma : float\r\n Standard deviation.\r\n R : array_like\r\n Matrix or vector of correlation coefficients.\r\n\r\n Returns\r\n -------\r\n C : array_like\r\n Kernel or Covariance Matrix of a Gaussian Process.\r\n \"\"\""
] |
[
{
"param": "sigma",
"type": null
},
{
"param": "R",
"type": null
}
] |
{
"returns": [
{
"docstring": "Kernel or Covariance Matrix of a Gaussian Process.",
"docstring_tokens": [
"Kernel",
"or",
"Covariance",
"Matrix",
"of",
"a",
"Gaussian",
"Process",
"."
],
"type": "array_like\r"
}
],
"raises": [],
"params": [
{
"identifier": "sigma",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "R",
"type": null,
"docstring": "Matrix or vector of correlation coefficients.",
"docstring_tokens": [
"Matrix",
"or",
"vector",
"of",
"correlation",
"coefficients",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def _compute_kernel(sigma, R):
return sigma ** 2. * R
| 721 | 347 |
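A worked example for the _compute_kernel record above, assuming the array_like input is a NumPy correlation matrix: the covariance is simply sigma squared times R.

import numpy as np

def _compute_kernel(sigma, R):
    return sigma ** 2. * R

R = np.array([[1.0, 0.5],
              [0.5, 1.0]])   # correlation matrix (unit diagonal)
C = _compute_kernel(2.0, R)  # scales every entry by sigma**2 == 4
print(C)
# [[4. 2.]
#  [2. 4.]]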
8b98d3c4ba43d5f9247dfd21e84c76c6818e079a
|
JakobLangenbahn/crosslingual-information-retrieval
|
src/data/preprocess_data.py
|
[
"MIT"
] |
Python
|
create_dictionary
|
<not_specific>
|
def create_dictionary(unique_token, embedding_dictionary, embedding_array_normalized):
""" Create reduced dictionary and embedding array for translation search.
"""
index = 0
word_embedding_dictionary = {}
embedding_subset_dictionary = {}
for token in unique_token:
if embedding_dictionary.get(token):
word_embedding_dictionary[token] = embedding_array_normalized[
embedding_dictionary.get(token)].tolist()
embedding_subset_dictionary[index] = token
index += 1
return word_embedding_dictionary, embedding_subset_dictionary
|
Create reduced dictionary and embedding array for translation search.
|
Create reduced dictionary and embedding array for translation search.
|
[
"Create",
"reduced",
"dictionary",
"and",
"embedding",
"array",
"for",
"translation",
"search",
"."
] |
def create_dictionary(unique_token, embedding_dictionary, embedding_array_normalized):
index = 0
word_embedding_dictionary = {}
embedding_subset_dictionary = {}
for token in unique_token:
if embedding_dictionary.get(token):
word_embedding_dictionary[token] = embedding_array_normalized[
embedding_dictionary.get(token)].tolist()
embedding_subset_dictionary[index] = token
index += 1
return word_embedding_dictionary, embedding_subset_dictionary
|
[
"def",
"create_dictionary",
"(",
"unique_token",
",",
"embedding_dictionary",
",",
"embedding_array_normalized",
")",
":",
"index",
"=",
"0",
"word_embedding_dictionary",
"=",
"{",
"}",
"embedding_subset_dictionary",
"=",
"{",
"}",
"for",
"token",
"in",
"unique_token",
":",
"if",
"embedding_dictionary",
".",
"get",
"(",
"token",
")",
":",
"word_embedding_dictionary",
"[",
"token",
"]",
"=",
"embedding_array_normalized",
"[",
"embedding_dictionary",
".",
"get",
"(",
"token",
")",
"]",
".",
"tolist",
"(",
")",
"embedding_subset_dictionary",
"[",
"index",
"]",
"=",
"token",
"index",
"+=",
"1",
"return",
"word_embedding_dictionary",
",",
"embedding_subset_dictionary"
] |
Create reduced dictionary and embedding array for translation search.
|
[
"Create",
"reduced",
"dictionary",
"and",
"embedding",
"array",
"for",
"translation",
"search",
"."
] |
[
"\"\"\" Create reduced dictionary and embedding array for translation search.\n \"\"\""
] |
[
{
"param": "unique_token",
"type": null
},
{
"param": "embedding_dictionary",
"type": null
},
{
"param": "embedding_array_normalized",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "unique_token",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "embedding_dictionary",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "embedding_array_normalized",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create_dictionary(unique_token, embedding_dictionary, embedding_array_normalized):
index = 0
word_embedding_dictionary = {}
embedding_subset_dictionary = {}
for token in unique_token:
if embedding_dictionary.get(token):
word_embedding_dictionary[token] = embedding_array_normalized[
embedding_dictionary.get(token)].tolist()
embedding_subset_dictionary[index] = token
index += 1
return word_embedding_dictionary, embedding_subset_dictionary
| 722 | 955 |
be15ff4847eb347099c0e02af196a9c3b3db6e8d
|
timkrentz/Arduino
|
IMU/VTK-6.2.0/Wrapping/Python/vtk/util/misc.py
|
[
"MIT"
] |
Python
|
vtkGetDataRoot
|
<not_specific>
|
def vtkGetDataRoot():
"""vtkGetDataRoot() -- return vtk example data directory
"""
dataIndex=-1;
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
|
vtkGetDataRoot() -- return vtk example data directory
|
- return vtk example data directory
|
[
"-",
"return",
"vtk",
"example",
"data",
"directory"
] |
def vtkGetDataRoot():
dataIndex=-1;
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
|
[
"def",
"vtkGetDataRoot",
"(",
")",
":",
"dataIndex",
"=",
"-",
"1",
";",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sys",
".",
"argv",
")",
")",
":",
"if",
"sys",
".",
"argv",
"[",
"i",
"]",
"==",
"'-D'",
"and",
"i",
"<",
"len",
"(",
"sys",
".",
"argv",
")",
"-",
"1",
":",
"dataIndex",
"=",
"i",
"+",
"1",
"if",
"dataIndex",
"!=",
"-",
"1",
":",
"dataRoot",
"=",
"sys",
".",
"argv",
"[",
"dataIndex",
"]",
"else",
":",
"try",
":",
"dataRoot",
"=",
"os",
".",
"environ",
"[",
"'VTK_DATA_ROOT'",
"]",
"except",
"KeyError",
":",
"dataRoot",
"=",
"'../../../../VTKData'",
"return",
"dataRoot"
] |
vtkGetDataRoot() -- return vtk example data directory
|
[
"vtkGetDataRoot",
"()",
"--",
"return",
"vtk",
"example",
"data",
"directory"
] |
[
"\"\"\"vtkGetDataRoot() -- return vtk example data directory\r\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import sys
import os
def vtkGetDataRoot():
    dataIndex = -1
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
| 723 | 697 |
6062c097b253192ba0941e69e744e8601cbd39ac
|
moi90/pytorch
|
torch/autograd/profiler_util.py
|
[
"Intel"
] |
Python
|
_format_time_share
|
<not_specific>
|
def _format_time_share(time_us, total_time_us):
"""Defines how to format time in FunctionEvent"""
if total_time_us == 0:
assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
return "NaN"
return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
|
Defines how to format time in FunctionEvent
|
Defines how to format time in FunctionEvent
|
[
"Defines",
"how",
"to",
"format",
"time",
"in",
"FunctionEvent"
] |
def _format_time_share(time_us, total_time_us):
if total_time_us == 0:
assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
return "NaN"
return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
|
[
"def",
"_format_time_share",
"(",
"time_us",
",",
"total_time_us",
")",
":",
"if",
"total_time_us",
"==",
"0",
":",
"assert",
"time_us",
"==",
"0",
",",
"\"Expected time_us == 0 but got {}\"",
".",
"format",
"(",
"time_us",
")",
"return",
"\"NaN\"",
"return",
"'{:.2f}%'",
".",
"format",
"(",
"time_us",
"*",
"100.0",
"/",
"total_time_us",
")"
] |
Defines how to format time in FunctionEvent
|
[
"Defines",
"how",
"to",
"format",
"time",
"in",
"FunctionEvent"
] |
[
"\"\"\"Defines how to format time in FunctionEvent\"\"\""
] |
[
{
"param": "time_us",
"type": null
},
{
"param": "total_time_us",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "time_us",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "total_time_us",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _format_time_share(time_us, total_time_us):
if total_time_us == 0:
assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
return "NaN"
return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
| 724 | 554 |
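Two sample calls for the _format_time_share record above; the timings are made up.

def _format_time_share(time_us, total_time_us):
    if total_time_us == 0:
        assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
        return "NaN"
    return '{:.2f}%'.format(time_us * 100.0 / total_time_us)

print(_format_time_share(250, 1000))  # 25.00%
print(_format_time_share(0, 0))       # NaN (guards the zero-total case)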
14919ddd1234afe93649c39e0da87a592431abe8
|
suhanoves/pycbrf
|
pycbrf/utils.py
|
[
"BSD-3-Clause"
] |
Python
|
_date_format
|
str
|
def _date_format(value: datetime) -> str:
"""Format datetime into a string.
:param value:
"""
return value.strftime('%d/%m/%Y')
|
Format datetime into a string.
:param value:
|
Format datetime into a string.
|
[
"Format",
"datetime",
"into",
"a",
"string",
"."
] |
def _date_format(value: datetime) -> str:
return value.strftime('%d/%m/%Y')
|
[
"def",
"_date_format",
"(",
"value",
":",
"datetime",
")",
"->",
"str",
":",
"return",
"value",
".",
"strftime",
"(",
"'%d/%m/%Y'",
")"
] |
Format datetime into a string.
|
[
"Format",
"datetime",
"into",
"a",
"string",
"."
] |
[
"\"\"\"Format datetime into a string.\n\n :param value:\n\n \"\"\""
] |
[
{
"param": "value",
"type": "datetime"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": "datetime",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from datetime import datetime
def _date_format(value: datetime) -> str:
return value.strftime('%d/%m/%Y')
| 725 | 769 |
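A one-line demo of the _date_format record above, with the datetime import added since both the annotation and the strftime call need the class.

from datetime import datetime

def _date_format(value: datetime) -> str:
    return value.strftime('%d/%m/%Y')

print(_date_format(datetime(2024, 1, 31)))  # 31/01/2024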
e83fbb30328d5203cb34acdb8dbe0fc1c036d972
|
nick-youngblut/MGSIM
|
MGSIM/Utils.py
|
[
"MIT"
] |
Python
|
is_file
| null |
def is_file(fileName):
""" Does file exist? with custom output message
"""
if os.path.isfile(fileName) is False:
raise IOError('"{}" does not exist'.format(fileName))
|
Does file exist? with custom output message
|
Does file exist? with custom output message
|
[
"Does",
"file",
"exist",
".",
"with",
"custom",
"output",
"message"
] |
def is_file(fileName):
if os.path.isfile(fileName) is False:
raise IOError('"{}" does not exist'.format(fileName))
|
[
"def",
"is_file",
"(",
"fileName",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fileName",
")",
"is",
"False",
":",
"raise",
"IOError",
"(",
"'\"{}\" does not exist'",
".",
"format",
"(",
"fileName",
")",
")"
] |
Does file exist?
|
[
"Does",
"file",
"exist?"
] |
[
"\"\"\" Does file exist? with custom output message\n \"\"\""
] |
[
{
"param": "fileName",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "fileName",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def is_file(fileName):
if os.path.isfile(fileName) is False:
raise IOError('"{}" does not exist'.format(fileName))
| 726 | 221 |
e64c279a0b564b70af10f6a59d75689cf08ca945
|
daniel-rollings-fivestars/aladdin
|
aladdin/lib/git.py
|
[
"MIT"
] |
Python
|
extract_hash
|
<not_specific>
|
def extract_hash(cls, value, git_url):
"""
Get a hash out of whatever is the value given.
value: can be a branch name, part of a hash
"""
if not git_url:
# There is no way to check anything
return value
ls_remote_res = cls._get_hash_ls_remote(value, git_url)
if ls_remote_res:
return cls._full_hash_to_short_hash(str(ls_remote_res))
# Default is to return the value, truncated to the size of a hash
return cls._full_hash_to_short_hash(value)
|
Get a hash out of whatever is the value given.
value: can be a branch name, part of a hash
|
Get a hash out of whatever is the value given.
value: can be a branch name, part of a hash
|
[
"Get",
"a",
"hash",
"out",
"of",
"whatever",
"is",
"the",
"value",
"given",
".",
"value",
":",
"can",
"be",
"a",
"branch",
"name",
"part",
"of",
"a",
"hash"
] |
def extract_hash(cls, value, git_url):
if not git_url:
return value
ls_remote_res = cls._get_hash_ls_remote(value, git_url)
if ls_remote_res:
return cls._full_hash_to_short_hash(str(ls_remote_res))
return cls._full_hash_to_short_hash(value)
|
[
"def",
"extract_hash",
"(",
"cls",
",",
"value",
",",
"git_url",
")",
":",
"if",
"not",
"git_url",
":",
"return",
"value",
"ls_remote_res",
"=",
"cls",
".",
"_get_hash_ls_remote",
"(",
"value",
",",
"git_url",
")",
"if",
"ls_remote_res",
":",
"return",
"cls",
".",
"_full_hash_to_short_hash",
"(",
"str",
"(",
"ls_remote_res",
")",
")",
"return",
"cls",
".",
"_full_hash_to_short_hash",
"(",
"value",
")"
] |
Get a hash out of whatever is the value given.
|
[
"Get",
"a",
"hash",
"out",
"of",
"whatever",
"is",
"the",
"value",
"given",
"."
] |
[
"\"\"\"\n Get a hash out of whatever is the value given.\n value: can be a branch name, part of a hash\n \"\"\"",
"# There is no way to check anything",
"# Default is to return the value, truncated to the size of a hash"
] |
[
{
"param": "cls",
"type": null
},
{
"param": "value",
"type": null
},
{
"param": "git_url",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "git_url",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def extract_hash(cls, value, git_url):
if not git_url:
return value
ls_remote_res = cls._get_hash_ls_remote(value, git_url)
if ls_remote_res:
return cls._full_hash_to_short_hash(str(ls_remote_res))
return cls._full_hash_to_short_hash(value)
| 727 | 968 |
b2c5511abce115bda3b733c023479f5a51d3250a
|
baddad1980/photograbber
|
albumhelpers.py
|
[
"Apache-2.0"
] |
Python
|
add_photo_paths
| null |
def add_photo_paths(album):
'''set path info in album dictionary'''
for photo in album['photos'].items():
#photo[1]['path'] = os.path.join(album['folder'], '%s.jpg' % photo[0])
photo[1]['path'] = '%s' % photo[1]['src_big'].split('/')[-1]
|
set path info in album dictionary
|
set path info in album dictionary
|
[
"set",
"path",
"info",
"in",
"album",
"dictionary"
] |
def add_photo_paths(album):
for photo in album['photos'].items():
photo[1]['path'] = '%s' % photo[1]['src_big'].split('/')[-1]
|
[
"def",
"add_photo_paths",
"(",
"album",
")",
":",
"for",
"photo",
"in",
"album",
"[",
"'photos'",
"]",
".",
"items",
"(",
")",
":",
"photo",
"[",
"1",
"]",
"[",
"'path'",
"]",
"=",
"'%s'",
"%",
"photo",
"[",
"1",
"]",
"[",
"'src_big'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]"
] |
set path info in album dictionary
|
[
"set",
"path",
"info",
"in",
"album",
"dictionary"
] |
[
"'''set path info in album dictionary'''",
"#photo[1]['path'] = os.path.join(album['folder'], '%s.jpg' % photo[0])"
] |
[
{
"param": "album",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "album",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_photo_paths(album):
for photo in album['photos'].items():
photo[1]['path'] = '%s' % photo[1]['src_big'].split('/')[-1]
| 728 | 359 |
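A toy album for the add_photo_paths record above; the Facebook-style src_big URL is invented. The helper mutates the dictionary in place, deriving each path from the URL's basename.

def add_photo_paths(album):
    for photo in album['photos'].items():
        photo[1]['path'] = '%s' % photo[1]['src_big'].split('/')[-1]

album = {
    'photos': {
        'p1': {'src_big': 'https://example.com/albums/42/abc123.jpg'},
    }
}
add_photo_paths(album)
print(album['photos']['p1']['path'])  # abc123.jpg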
f16b46892694dac5a853741a65a829b5df4f1e78
|
bryngemark/aCT
|
src/act/client/aCT-client/src/config.py
|
[
"Apache-2.0"
] |
Python
|
parse_conf_file
| null |
def parse_conf_file(conf_file, conf_dict):
"""
Parse configuration file into given dictionary.
All dictionary keys with None values will be taken from config file.
If they are not present in config file, they remain None.
"""
# insert dummy section, as config parser requires it
conf_str = '[dummy]\n' + conf_file.read()
conf_fp = io.StringIO(conf_str)
conf_parser = configparser.RawConfigParser()
conf_parser.readfp(conf_fp)
config = dict(conf_parser.items('dummy'))
for key, value in conf_dict.items():
if value == None:
conf_dict[key] = config.get(key, None)
|
Parse configuration file into given dictionary.
All dictionary keys with None values will be taken from config file.
If they are not present in config file, they remain None.
|
Parse configuration file into given dictionary.
All dictionary keys with None values will be taken from config file.
If they are not present in config file, they remain None.
|
[
"Parse",
"configuration",
"file",
"into",
"given",
"dictionary",
".",
"All",
"dictionary",
"keys",
"with",
"None",
"values",
"will",
"be",
"taken",
"from",
"config",
"file",
".",
"If",
"they",
"are",
"not",
"present",
"in",
"config",
"file",
"they",
"remain",
"None",
"."
] |
def parse_conf_file(conf_file, conf_dict):
conf_str = '[dummy]\n' + conf_file.read()
conf_fp = io.StringIO(conf_str)
conf_parser = configparser.RawConfigParser()
conf_parser.readfp(conf_fp)
config = dict(conf_parser.items('dummy'))
for key, value in conf_dict.items():
if value == None:
conf_dict[key] = config.get(key, None)
|
[
"def",
"parse_conf_file",
"(",
"conf_file",
",",
"conf_dict",
")",
":",
"conf_str",
"=",
"'[dummy]\\n'",
"+",
"conf_file",
".",
"read",
"(",
")",
"conf_fp",
"=",
"io",
".",
"StringIO",
"(",
"conf_str",
")",
"conf_parser",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"conf_parser",
".",
"readfp",
"(",
"conf_fp",
")",
"config",
"=",
"dict",
"(",
"conf_parser",
".",
"items",
"(",
"'dummy'",
")",
")",
"for",
"key",
",",
"value",
"in",
"conf_dict",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"None",
":",
"conf_dict",
"[",
"key",
"]",
"=",
"config",
".",
"get",
"(",
"key",
",",
"None",
")"
] |
Parse configuration file into given dictionary.
|
[
"Parse",
"configuration",
"file",
"into",
"given",
"dictionary",
"."
] |
[
"\"\"\"\n Parse configuration file into given dictionary.\n\n All dictionary keys with None values will be taken from config file.\n If they are not present in config file, they remain None.\n \"\"\"",
"# insert dummy section, as config parser requires it"
] |
[
{
"param": "conf_file",
"type": null
},
{
"param": "conf_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "conf_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "conf_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import configparser
import io
def parse_conf_file(conf_file, conf_dict):
conf_str = '[dummy]\n' + conf_file.read()
conf_fp = io.StringIO(conf_str)
conf_parser = configparser.RawConfigParser()
conf_parser.readfp(conf_fp)
config = dict(conf_parser.items('dummy'))
for key, value in conf_dict.items():
if value == None:
conf_dict[key] = config.get(key, None)
| 729 | 203 |
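A self-contained run of the parse_conf_file record above, feeding the parser from an in-memory file; the key names are illustrative, and read_file replaces the readfp call that modern Python has removed (see the note in the code above).

import configparser
import io

def parse_conf_file(conf_file, conf_dict):
    conf_str = '[dummy]\n' + conf_file.read()
    conf_fp = io.StringIO(conf_str)
    conf_parser = configparser.RawConfigParser()
    conf_parser.read_file(conf_fp)
    config = dict(conf_parser.items('dummy'))
    for key, value in conf_dict.items():
        if value == None:
            conf_dict[key] = config.get(key, None)

conf = io.StringIO("server = https://act.example.org\nproxy = /tmp/x509up\n")
settings = {'server': None, 'proxy': None, 'verbose': 'yes'}
parse_conf_file(conf, settings)
print(settings)
# None values are filled from the file; 'verbose' keeps its existing value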
fa5dd3bde4190701e96cf649331478aa095b5281
|
sshcrack/rpi-music-visualizer
|
python/tools/tools.py
|
[
"MIT"
] |
Python
|
wheel
|
<not_specific>
|
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
pos = pos % 255
if pos < 85:
return [pos * 3, 255 - pos * 3, 0]
elif pos < 170:
pos -= 85
return [255 - pos * 3, 0, pos * 3]
else:
pos -= 170
return [0, pos * 3, 255 - pos * 3]
|
Generate rainbow colors across 0-255 positions.
|
Generate rainbow colors across 0-255 positions.
|
[
"Generate",
"rainbow",
"colors",
"across",
"0",
"-",
"255",
"positions",
"."
] |
def wheel(pos):
pos = pos % 255
if pos < 85:
return [pos * 3, 255 - pos * 3, 0]
elif pos < 170:
pos -= 85
return [255 - pos * 3, 0, pos * 3]
else:
pos -= 170
return [0, pos * 3, 255 - pos * 3]
|
[
"def",
"wheel",
"(",
"pos",
")",
":",
"pos",
"=",
"pos",
"%",
"255",
"if",
"pos",
"<",
"85",
":",
"return",
"[",
"pos",
"*",
"3",
",",
"255",
"-",
"pos",
"*",
"3",
",",
"0",
"]",
"elif",
"pos",
"<",
"170",
":",
"pos",
"-=",
"85",
"return",
"[",
"255",
"-",
"pos",
"*",
"3",
",",
"0",
",",
"pos",
"*",
"3",
"]",
"else",
":",
"pos",
"-=",
"170",
"return",
"[",
"0",
",",
"pos",
"*",
"3",
",",
"255",
"-",
"pos",
"*",
"3",
"]"
] |
Generate rainbow colors across 0-255 positions.
|
[
"Generate",
"rainbow",
"colors",
"across",
"0",
"-",
"255",
"positions",
"."
] |
[
"\"\"\"Generate rainbow colors across 0-255 positions.\"\"\""
] |
[
{
"param": "pos",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pos",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def wheel(pos):
pos = pos % 255
if pos < 85:
return [pos * 3, 255 - pos * 3, 0]
elif pos < 170:
pos -= 85
return [255 - pos * 3, 0, pos * 3]
else:
pos -= 170
return [0, pos * 3, 255 - pos * 3]
| 730 | 62 |
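Sample positions for the wheel record above: the three branches sweep red, green, and blue into one another across 0-255, and larger positions wrap via pos % 255.

def wheel(pos):
    pos = pos % 255
    if pos < 85:
        return [pos * 3, 255 - pos * 3, 0]
    elif pos < 170:
        pos -= 85
        return [255 - pos * 3, 0, pos * 3]
    else:
        pos -= 170
        return [0, pos * 3, 255 - pos * 3]

print(wheel(0))    # [0, 255, 0]   pure green at the start of the first branch
print(wheel(85))   # [255, 0, 0]   second branch starts at pure red
print(wheel(170))  # [0, 0, 255]   third branch starts at pure blue
print(wheel(300))  # same as wheel(45) because of the modulo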
b4732d6ad94d3b9bc65699b674ae9d452bad6516
|
MontmereLimited/django-lean
|
django_lean/experiments/stats.py
|
[
"BSD-3-Clause"
] |
Python
|
ss
|
<not_specific>
|
def ss(inlist):
"""
Squares each value in the passed list, adds up these squares and
returns the result.
Originally written by Gary Strangman.
Usage: lss(inlist)
"""
ss = 0
for item in inlist:
ss = ss + item*item
return ss
|
Squares each value in the passed list, adds up these squares and
returns the result.
Originally written by Gary Strangman.
Usage: lss(inlist)
|
Squares each value in the passed list, adds up these squares and
returns the result.
Originally written by Gary Strangman.
|
[
"Squares",
"each",
"value",
"in",
"the",
"passed",
"list",
"adds",
"up",
"these",
"squares",
"and",
"returns",
"the",
"result",
".",
"Originally",
"written",
"by",
"Gary",
"Strangman",
"."
] |
def ss(inlist):
ss = 0
for item in inlist:
ss = ss + item*item
return ss
|
[
"def",
"ss",
"(",
"inlist",
")",
":",
"ss",
"=",
"0",
"for",
"item",
"in",
"inlist",
":",
"ss",
"=",
"ss",
"+",
"item",
"*",
"item",
"return",
"ss"
] |
Squares each value in the passed list, adds up these squares and
returns the result.
|
[
"Squares",
"each",
"value",
"in",
"the",
"passed",
"list",
"adds",
"up",
"these",
"squares",
"and",
"returns",
"the",
"result",
"."
] |
[
"\"\"\"\n Squares each value in the passed list, adds up these squares and\n returns the result.\n \n Originally written by Gary Strangman.\n\n Usage: lss(inlist)\n \"\"\""
] |
[
{
"param": "inlist",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "inlist",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def ss(inlist):
ss = 0
for item in inlist:
ss = ss + item*item
return ss
| 731 | 413 |
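A quick check of the ss record above (sum of squares): ss([3, 4]) is 3*3 + 4*4 = 25.

def ss(inlist):
    ss = 0
    for item in inlist:
        ss = ss + item*item
    return ss

print(ss([3, 4]))     # 25
print(ss([1, 2, 3]))  # 14 = 1 + 4 + 9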
56307b09588682a6b3cf28ccaa7fff0d9b01f0cf
|
samuelfu/MIT_Trading
|
Barclays Options/progressBar.py
|
[
"MIT"
] |
Python
|
printProgressBar
| null |
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
|
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
|
Call in a loop to create terminal progress bar
|
[
"Call",
"in",
"a",
"loop",
"to",
"create",
"terminal",
"progress",
"bar"
] |
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
if iteration == total:
print()
|
[
"def",
"printProgressBar",
"(",
"iteration",
",",
"total",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
",",
"decimals",
"=",
"1",
",",
"length",
"=",
"100",
",",
"fill",
"=",
"'█'):",
"",
"",
"percent",
"=",
"(",
"\"{0:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}\"",
")",
".",
"format",
"(",
"100",
"*",
"(",
"iteration",
"/",
"float",
"(",
"total",
")",
")",
")",
"filledLength",
"=",
"int",
"(",
"length",
"*",
"iteration",
"//",
"total",
")",
"bar",
"=",
"fill",
"*",
"filledLength",
"+",
"'-'",
"*",
"(",
"length",
"-",
"filledLength",
")",
"print",
"(",
"'\\r%s |%s| %s%% %s'",
"%",
"(",
"prefix",
",",
"bar",
",",
"percent",
",",
"suffix",
")",
",",
"end",
"=",
"'\\r'",
")",
"if",
"iteration",
"==",
"total",
":",
"print",
"(",
")"
] |
Call in a loop to create terminal progress bar
|
[
"Call",
"in",
"a",
"loop",
"to",
"create",
"terminal",
"progress",
"bar"
] |
[
"\"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"",
"# Print New Line on Complete"
] |
[
{
"param": "iteration",
"type": null
},
{
"param": "total",
"type": null
},
{
"param": "prefix",
"type": null
},
{
"param": "suffix",
"type": null
},
{
"param": "decimals",
"type": null
},
{
"param": "length",
"type": null
},
{
"param": "fill",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "iteration",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "total",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "prefix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "suffix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "decimals",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "length",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fill",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "params",
"docstring": "iteration - Required : current iteration (Int)\ntotal - Required : total iterations (Int)\nprefix - Optional : prefix string (Str)\nsuffix - Optional : suffix string (Str)\ndecimals - Optional : positive number of decimals in percent complete (Int)\nlength - Optional : character length of bar (Int)\nfill - Optional : bar fill character (Str)",
"docstring_tokens": [
"iteration",
"-",
"Required",
":",
"current",
"iteration",
"(",
"Int",
")",
"total",
"-",
"Required",
":",
"total",
"iterations",
"(",
"Int",
")",
"prefix",
"-",
"Optional",
":",
"prefix",
"string",
"(",
"Str",
")",
"suffix",
"-",
"Optional",
":",
"suffix",
"string",
"(",
"Str",
")",
"decimals",
"-",
"Optional",
":",
"positive",
"number",
"of",
"decimals",
"in",
"percent",
"complete",
"(",
"Int",
")",
"length",
"-",
"Optional",
":",
"character",
"length",
"of",
"bar",
"(",
"Int",
")",
"fill",
"-",
"Optional",
":",
"bar",
"fill",
"character",
"(",
"Str",
")"
]
}
]
}
|
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
if iteration == total:
print()
| 732 | 634 |
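A minimal usage sketch for the printProgressBar record above, assuming its definition is in scope; the workload size and the time.sleep pacing are illustrative assumptions, not part of the record.

import time

items = list(range(50))  # hypothetical workload
total = len(items)
printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=40)
for i, _ in enumerate(items):
    time.sleep(0.05)  # stand-in for real per-item work
    printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=40)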
ab6235e1a7c10836c38ec33da667d7ec8ab1c045
|
Ernel1997/test262
|
tools/generation/lib/template.py
|
[
"BSD-3-Clause"
] |
Python
|
indent
|
<not_specific>
|
def indent(text, prefix = ' ', js_value = False):
'''Prefix a block of text (as defined by the "line break" control
character) with some character sequence.
:param prefix: String value to insert before each line
:param js_value: If True, the text will be interpreted as a JavaScript
value, meaning that indentation will not occur for lines that would
effect the runtime value; defaults to False
'''
if isinstance(text, list):
lines = text
else:
lines = text.split('\n')
indented = [prefix + lines[0]]
str_char = None
for line in lines[1:]:
# Determine if the beginning of the current line is part of some
# previously-opened literal value.
if js_value:
for char in indented[-1]:
if char == str_char:
str_char = None
elif str_char is None and char in '\'"`':
str_char = char
# Do not indent the current line if it is a continuation of a literal
# value or if it is empty.
if str_char or len(line) == 0:
indented.append(line)
else:
indented.append(prefix + line)
return '\n'.join(indented)
|
Prefix a block of text (as defined by the "line break" control
character) with some character sequence.
:param prefix: String value to insert before each line
:param js_value: If True, the text will be interpreted as a JavaScript
value, meaning that indentation will not occur for lines that would
effect the runtime value; defaults to False
|
Prefix a block of text (as defined by the "line break" control
character) with some character sequence.
|
[
"Prefix",
"a",
"block",
"of",
"text",
"(",
"as",
"defined",
"by",
"the",
"\"",
"line",
"break",
"\"",
"control",
"character",
")",
"with",
"some",
"character",
"sequence",
"."
] |
def indent(text, prefix = ' ', js_value = False):
if isinstance(text, list):
lines = text
else:
lines = text.split('\n')
indented = [prefix + lines[0]]
str_char = None
for line in lines[1:]:
if js_value:
for char in indented[-1]:
if char == str_char:
str_char = None
elif str_char is None and char in '\'"`':
str_char = char
if str_char or len(line) == 0:
indented.append(line)
else:
indented.append(prefix + line)
return '\n'.join(indented)
|
[
"def",
"indent",
"(",
"text",
",",
"prefix",
"=",
"' '",
",",
"js_value",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"list",
")",
":",
"lines",
"=",
"text",
"else",
":",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"indented",
"=",
"[",
"prefix",
"+",
"lines",
"[",
"0",
"]",
"]",
"str_char",
"=",
"None",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"if",
"js_value",
":",
"for",
"char",
"in",
"indented",
"[",
"-",
"1",
"]",
":",
"if",
"char",
"==",
"str_char",
":",
"str_char",
"=",
"None",
"elif",
"str_char",
"is",
"None",
"and",
"char",
"in",
"'\\'\"`'",
":",
"str_char",
"=",
"char",
"if",
"str_char",
"or",
"len",
"(",
"line",
")",
"==",
"0",
":",
"indented",
".",
"append",
"(",
"line",
")",
"else",
":",
"indented",
".",
"append",
"(",
"prefix",
"+",
"line",
")",
"return",
"'\\n'",
".",
"join",
"(",
"indented",
")"
] |
Prefix a block of text (as defined by the "line break" control
character) with some character sequence.
|
[
"Prefix",
"a",
"block",
"of",
"text",
"(",
"as",
"defined",
"by",
"the",
"\"",
"line",
"break",
"\"",
"control",
"character",
")",
"with",
"some",
"character",
"sequence",
"."
] |
[
"'''Prefix a block of text (as defined by the \"line break\" control\n character) with some character sequence.\n\n :param prefix: String value to insert before each line\n :param js_value: If True, the text will be interpreted as a JavaScript\n value, meaning that indentation will not occur for lines that would\n effect the runtime value; defaults to False\n '''",
"# Determine if the beginning of the current line is part of some",
"# previously-opened literal value.",
"# Do not indent the current line if it is a continuation of a literal",
"# value or if it is empty."
] |
[
{
"param": "text",
"type": null
},
{
"param": "prefix",
"type": null
},
{
"param": "js_value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "text",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "prefix",
"type": null,
"docstring": "String value to insert before each line",
"docstring_tokens": [
"String",
"value",
"to",
"insert",
"before",
"each",
"line"
],
"default": null,
"is_optional": null
},
{
"identifier": "js_value",
"type": null,
"docstring": "If True, the text will be interpreted as a JavaScript\nvalue, meaning that indentation will not occur for lines that would\neffect the runtime value; defaults to False",
"docstring_tokens": [
"If",
"True",
"the",
"text",
"will",
"be",
"interpreted",
"as",
"a",
"JavaScript",
"value",
"meaning",
"that",
"indentation",
"will",
"not",
"occur",
"for",
"lines",
"that",
"would",
"effect",
"the",
"runtime",
"value",
";",
"defaults",
"to",
"False"
],
"default": "False",
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def indent(text, prefix = ' ', js_value = False):
if isinstance(text, list):
lines = text
else:
lines = text.split('\n')
indented = [prefix + lines[0]]
str_char = None
for line in lines[1:]:
if js_value:
for char in indented[-1]:
if char == str_char:
str_char = None
elif str_char is None and char in '\'"`':
str_char = char
if str_char or len(line) == 0:
indented.append(line)
else:
indented.append(prefix + line)
return '\n'.join(indented)
| 733 | 575 |
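A usage sketch for the indent record above, assuming its definition is in scope; the sample JavaScript snippet is an assumption chosen to exercise the js_value path.

src = 'var s = `line one\nline two`;\nvar t = 1;'
print(indent(src, prefix='    ', js_value=True))
# 'line two`;' continues an open template literal, so it keeps its column;
# the other two lines receive the four-space prefix.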
0c3e95f84022e615c039f1b93fab79fb5bc56eb0
|
leonardodepaula/xgbimputer
|
xgbimputer/utils.py
|
[
"Apache-2.0"
] |
Python
|
is_float
|
<not_specific>
|
def is_float(value):
'''
Verify if value's dtype is float.
'''
try:
float(value)
except ValueError:
return False
else:
return True
|
Verify if value's dtype is float.
|
Verify if value's dtype is float.
|
[
"Verify",
"if",
"value",
"'",
"s",
"dtype",
"is",
"float",
"."
] |
def is_float(value):
try:
float(value)
except ValueError:
return False
else:
return True
|
[
"def",
"is_float",
"(",
"value",
")",
":",
"try",
":",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
Verify if value's dtype is float.
|
[
"Verify",
"if",
"value",
"'",
"s",
"dtype",
"is",
"float",
"."
] |
[
"'''\n Verify if value's dtype is float.\n '''"
] |
[
{
"param": "value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_float(value):
try:
float(value)
except ValueError:
return False
else:
return True
| 734 | 179 |
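Quick checks for the is_float record above (inputs are illustrative). Note it relies on float() raising ValueError, so a non-string, non-numeric argument such as None would raise TypeError instead of returning False.

print(is_float('3.14'))  # True
print(is_float('abc'))   # False
print(is_float(7))       # True -- float() accepts ints as well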
8b06666ae3e371cbb271cc912170971418a71315
|
Belval/SentimentRNN
|
word2vec/data_utils.py
|
[
"MIT"
] |
Python
|
read_chunk
| null |
def read_chunk(filename):
"""
Read a file in chunks of 10MB, returns a generator.
"""
with open(filename, 'r') as f:
while True:
data = f.read(10 * 1024 * 1024)
if not data:
break
yield data.split()[:-1]
|
Read a file in chunks of 10MB, returns a generator.
|
Read a file in chunks of 10MB, returns a generator.
|
[
"Read",
"a",
"file",
"in",
"chunks",
"of",
"10MB",
"returns",
"a",
"generator",
"."
] |
def read_chunk(filename):
with open(filename, 'r') as f:
while True:
data = f.read(10 * 1024 * 1024)
if not data:
break
yield data.split()[:-1]
|
[
"def",
"read_chunk",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"while",
"True",
":",
"data",
"=",
"f",
".",
"read",
"(",
"10",
"*",
"1024",
"*",
"1024",
")",
"if",
"not",
"data",
":",
"break",
"yield",
"data",
".",
"split",
"(",
")",
"[",
":",
"-",
"1",
"]"
] |
Read a file in chunks of 10MB, returns a generator.
|
[
"Read",
"a",
"file",
"in",
"chunks",
"of",
"10MB",
"returns",
"a",
"generator",
"."
] |
[
"\"\"\"\n Read a file in chunks of 10MB, returns a generator.\n \"\"\""
] |
[
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def read_chunk(filename):
with open(filename, 'r') as f:
while True:
data = f.read(10 * 1024 * 1024)
if not data:
break
yield data.split()[:-1]
| 735 | 194 |
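A usage sketch for the read_chunk generator above; 'corpus.txt' is a hypothetical file. The record's data.split()[:-1] drops the last whitespace-separated token of every 10 MB chunk, presumably to avoid emitting a word cut at the chunk boundary, so that trailing token is lost rather than carried over.

for words in read_chunk('corpus.txt'):  # hypothetical input file
    print(len(words), 'tokens in this chunk')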
70a8b0403f5970afa19b001bb031c01f258cba98
|
vahndi/quant-survey
|
survey/generation/formatting.py
|
[
"MIT"
] |
Python
|
spaces
|
str
|
def spaces(num_spaces: int) -> str:
"""
Return a string with the given number of spaces.
"""
return ' ' * num_spaces
|
Return a string with the given number of spaces.
|
Return a string with the given number of spaces.
|
[
"Return",
"a",
"string",
"with",
"the",
"given",
"number",
"of",
"spaces",
"."
] |
def spaces(num_spaces: int) -> str:
return ' ' * num_spaces
|
[
"def",
"spaces",
"(",
"num_spaces",
":",
"int",
")",
"->",
"str",
":",
"return",
"' '",
"*",
"num_spaces"
] |
Return a string with the given number of spaces.
|
[
"Return",
"a",
"string",
"with",
"the",
"given",
"number",
"of",
"spaces",
"."
] |
[
"\"\"\"\n Return a string with the given number of spaces.\n \"\"\""
] |
[
{
"param": "num_spaces",
"type": "int"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "num_spaces",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def spaces(num_spaces: int) -> str:
return ' ' * num_spaces
| 736 | 466 |
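A trivial check for the spaces record above:

print('[' + spaces(4) + ']')  # [    ]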
c54e1caef063f9f08ba136f2fedf0a92bf296710
|
tungduynguyen/EC-DT
|
main_ECDT.py
|
[
"MIT"
] |
Python
|
loadCSV
|
<not_specific>
|
def loadCSV(file):
"""Loads a CSV file and converts all floats and ints into basic datatypes."""
def convertTypes(s):
s = s.strip()
try:
return float(s) if '.' in s else int(s)
except ValueError:
return s
reader = csv.reader(open(file, 'rt'))
return [[convertTypes(item) for item in row] for row in reader]
|
Loads a CSV file and converts all floats and ints into basic datatypes.
|
Loads a CSV file and converts all floats and ints into basic datatypes.
|
[
"Loads",
"a",
"CSV",
"file",
"and",
"converts",
"all",
"floats",
"and",
"ints",
"into",
"basic",
"datatypes",
"."
] |
def loadCSV(file):
def convertTypes(s):
s = s.strip()
try:
return float(s) if '.' in s else int(s)
except ValueError:
return s
reader = csv.reader(open(file, 'rt'))
return [[convertTypes(item) for item in row] for row in reader]
|
[
"def",
"loadCSV",
"(",
"file",
")",
":",
"def",
"convertTypes",
"(",
"s",
")",
":",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"try",
":",
"return",
"float",
"(",
"s",
")",
"if",
"'.'",
"in",
"s",
"else",
"int",
"(",
"s",
")",
"except",
"ValueError",
":",
"return",
"s",
"reader",
"=",
"csv",
".",
"reader",
"(",
"open",
"(",
"file",
",",
"'rt'",
")",
")",
"return",
"[",
"[",
"convertTypes",
"(",
"item",
")",
"for",
"item",
"in",
"row",
"]",
"for",
"row",
"in",
"reader",
"]"
] |
Loads a CSV file and converts all floats and ints into basic datatypes.
|
[
"Loads",
"a",
"CSV",
"file",
"and",
"converts",
"all",
"floats",
"and",
"ints",
"into",
"basic",
"datatypes",
"."
] |
[
"\"\"\"Loads a CSV file and converts all floats and ints into basic datatypes.\"\"\""
] |
[
{
"param": "file",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import csv
def loadCSV(file):
def convertTypes(s):
s = s.strip()
try:
return float(s) if '.' in s else int(s)
except ValueError:
return s
reader = csv.reader(open(file, 'rt'))
return [[convertTypes(item) for item in row] for row in reader]
| 737 | 106 |
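A usage sketch for the loadCSV record above; the file name and contents are assumptions. The nested convertTypes strips each cell before deciding between float (cell contains '.'), int, and plain string.

# data.csv (hypothetical contents):
#   a, 1, 2.5
#   b, 3, 4.0
rows = loadCSV('data.csv')
print(rows)  # [['a', 1, 2.5], ['b', 3, 4.0]]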
d064af40e527fdfbb0ac277e7a9f1c764dc679aa
|
gravypod/quarkGL
|
tools/actions/generate_compile_commands_json.py
|
[
"MIT"
] |
Python
|
_get_command
|
<not_specific>
|
def _get_command(path, command_directory):
'''Read a _compile_command file and return the parsed JSON string.
Args:
path: The pathlib.Path to _compile_command file.
command_directory: The directory commands are run from.
Returns:
a string to stick in compile_commands.json.
'''
with path.open('r') as f:
contents = f.read().split('\0')
if len(contents) != 2:
# Old/incomplete file or something; silently ignore it.
return None
command, file = contents
return textwrap.dedent('''\
{
"directory": "%s",
"command": "%s",
"file": "%s"
}''' % (command_directory, command.replace('"', '\\"'), file))
|
Read a _compile_command file and return the parsed JSON string.
Args:
path: The pathlib.Path to _compile_command file.
command_directory: The directory commands are run from.
Returns:
a string to stick in compile_commands.json.
|
Read a _compile_command file and return the parsed JSON string.
|
[
"Read",
"a",
"_compile_command",
"file",
"and",
"return",
"the",
"parsed",
"JSON",
"string",
"."
] |
def _get_command(path, command_directory):
with path.open('r') as f:
contents = f.read().split('\0')
if len(contents) != 2:
return None
command, file = contents
return textwrap.dedent('''\
{
"directory": "%s",
"command": "%s",
"file": "%s"
}''' % (command_directory, command.replace('"', '\\"'), file))
|
[
"def",
"_get_command",
"(",
"path",
",",
"command_directory",
")",
":",
"with",
"path",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"contents",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\0'",
")",
"if",
"len",
"(",
"contents",
")",
"!=",
"2",
":",
"return",
"None",
"command",
",",
"file",
"=",
"contents",
"return",
"textwrap",
".",
"dedent",
"(",
"'''\\\n {\n \"directory\": \"%s\",\n \"command\": \"%s\",\n \"file\": \"%s\"\n }'''",
"%",
"(",
"command_directory",
",",
"command",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
",",
"file",
")",
")"
] |
Read a _compile_command file and return the parsed JSON string.
|
[
"Read",
"a",
"_compile_command",
"file",
"and",
"return",
"the",
"parsed",
"JSON",
"string",
"."
] |
[
"'''Read a _compile_command file and return the parsed JSON string.\n Args:\n path: The pathlib.Path to _compile_command file.\n command_directory: The directory commands are run from.\n Returns:\n a string to stick in compile_commands.json.\n '''",
"# Old/incomplete file or something; silently ignore it."
] |
[
{
"param": "path",
"type": null
},
{
"param": "command_directory",
"type": null
}
] |
{
"returns": [
{
"docstring": "a string to stick in compile_commands.json.",
"docstring_tokens": [
"a",
"string",
"to",
"stick",
"in",
"compile_commands",
".",
"json",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": "The pathlib.Path to _compile_command file.",
"docstring_tokens": [
"The",
"pathlib",
".",
"Path",
"to",
"_compile_command",
"file",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "command_directory",
"type": null,
"docstring": "The directory commands are run from.",
"docstring_tokens": [
"The",
"directory",
"commands",
"are",
"run",
"from",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import textwrap
def _get_command(path, command_directory):
with path.open('r') as f:
contents = f.read().split('\0')
if len(contents) != 2:
return None
command, file = contents
return textwrap.dedent('''\
{
"directory": "%s",
"command": "%s",
"file": "%s"
}''' % (command_directory, command.replace('"', '\\"'), file))
| 738 | 142 |
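A driving sketch for the _get_command record above; the file name and its NUL-separated contents are assumptions mirroring the two-field format the function expects.

import pathlib

p = pathlib.Path('foo_compile_command')  # hypothetical file
p.write_text('gcc -c foo.c\0foo.c')      # command, NUL separator, source file
print(_get_command(p, '/workspace'))     # JSON object with directory/command/file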
0a40f6ea53c535ec879674c05128f65780b18c90
|
zipated/src
|
PRESUBMIT.py
|
[
"BSD-3-Clause"
] |
Python
|
_CheckSyslogUseWarning
|
<not_specific>
|
def _CheckSyslogUseWarning(input_api, output_api, source_file_filter=None):
"""Checks that all source files use SYSLOG properly."""
syslog_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
for line_number, line in f.ChangedContents():
if 'SYSLOG' in line:
syslog_files.append(f.LocalPath() + ':' + str(line_number))
if syslog_files:
return [output_api.PresubmitPromptWarning(
'Please make sure there are no privacy sensitive bits of data in SYSLOG'
' calls.\nFiles to check:\n', items=syslog_files)]
return []
|
Checks that all source files use SYSLOG properly.
|
Checks that all source files use SYSLOG properly.
|
[
"Checks",
"that",
"all",
"source",
"files",
"use",
"SYSLOG",
"properly",
"."
] |
def _CheckSyslogUseWarning(input_api, output_api, source_file_filter=None):
syslog_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
for line_number, line in f.ChangedContents():
if 'SYSLOG' in line:
syslog_files.append(f.LocalPath() + ':' + str(line_number))
if syslog_files:
return [output_api.PresubmitPromptWarning(
'Please make sure there are no privacy sensitive bits of data in SYSLOG'
' calls.\nFiles to check:\n', items=syslog_files)]
return []
|
[
"def",
"_CheckSyslogUseWarning",
"(",
"input_api",
",",
"output_api",
",",
"source_file_filter",
"=",
"None",
")",
":",
"syslog_files",
"=",
"[",
"]",
"for",
"f",
"in",
"input_api",
".",
"AffectedSourceFiles",
"(",
"source_file_filter",
")",
":",
"for",
"line_number",
",",
"line",
"in",
"f",
".",
"ChangedContents",
"(",
")",
":",
"if",
"'SYSLOG'",
"in",
"line",
":",
"syslog_files",
".",
"append",
"(",
"f",
".",
"LocalPath",
"(",
")",
"+",
"':'",
"+",
"str",
"(",
"line_number",
")",
")",
"if",
"syslog_files",
":",
"return",
"[",
"output_api",
".",
"PresubmitPromptWarning",
"(",
"'Please make sure there are no privacy sensitive bits of data in SYSLOG'",
"' calls.\\nFiles to check:\\n'",
",",
"items",
"=",
"syslog_files",
")",
"]",
"return",
"[",
"]"
] |
Checks that all source files use SYSLOG properly.
|
[
"Checks",
"that",
"all",
"source",
"files",
"use",
"SYSLOG",
"properly",
"."
] |
[
"\"\"\"Checks that all source files use SYSLOG properly.\"\"\""
] |
[
{
"param": "input_api",
"type": null
},
{
"param": "output_api",
"type": null
},
{
"param": "source_file_filter",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "input_api",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output_api",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "source_file_filter",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _CheckSyslogUseWarning(input_api, output_api, source_file_filter=None):
syslog_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
for line_number, line in f.ChangedContents():
if 'SYSLOG' in line:
syslog_files.append(f.LocalPath() + ':' + str(line_number))
if syslog_files:
return [output_api.PresubmitPromptWarning(
'Please make sure there are no privacy sensitive bits of data in SYSLOG'
' calls.\nFiles to check:\n', items=syslog_files)]
return []
| 740 | 462 |
8ccca429e9b591a9f50c7c5e67a862a4284dc12b
|
bkatyl/compute-image-tools
|
daisy_workflows/linux_common/utils/common.py
|
[
"Apache-2.0"
] |
Python
|
GetOslogin
|
<not_specific>
|
def GetOslogin(discovery, credentials):
"""Get google os-login api cli object.
Args:
discovery: object, from googleapiclient.
credentials: object, from google.auth.
Returns:
oslogin: object, the google oslogin api object.
"""
oslogin = discovery.build('oslogin', 'v1', credentials=credentials)
return oslogin
|
Get google os-login api cli object.
Args:
discovery: object, from googleapiclient.
credentials: object, from google.auth.
Returns:
oslogin: object, the google oslogin api object.
|
Get google os-login api cli object.
|
[
"Get",
"google",
"os",
"-",
"login",
"api",
"cli",
"object",
"."
] |
def GetOslogin(discovery, credentials):
oslogin = discovery.build('oslogin', 'v1', credentials=credentials)
return oslogin
|
[
"def",
"GetOslogin",
"(",
"discovery",
",",
"credentials",
")",
":",
"oslogin",
"=",
"discovery",
".",
"build",
"(",
"'oslogin'",
",",
"'v1'",
",",
"credentials",
"=",
"credentials",
")",
"return",
"oslogin"
] |
Get google os-login api cli object.
|
[
"Get",
"google",
"os",
"-",
"login",
"api",
"cli",
"object",
"."
] |
[
"\"\"\"Get google os-login api cli object.\n\n Args:\n discovery: object, from googleapiclient.\n credentials: object, from google.auth.\n\n Returns:\n oslogin: object, the google oslogin api object.\n \"\"\""
] |
[
{
"param": "discovery",
"type": null
},
{
"param": "credentials",
"type": null
}
] |
{
"returns": [
{
"docstring": "object, the google oslogin api object.",
"docstring_tokens": [
"object",
"the",
"google",
"oslogin",
"api",
"object",
"."
],
"type": "oslogin"
}
],
"raises": [],
"params": [
{
"identifier": "discovery",
"type": null,
"docstring": "object, from googleapiclient.",
"docstring_tokens": [
"object",
"from",
"googleapiclient",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "credentials",
"type": null,
"docstring": "object, from google.auth.",
"docstring_tokens": [
"object",
"from",
"google",
".",
"auth",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def GetOslogin(discovery, credentials):
oslogin = discovery.build('oslogin', 'v1', credentials=credentials)
return oslogin
| 741 | 567 |
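A hedged sketch of driving the GetOslogin record above with the usual googleapiclient/google-auth entry points; it assumes application-default credentials are configured in the environment.

import google.auth
import googleapiclient.discovery

credentials, _project = google.auth.default()
oslogin = GetOslogin(googleapiclient.discovery, credentials)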
036a86b4f9ab0aaa033b776534fbff001143114e
|
tristanbrown/resolwe-bio-py
|
resdk/data_upload/multiplexed.py
|
[
"Apache-2.0"
] |
Python
|
_create_multi_sample
| null |
def _create_multi_sample(multi_reads, sample_list):
"""Create samples from uploaded and demultiplexed reads."""
qseq_id = multi_reads.id
demulti_list = multi_reads.resolwe.data.filter(parents=qseq_id)
for sample in sample_list:
label = '{}_{}'.format(sample.name, sample.barcode)
demulti_reads = [s for s in demulti_list if label in s.name][0]
demulti_reads.sample.delete(force=True)
main_sample = demulti_reads.resolwe.sample.create(name=sample.name)
main_sample.add_data(demulti_reads)
main_sample.save()
demulti_reads.collections[0].add_samples(main_sample)
|
Create samples from uploaded and demultiplexed reads.
|
Create samples from uploaded and demultiplexed reads.
|
[
"Create",
"samples",
"from",
"uploaded",
"and",
"demultiplexed",
"reads",
"."
] |
def _create_multi_sample(multi_reads, sample_list):
qseq_id = multi_reads.id
demulti_list = multi_reads.resolwe.data.filter(parents=qseq_id)
for sample in sample_list:
label = '{}_{}'.format(sample.name, sample.barcode)
demulti_reads = [s for s in demulti_list if label in s.name][0]
demulti_reads.sample.delete(force=True)
main_sample = demulti_reads.resolwe.sample.create(name=sample.name)
main_sample.add_data(demulti_reads)
main_sample.save()
demulti_reads.collections[0].add_samples(main_sample)
|
[
"def",
"_create_multi_sample",
"(",
"multi_reads",
",",
"sample_list",
")",
":",
"qseq_id",
"=",
"multi_reads",
".",
"id",
"demulti_list",
"=",
"multi_reads",
".",
"resolwe",
".",
"data",
".",
"filter",
"(",
"parents",
"=",
"qseq_id",
")",
"for",
"sample",
"in",
"sample_list",
":",
"label",
"=",
"'{}_{}'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"sample",
".",
"barcode",
")",
"demulti_reads",
"=",
"[",
"s",
"for",
"s",
"in",
"demulti_list",
"if",
"label",
"in",
"s",
".",
"name",
"]",
"[",
"0",
"]",
"demulti_reads",
".",
"sample",
".",
"delete",
"(",
"force",
"=",
"True",
")",
"main_sample",
"=",
"demulti_reads",
".",
"resolwe",
".",
"sample",
".",
"create",
"(",
"name",
"=",
"sample",
".",
"name",
")",
"main_sample",
".",
"add_data",
"(",
"demulti_reads",
")",
"main_sample",
".",
"save",
"(",
")",
"demulti_reads",
".",
"collections",
"[",
"0",
"]",
".",
"add_samples",
"(",
"main_sample",
")"
] |
Create samples from uploaded and demultiplexed reads.
|
[
"Create",
"samples",
"from",
"uploaded",
"and",
"demultiplexed",
"reads",
"."
] |
[
"\"\"\"Create samples from uploaded and demultiplexed reads.\"\"\""
] |
[
{
"param": "multi_reads",
"type": null
},
{
"param": "sample_list",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "multi_reads",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sample_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _create_multi_sample(multi_reads, sample_list):
qseq_id = multi_reads.id
demulti_list = multi_reads.resolwe.data.filter(parents=qseq_id)
for sample in sample_list:
label = '{}_{}'.format(sample.name, sample.barcode)
demulti_reads = [s for s in demulti_list if label in s.name][0]
demulti_reads.sample.delete(force=True)
main_sample = demulti_reads.resolwe.sample.create(name=sample.name)
main_sample.add_data(demulti_reads)
main_sample.save()
demulti_reads.collections[0].add_samples(main_sample)
| 742 | 940 |
ab1cb25602c015c1dc59bb1273087d7ea24fc6b0
|
turian/blogofile
|
blogofile/util.py
|
[
"MIT"
] |
Python
|
path_join
|
<not_specific>
|
def path_join(*parts, **kwargs):
"""A better os.path.join
Converts (back)slashes from other platforms automatically
Normally, os.path.join is great, as long as you pass each dir/file
independantly, but not if you (accidentally/intentionally) put a slash in
if sep is specified, use that as the seperator
rather than the system default"""
if kwargs.has_key('sep'):
sep = kwargs['sep']
else:
sep = os.sep
if os.sep == "\\":
wrong_slash_type = "/"
else:
wrong_slash_type = "\\"
new_parts = []
for p in parts:
if hasattr(p,"__iter__"):
#This part is a sequence itself, recurse into it
p = path_join(*p)
if p in ("","\\","/"):
continue
new_parts.append(p.replace(wrong_slash_type,os.sep))
return sep.join(new_parts)
|
A better os.path.join
Converts (back)slashes from other platforms automatically
Normally, os.path.join is great, as long as you pass each dir/file
independently, but not if you (accidentally/intentionally) put a slash in
if sep is specified, use that as the separator
rather than the system default
|
A better os.path.join
Converts (back)slashes from other platforms automatically
Normally, os.path.join is great, as long as you pass each dir/file
independently, but not if you (accidentally/intentionally) put a slash in
if sep is specified, use that as the separator
rather than the system default
|
[
"A",
"better",
"os",
".",
"path",
".",
"join",
"Converts",
"(",
"back",
")",
"slashes",
"from",
"other",
"platforms",
"automatically",
"Normally",
"os",
".",
"path",
".",
"join",
"is",
"great",
"as",
"long",
"as",
"you",
"pass",
"each",
"dir",
"/",
"file",
"independantly",
"but",
"not",
"if",
"you",
"(",
"accidentally",
"/",
"intentionally",
")",
"put",
"a",
"slash",
"in",
"if",
"sep",
"is",
"specified",
"use",
"that",
"as",
"the",
"seperator",
"rather",
"than",
"the",
"system",
"default"
] |
def path_join(*parts, **kwargs):
if kwargs.has_key('sep'):
sep = kwargs['sep']
else:
sep = os.sep
if os.sep == "\\":
wrong_slash_type = "/"
else:
wrong_slash_type = "\\"
new_parts = []
for p in parts:
if hasattr(p,"__iter__"):
p = path_join(*p)
if p in ("","\\","/"):
continue
new_parts.append(p.replace(wrong_slash_type,os.sep))
return sep.join(new_parts)
|
[
"def",
"path_join",
"(",
"*",
"parts",
",",
"**",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"has_key",
"(",
"'sep'",
")",
":",
"sep",
"=",
"kwargs",
"[",
"'sep'",
"]",
"else",
":",
"sep",
"=",
"os",
".",
"sep",
"if",
"os",
".",
"sep",
"==",
"\"\\\\\"",
":",
"wrong_slash_type",
"=",
"\"/\"",
"else",
":",
"wrong_slash_type",
"=",
"\"\\\\\"",
"new_parts",
"=",
"[",
"]",
"for",
"p",
"in",
"parts",
":",
"if",
"hasattr",
"(",
"p",
",",
"\"__iter__\"",
")",
":",
"p",
"=",
"path_join",
"(",
"*",
"p",
")",
"if",
"p",
"in",
"(",
"\"\"",
",",
"\"\\\\\"",
",",
"\"/\"",
")",
":",
"continue",
"new_parts",
".",
"append",
"(",
"p",
".",
"replace",
"(",
"wrong_slash_type",
",",
"os",
".",
"sep",
")",
")",
"return",
"sep",
".",
"join",
"(",
"new_parts",
")"
] |
A better os.path.join
Converts (back)slashes from other platforms automatically
Normally, os.path.join is great, as long as you pass each dir/file
independently, but not if you (accidentally/intentionally) put a slash in
|
[
"A",
"better",
"os",
".",
"path",
".",
"join",
"Converts",
"(",
"back",
")",
"slashes",
"from",
"other",
"platforms",
"automatically",
"Normally",
"os",
".",
"path",
".",
"join",
"is",
"great",
"as",
"long",
"as",
"you",
"pass",
"each",
"dir",
"/",
"file",
"independantly",
"but",
"not",
"if",
"you",
"(",
"accidentally",
"/",
"intentionally",
")",
"put",
"a",
"slash",
"in"
] |
[
"\"\"\"A better os.path.join\n\n Converts (back)slashes from other platforms automatically\n Normally, os.path.join is great, as long as you pass each dir/file\n independantly, but not if you (accidentally/intentionally) put a slash in\n\n if sep is specified, use that as the seperator\n rather than the system default\"\"\"",
"#This part is a sequence itself, recurse into it"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import os
def path_join(*parts, **kwargs):
if kwargs.has_key('sep'):
sep = kwargs['sep']
else:
sep = os.sep
if os.sep == "\\":
wrong_slash_type = "/"
else:
wrong_slash_type = "\\"
new_parts = []
for p in parts:
if hasattr(p,"__iter__"):
p = path_join(*p)
if p in ("","\\","/"):
continue
new_parts.append(p.replace(wrong_slash_type,os.sep))
return sep.join(new_parts)
| 743 | 991 |
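Worth flagging: the path_join record above is Python 2 code (dict.has_key was removed in Python 3, and the hasattr(p, "__iter__") test only skips plain strings under Python 2). A usage sketch under Python 2 semantics on a POSIX system (os.sep == '/'):

path_join('a\\b', 'c/d')             # 'a/b/c/d' -- foreign backslashes normalized
path_join(('a', 'b'), 'c', sep='/')  # 'a/b/c'   -- nested sequences are flattened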
15e033202b89b2b849017cc12e0e4369d1203049
|
radiasoft/rsopt
|
rsopt/pkcli/optimize.py
|
[
"Apache-2.0"
] |
Python
|
_final_global_result
| null |
def _final_global_result(H):
"""Looks at points declaired local minima. For cases where multiple 'best' results may be found."""
print("Local Minima Found: ('x', 'f')")
for lm in H[H['local_min']]:
print(lm['x'], lm['f'])
|
Looks at points declared local minima. For cases where multiple 'best' results may be found.
|
Looks at points declared local minima. For cases where multiple 'best' results may be found.
|
[
"Looks",
"at",
"points",
"declaired",
"local",
"minima",
".",
"For",
"cases",
"where",
"multiple",
"'",
"best",
"'",
"results",
"may",
"be",
"found",
"."
] |
def _final_global_result(H):
print("Local Minima Found: ('x', 'f')")
for lm in H[H['local_min']]:
print(lm['x'], lm['f'])
|
[
"def",
"_final_global_result",
"(",
"H",
")",
":",
"print",
"(",
"\"Local Minima Found: ('x', 'f')\"",
")",
"for",
"lm",
"in",
"H",
"[",
"H",
"[",
"'local_min'",
"]",
"]",
":",
"print",
"(",
"lm",
"[",
"'x'",
"]",
",",
"lm",
"[",
"'f'",
"]",
")"
] |
Looks at points declared local minima.
|
[
"Looks",
"at",
"points",
"declaired",
"local",
"minima",
"."
] |
[
"\"\"\"Looks at points declaired local minima. For cases where multiple 'best' results may be found.\"\"\""
] |
[
{
"param": "H",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "H",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _final_global_result(H):
print("Local Minima Found: ('x', 'f')")
for lm in H[H['local_min']]:
print(lm['x'], lm['f'])
| 744 | 731 |
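A hedged sketch of the history array the _final_global_result record above expects, built as a NumPy structured array; the field layout ('x', 'f', boolean 'local_min') is inferred from the lookups in the code, not documented in the record.

import numpy as np

H = np.zeros(3, dtype=[('x', float, (2,)), ('f', float), ('local_min', bool)])
H['x'] = [[0.0, 0.0], [1.0, 2.0], [3.0, 4.0]]
H['f'] = [5.0, 0.1, 0.2]
H['local_min'] = [False, True, True]
_final_global_result(H)  # prints the two rows flagged as local minima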
7c3dc6954dae1c2827a38016def8595ba94b85e5
|
KrishnaGarg/named-entity-recognition
|
build_vocab.py
|
[
"MIT"
] |
Python
|
save_dict_to_json
| null |
def save_dict_to_json(d, path):
"""Save properties of dataset to json file"""
with open(path, 'w') as f:
dictionary = {k:v for k, v in d.items()}
json.dump(dictionary, f, indent=4)
|
Save properties of dataset to json file
|
Save properties of dataset to json file
|
[
"Save",
"properties",
"of",
"dataset",
"to",
"json",
"file"
] |
def save_dict_to_json(d, path):
with open(path, 'w') as f:
dictionary = {k:v for k, v in d.items()}
json.dump(dictionary, f, indent=4)
|
[
"def",
"save_dict_to_json",
"(",
"d",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"dictionary",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"}",
"json",
".",
"dump",
"(",
"dictionary",
",",
"f",
",",
"indent",
"=",
"4",
")"
] |
Save properties of dataset to json file
|
[
"Save",
"properties",
"of",
"dataset",
"to",
"json",
"file"
] |
[
"\"\"\"Save properties of dataset to json file\"\"\""
] |
[
{
"param": "d",
"type": null
},
{
"param": "path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "d",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def save_dict_to_json(d, path):
with open(path, 'w') as f:
dictionary = {k:v for k, v in d.items()}
json.dump(dictionary, f, indent=4)
| 745 | 1,019 |
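A quick call for the save_dict_to_json record above (path and keys are hypothetical); the dict comprehension in the record is effectively a shallow copy, so the values must already be JSON-serializable.

save_dict_to_json({'vocab_size': 10000, 'pad_word': '<pad>'}, 'dataset_params.json')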
a4b5ba924abccb41869b0602547c8c9c9b3d5096
|
blakfeld/Data-Structures-and-Algoritms-Practice
|
Python/general/binary_search_rotated_array.py
|
[
"MIT"
] |
Python
|
binary_search
|
<not_specific>
|
def binary_search(list_to_search, num_to_find):
"""
Perform a Binary Search on a rotated array of ints.
Args:
list_to_search (list): The list to search.
num_to_find (int): The int to search for.
Returns:
tuple: (index, value)
"""
first = 0
last = len(list_to_search) - 1
while first <= last:
mid = (first + last) // 2
if list_to_search[mid] == num_to_find:
return mid, list_to_search[mid]
# Is first half sorted?
if list_to_search[first] <= list_to_search[mid]:
# If first and mid are less than num_to_find, Search the
# first half, else search the second half.
if all([list_to_search[first] <= num_to_find,
list_to_search[mid] >= num_to_find]):
last = mid - 1
else:
first = mid + 1
# If the second half is sorted.
else:
# If last and mid are less than num_to_find, Search the
# second half, else search the first half.
if all([list_to_search[mid] <= num_to_find,
list_to_search[last] <= num_to_find]):
first = mid + 1
else:
last = mid - 1
return None, None
|
Perform a Binary Search on a rotated array of ints.
Args:
list_to_search (list): The list to search.
num_to_find (int): The int to search for.
Returns:
tuple: (index, value)
|
Perform a Binary Search on a rotated array of ints.
|
[
"Perform",
"a",
"Binary",
"Search",
"on",
"a",
"rotated",
"array",
"of",
"ints",
"."
] |
def binary_search(list_to_search, num_to_find):
first = 0
last = len(list_to_search) - 1
while first <= last:
mid = (first + last) // 2
if list_to_search[mid] == num_to_find:
return mid, list_to_search[mid]
if list_to_search[first] <= list_to_search[mid]:
if all([list_to_search[first] <= num_to_find,
list_to_search[mid] >= num_to_find]):
last = mid - 1
else:
first = mid + 1
else:
if all([list_to_search[mid] <= num_to_find,
list_to_search[last] <= num_to_find]):
first = mid + 1
else:
last = mid - 1
return None, None
|
[
"def",
"binary_search",
"(",
"list_to_search",
",",
"num_to_find",
")",
":",
"first",
"=",
"0",
"last",
"=",
"len",
"(",
"list_to_search",
")",
"-",
"1",
"while",
"first",
"<=",
"last",
":",
"mid",
"=",
"(",
"first",
"+",
"last",
")",
"//",
"2",
"if",
"list_to_search",
"[",
"mid",
"]",
"==",
"num_to_find",
":",
"return",
"mid",
",",
"list_to_search",
"[",
"mid",
"]",
"if",
"list_to_search",
"[",
"first",
"]",
"<=",
"list_to_search",
"[",
"mid",
"]",
":",
"if",
"all",
"(",
"[",
"list_to_search",
"[",
"first",
"]",
"<=",
"num_to_find",
",",
"list_to_search",
"[",
"mid",
"]",
">=",
"num_to_find",
"]",
")",
":",
"last",
"=",
"mid",
"-",
"1",
"else",
":",
"first",
"=",
"mid",
"+",
"1",
"else",
":",
"if",
"all",
"(",
"[",
"list_to_search",
"[",
"mid",
"]",
"<=",
"num_to_find",
",",
"list_to_search",
"[",
"last",
"]",
"<=",
"num_to_find",
"]",
")",
":",
"first",
"=",
"mid",
"+",
"1",
"else",
":",
"last",
"=",
"mid",
"-",
"1",
"return",
"None",
",",
"None"
] |
Perform a Binary Search on a rotated array of ints.
|
[
"Perform",
"a",
"Binary",
"Search",
"on",
"a",
"rotated",
"array",
"of",
"ints",
"."
] |
[
"\"\"\"\n Perform a Binary Search on a rotated array of ints.\n\n Args:\n list_to_search (list): The list to search.\n num_to_find (int): The int to search for.\n\n Returns:\n tuple: (index, value)\n \"\"\"",
"# Is first half sorted?",
"# If first and mid are less than num_to_find, Search the",
"# first half, else search the second half.",
"# If the second half is sorted.",
"# If last and mid are less than num_to_find, Search the",
"# second half, else search the first half."
] |
[
{
"param": "list_to_search",
"type": null
},
{
"param": "num_to_find",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "tuple"
}
],
"raises": [],
"params": [
{
"identifier": "list_to_search",
"type": null,
"docstring": "The list to search.",
"docstring_tokens": [
"The",
"list",
"to",
"search",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "num_to_find",
"type": null,
"docstring": "The int to search for.",
"docstring_tokens": [
"The",
"int",
"to",
"search",
"for",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def binary_search(list_to_search, num_to_find):
first = 0
last = len(list_to_search) - 1
while first <= last:
mid = (first + last) // 2
if list_to_search[mid] == num_to_find:
return mid, list_to_search[mid]
if list_to_search[first] <= list_to_search[mid]:
if all([list_to_search[first] <= num_to_find,
list_to_search[mid] >= num_to_find]):
last = mid - 1
else:
first = mid + 1
else:
if all([list_to_search[mid] <= num_to_find,
list_to_search[last] <= num_to_find]):
first = mid + 1
else:
last = mid - 1
return None, None
| 746 | 272 |
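A usage sketch for the binary_search record above (the rotated array is an assumption). One caveat observed from tracing: the second-half test uses list_to_search[last] <= num_to_find rather than num_to_find <= list_to_search[last], so some rotation/target pairs (e.g. searching 7 in [6, 7, 8, 1, 2, 3, 4, 5]) descend into the wrong half and miss; the calls below are ones that trace correctly.

rotated = [4, 5, 6, 7, 0, 1, 2]
print(binary_search(rotated, 6))  # (2, 6)
print(binary_search(rotated, 0))  # (4, 0)
print(binary_search(rotated, 3))  # (None, None) -- value absent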
6e2cc22a22c753b1b11cdb00fa32523981cb1e59
|
futurecore/revelation
|
revelation/utils.py
|
[
"BSD-3-Clause"
] |
Python
|
sext_11
|
<not_specific>
|
def sext_11(value):
"""Sign-extended 11 bit number.
"""
if value & 0x400:
return 0xfffff800 | value
return value
|
Sign-extended 11 bit number.
|
Sign-extended 11 bit number.
|
[
"Sign",
"-",
"extended",
"11",
"bit",
"number",
"."
] |
def sext_11(value):
if value & 0x400:
return 0xfffff800 | value
return value
|
[
"def",
"sext_11",
"(",
"value",
")",
":",
"if",
"value",
"&",
"0x400",
":",
"return",
"0xfffff800",
"|",
"value",
"return",
"value"
] |
Sign-extended 11 bit number.
|
[
"Sign",
"-",
"extended",
"11",
"bit",
"number",
"."
] |
[
"\"\"\"Sign-extended 11 bit number.\n \"\"\""
] |
[
{
"param": "value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def sext_11(value):
if value & 0x400:
return 0xfffff800 | value
return value
| 747 | 276 |
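Quick checks for the sext_11 record above, showing 11-bit sign extension into a 32-bit unsigned value:

print(hex(sext_11(0x3ff)))  # 0x3ff       -- bit 10 clear, value unchanged
print(hex(sext_11(0x400)))  # 0xfffffc00  -- bit 10 set, upper bits filled
print(hex(sext_11(0x7ff)))  # 0xffffffff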
8d171214f4172d49ef6506e8bf0a14d69aed84f8
|
rraadd88/pyensembl
|
pyensembl/species.py
|
[
"Apache-2.0"
] |
Python
|
all_registered_latin_names
|
<not_specific>
|
def all_registered_latin_names(cls):
"""
Returns latin name of every registered species.
"""
return list(cls._latin_names_to_species.keys())
|
Returns latin name of every registered species.
|
Returns latin name of every registered species.
|
[
"Returns",
"latin",
"name",
"of",
"every",
"registered",
"species",
"."
] |
def all_registered_latin_names(cls):
return list(cls._latin_names_to_species.keys())
|
[
"def",
"all_registered_latin_names",
"(",
"cls",
")",
":",
"return",
"list",
"(",
"cls",
".",
"_latin_names_to_species",
".",
"keys",
"(",
")",
")"
] |
Returns latin name of every registered species.
|
[
"Returns",
"latin",
"name",
"of",
"every",
"registered",
"species",
"."
] |
[
"\"\"\"\n Returns latin name of every registered species.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def all_registered_latin_names(cls):
return list(cls._latin_names_to_species.keys())
| 748 | 348 |
6c49464e27f4d695f06fda748f72121d3c0eecec
|
DavydovDmitry/lightnings
|
lightnings/utils/env.py
|
[
"Apache-2.0"
] |
Python
|
export_env
| null |
def export_env(env_file: pathlib.Path):
"""Export environment variables from file
env_file : pathlib.Path
path to file with environment variables
"""
with open(env_file) as f:
for line in f.readlines():
line = line.strip()
if line and line[0] != '#':
key, value = line.split('=')
os.environ[key] = value
|
Export environment variables from file
env_file : pathlib.Path
path to file with environment variables
|
Export environment variables from file
env_file : pathlib.Path
path to file with environment variables
|
[
"Export",
"environment",
"variables",
"from",
"file",
"env_file",
":",
"pathlib",
".",
"Path",
"path",
"to",
"file",
"with",
"environment",
"variables"
] |
def export_env(env_file: pathlib.Path):
with open(env_file) as f:
for line in f.readlines():
line = line.strip()
if line and line[0] != '#':
key, value = line.split('=')
os.environ[key] = value
|
[
"def",
"export_env",
"(",
"env_file",
":",
"pathlib",
".",
"Path",
")",
":",
"with",
"open",
"(",
"env_file",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"line",
"[",
"0",
"]",
"!=",
"'#'",
":",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"os",
".",
"environ",
"[",
"key",
"]",
"=",
"value"
] |
Export environment variables from file
env_file : pathlib.Path
path to file with environment variables
|
[
"Export",
"environment",
"variables",
"from",
"file",
"env_file",
":",
"pathlib",
".",
"Path",
"path",
"to",
"file",
"with",
"environment",
"variables"
] |
[
"\"\"\"Export environment variables from file\n\n env_file : pathlib.Path\n path to file with environment variables\n \"\"\""
] |
[
{
"param": "env_file",
"type": "pathlib.Path"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "env_file",
"type": "pathlib.Path",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def export_env(env_file: pathlib.Path):
with open(env_file) as f:
for line in f.readlines():
line = line.strip()
if line and line[0] != '#':
key, value = line.split('=')
os.environ[key] = value
| 750 | 570 |
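A usage sketch for the export_env record above; the file name and contents are hypothetical. Note the record splits on '=' with no maxsplit, so a value containing '=' would raise ValueError on unpacking.

import os
import pathlib

env = pathlib.Path('.env')  # hypothetical file
env.write_text('# comment\nAPI_HOST=localhost\nAPI_PORT=8080\n')
export_env(env)
print(os.environ['API_PORT'])  # 8080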
5a2ed4c5926b2425b3168d78c6d5db25ff8923bd
|
danielgis/invest
|
scripts/invest-autotest.py
|
[
"BSD-3-Clause"
] |
Python
|
sh
|
<not_specific>
|
def sh(command, capture=True):
"""Execute something on the shell and return the stdout."""
p = subprocess.Popen(command, shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
p_stdout = p.communicate()[0]
if capture:
return p_stdout
|
Execute something on the shell and return the stdout.
|
Execute something on the shell and return the stdout.
|
[
"Execute",
"something",
"on",
"the",
"shell",
"and",
"return",
"the",
"stdout",
"."
] |
def sh(command, capture=True):
p = subprocess.Popen(command, shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
p_stdout = p.communicate()[0]
if capture:
return p_stdout
|
[
"def",
"sh",
"(",
"command",
",",
"capture",
"=",
"True",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"p_stdout",
"=",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"capture",
":",
"return",
"p_stdout"
] |
Execute something on the shell and return the stdout.
|
[
"Execute",
"something",
"on",
"the",
"shell",
"and",
"return",
"the",
"stdout",
"."
] |
[
"\"\"\"Execute something on the shell and return the stdout.\"\"\""
] |
[
{
"param": "command",
"type": null
},
{
"param": "capture",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "command",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "capture",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
def sh(command, capture=True):
p = subprocess.Popen(command, shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
p_stdout = p.communicate()[0]
if capture:
return p_stdout
| 751 | 75 |
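A quick call for the sh record above (the command is illustrative). Under Python 3 the captured output is bytes, since the Popen is opened without text mode.

out = sh('echo hello')
print(out)  # b'hello\n' on a POSIX shell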
a8eb1a2ef74d78e81766d1c56dacd2a3dd6440a4
|
Zhong-J/azureml-examples
|
cli/endpoints/online/model-2/onlinescoring/score.py
|
[
"MIT"
] |
Python
|
run
|
<not_specific>
|
def run(raw_data):
"""
This function is called for every invocation of the endpoint to perform the actual scoring/prediction.
In the example we extract the data from the json input and call the scikit-learn model's predict()
method and return the result back
"""
logging.info("model 2: request received")
result = [0.5, 0.5]
logging.info("Request processed")
# return hardcoded result so that it is easy to validate safe rollout scenario: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-safely-rollout-managed-endpoints
return result
# actual scoring logic for reference:
# data = json.loads(raw_data)["data"]
# data = numpy.array(data)
# result = model.predict(data)
# return result.tolist()
|
This function is called for every invocation of the endpoint to perform the actual scoring/prediction.
In the example we extract the data from the json input and call the scikit-learn model's predict()
method and return the result back
|
This function is called for every invocation of the endpoint to perform the actual scoring/prediction.
In the example we extract the data from the json input and call the scikit-learn model's predict()
method and return the result back
|
[
"This",
"function",
"is",
"called",
"for",
"every",
"invocation",
"of",
"the",
"endpoint",
"to",
"perform",
"the",
"actual",
"scoring",
"/",
"prediction",
".",
"In",
"the",
"example",
"we",
"extract",
"the",
"data",
"from",
"the",
"json",
"input",
"and",
"call",
"the",
"scikit",
"-",
"learn",
"model",
"'",
"s",
"predict",
"()",
"method",
"and",
"return",
"the",
"result",
"back"
] |
def run(raw_data):
logging.info("model 2: request received")
result = [0.5, 0.5]
logging.info("Request processed")
return result
|
[
"def",
"run",
"(",
"raw_data",
")",
":",
"logging",
".",
"info",
"(",
"\"model 2: request received\"",
")",
"result",
"=",
"[",
"0.5",
",",
"0.5",
"]",
"logging",
".",
"info",
"(",
"\"Request processed\"",
")",
"return",
"result"
] |
This function is called for every invocation of the endpoint to perform the actual scoring/prediction.
|
[
"This",
"function",
"is",
"called",
"for",
"every",
"invocation",
"of",
"the",
"endpoint",
"to",
"perform",
"the",
"actual",
"scoring",
"/",
"prediction",
"."
] |
[
"\"\"\"\n This function is called for every invocation of the endpoint to perform the actual scoring/prediction.\n In the example we extract the data from the json input and call the scikit-learn model's predict()\n method and return the result back\n \"\"\"",
"# return hardcoded result so that it is easy to validate safe rollout scenario: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-safely-rollout-managed-endpoints",
"# actual scoring logic for reference:",
"# data = json.loads(raw_data)[\"data\"]",
"# data = numpy.array(data)",
"# result = model.predict(data)",
"# return result.tolist()"
] |
[
{
"param": "raw_data",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "raw_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import logging
def run(raw_data):
logging.info("model 2: request received")
result = [0.5, 0.5]
logging.info("Request processed")
return result
| 752 | 797 |
8f6bad5446f2d194cb22c227f4d490af9250bac6
|
Alexander-N/iscc-specs
|
src/iscc/iscc.py
|
[
"BSD-2-Clause"
] |
Python
|
dct
|
<not_specific>
|
def dct(values_list):
"""
Discrete cosine transform algorithm by Project Nayuki. (MIT License)
See: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
"""
n = len(values_list)
if n == 1:
return list(values_list)
elif n == 0 or n % 2 != 0:
raise ValueError()
else:
half = n // 2
alpha = [(values_list[i] + values_list[-(i + 1)]) for i in range(half)]
beta = [
(values_list[i] - values_list[-(i + 1)])
/ (math.cos((i + 0.5) * math.pi / n) * 2.0)
for i in range(half)
]
alpha = dct(alpha)
beta = dct(beta)
result = []
for i in range(half - 1):
result.append(alpha[i])
result.append(beta[i] + beta[i + 1])
result.append(alpha[-1])
result.append(beta[-1])
return result
|
Discrete cosine transform algorithm by Project Nayuki. (MIT License)
See: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
|
Discrete cosine transform algorithm by Project Nayuki.
|
[
"Discrete",
"cosine",
"transform",
"algorithm",
"by",
"Project",
"Nayuki",
"."
] |
def dct(values_list):
n = len(values_list)
if n == 1:
return list(values_list)
elif n == 0 or n % 2 != 0:
raise ValueError()
else:
half = n // 2
alpha = [(values_list[i] + values_list[-(i + 1)]) for i in range(half)]
beta = [
(values_list[i] - values_list[-(i + 1)])
/ (math.cos((i + 0.5) * math.pi / n) * 2.0)
for i in range(half)
]
alpha = dct(alpha)
beta = dct(beta)
result = []
for i in range(half - 1):
result.append(alpha[i])
result.append(beta[i] + beta[i + 1])
result.append(alpha[-1])
result.append(beta[-1])
return result
|
[
"def",
"dct",
"(",
"values_list",
")",
":",
"n",
"=",
"len",
"(",
"values_list",
")",
"if",
"n",
"==",
"1",
":",
"return",
"list",
"(",
"values_list",
")",
"elif",
"n",
"==",
"0",
"or",
"n",
"%",
"2",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
")",
"else",
":",
"half",
"=",
"n",
"//",
"2",
"alpha",
"=",
"[",
"(",
"values_list",
"[",
"i",
"]",
"+",
"values_list",
"[",
"-",
"(",
"i",
"+",
"1",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"half",
")",
"]",
"beta",
"=",
"[",
"(",
"values_list",
"[",
"i",
"]",
"-",
"values_list",
"[",
"-",
"(",
"i",
"+",
"1",
")",
"]",
")",
"/",
"(",
"math",
".",
"cos",
"(",
"(",
"i",
"+",
"0.5",
")",
"*",
"math",
".",
"pi",
"/",
"n",
")",
"*",
"2.0",
")",
"for",
"i",
"in",
"range",
"(",
"half",
")",
"]",
"alpha",
"=",
"dct",
"(",
"alpha",
")",
"beta",
"=",
"dct",
"(",
"beta",
")",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"half",
"-",
"1",
")",
":",
"result",
".",
"append",
"(",
"alpha",
"[",
"i",
"]",
")",
"result",
".",
"append",
"(",
"beta",
"[",
"i",
"]",
"+",
"beta",
"[",
"i",
"+",
"1",
"]",
")",
"result",
".",
"append",
"(",
"alpha",
"[",
"-",
"1",
"]",
")",
"result",
".",
"append",
"(",
"beta",
"[",
"-",
"1",
"]",
")",
"return",
"result"
] |
Discrete cosine transform algorithm by Project Nayuki.
|
[
"Discrete",
"cosine",
"transform",
"algorithm",
"by",
"Project",
"Nayuki",
"."
] |
[
"\"\"\"\n Discrete cosine transform algorithm by Project Nayuki. (MIT License)\n See: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms\n \"\"\""
] |
[
{
"param": "values_list",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "values_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import math
def dct(values_list):
n = len(values_list)
if n == 1:
return list(values_list)
elif n == 0 or n % 2 != 0:
raise ValueError()
else:
half = n // 2
alpha = [(values_list[i] + values_list[-(i + 1)]) for i in range(half)]
beta = [
(values_list[i] - values_list[-(i + 1)])
/ (math.cos((i + 0.5) * math.pi / n) * 2.0)
for i in range(half)
]
alpha = dct(alpha)
beta = dct(beta)
result = []
for i in range(half - 1):
result.append(alpha[i])
result.append(beta[i] + beta[i + 1])
result.append(alpha[-1])
result.append(beta[-1])
return result
| 753 | 997 |
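A sanity check for the dct record above against a direct O(n^2) evaluation of the unscaled DCT-II, which is what this recursion computes; the input length must be a power of two (other lengths hit the ValueError branch at some recursion depth).

import math

def dct_ii_naive(x):
    # direct evaluation of the unscaled DCT-II for comparison
    n = len(x)
    return [sum(x[t] * math.cos((t + 0.5) * k * math.pi / n) for t in range(n))
            for k in range(n)]

x = [1.0, 2.0, 3.0, 4.0]
print(all(abs(a - b) < 1e-9 for a, b in zip(dct(x), dct_ii_naive(x))))  # True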
bb6dfb20ee6b3ea6daf3ca56c2ff701a4adad119
|
getziadz/edalize
|
edalize/edatool.py
|
[
"BSD-2-Clause"
] |
Python
|
jinja_filter_param_value_str
|
<not_specific>
|
def jinja_filter_param_value_str(value, str_quote_style=""):
""" Convert a parameter value to string suitable to be passed to an EDA tool
Rules:
- Booleans are represented as 0/1
- Strings are either passed through or enclosed in the characters specified
in str_quote_style (e.g. '"' or '\\"')
- Everything else (including int, float, etc.) are converted using the str()
function.
"""
if type(value) == bool:
if (value) == True:
return '1'
else:
return '0'
elif type(value) == str:
return str_quote_style + str(value) + str_quote_style
else:
return str(value)
|
Convert a parameter value to string suitable to be passed to an EDA tool
Rules:
- Booleans are represented as 0/1
- Strings are either passed through or enclosed in the characters specified
in str_quote_style (e.g. '"' or '\\"')
- Everything else (including int, float, etc.) are converted using the str()
function.
|
Convert a parameter value to string suitable to be passed to an EDA tool
Rules:
Booleans are represented as 0/1
Strings are either passed through or enclosed in the characters specified
in str_quote_style
Everything else (including int, float, etc.) are converted using the str()
function.
|
[
"Convert",
"a",
"parameter",
"value",
"to",
"string",
"suitable",
"to",
"be",
"passed",
"to",
"an",
"EDA",
"tool",
"Rules",
":",
"Booleans",
"are",
"represented",
"as",
"0",
"/",
"1",
"Strings",
"are",
"either",
"passed",
"through",
"or",
"enclosed",
"in",
"the",
"characters",
"specified",
"in",
"str_quote_style",
"Everything",
"else",
"(",
"including",
"int",
"float",
"etc",
".",
")",
"are",
"converted",
"using",
"the",
"str",
"()",
"function",
"."
] |
def jinja_filter_param_value_str(value, str_quote_style=""):
if type(value) == bool:
if (value) == True:
return '1'
else:
return '0'
elif type(value) == str:
return str_quote_style + str(value) + str_quote_style
else:
return str(value)
|
[
"def",
"jinja_filter_param_value_str",
"(",
"value",
",",
"str_quote_style",
"=",
"\"\"",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"bool",
":",
"if",
"(",
"value",
")",
"==",
"True",
":",
"return",
"'1'",
"else",
":",
"return",
"'0'",
"elif",
"type",
"(",
"value",
")",
"==",
"str",
":",
"return",
"str_quote_style",
"+",
"str",
"(",
"value",
")",
"+",
"str_quote_style",
"else",
":",
"return",
"str",
"(",
"value",
")"
] |
Convert a parameter value to string suitable to be passed to an EDA tool
Rules:
Booleans are represented as 0/1
Strings are either passed through or enclosed in the characters specified
in str_quote_style (e.g. '"'
|
[
"Convert",
"a",
"parameter",
"value",
"to",
"string",
"suitable",
"to",
"be",
"passed",
"to",
"an",
"EDA",
"tool",
"Rules",
":",
"Booleans",
"are",
"represented",
"as",
"0",
"/",
"1",
"Strings",
"are",
"either",
"passed",
"through",
"or",
"enclosed",
"in",
"the",
"characters",
"specified",
"in",
"str_quote_style",
"(",
"e",
".",
"g",
".",
"'",
"\"",
"'"
] |
[
"\"\"\" Convert a parameter value to string suitable to be passed to an EDA tool\n\n Rules:\n - Booleans are represented as 0/1\n - Strings are either passed through or enclosed in the characters specified\n in str_quote_style (e.g. '\"' or '\\\\\"')\n - Everything else (including int, float, etc.) are converted using the str()\n function.\n \"\"\""
] |
[
{
"param": "value",
"type": null
},
{
"param": "str_quote_style",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "str_quote_style",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def jinja_filter_param_value_str(value, str_quote_style=""):
if type(value) == bool:
if (value) == True:
return '1'
else:
return '0'
elif type(value) == str:
return str_quote_style + str(value) + str_quote_style
else:
return str(value)
| 754 | 1,012 |
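Quick checks for the jinja_filter_param_value_str record above (values are illustrative):

print(jinja_filter_param_value_str(True))        # 1
print(jinja_filter_param_value_str(3.14))        # 3.14
print(jinja_filter_param_value_str('top', '"'))  # "top"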
e8f0c78bcd23a80029086e52723d0df6b1d0e3a2
|
haohongxiang/PaddleNLP
|
examples/text_to_sql/IGSQL/model/model_utils.py
|
[
"Apache-2.0"
] |
Python
|
per_token_accuracy
|
<not_specific>
|
def per_token_accuracy(gold_seq, pred_seq):
""" Returns the per-token accuracy comparing two strings (recall).
Args:
gold_seq (`list`): A list of gold tokens.
pred_seq (`list`): A list of predicted tokens.
Returns:
`float`: Representing the accuracy.
"""
num_correct = 0
for i, gold_token in enumerate(gold_seq):
if i < len(pred_seq) and pred_seq[i] == gold_token:
num_correct += 1
return float(num_correct) / len(gold_seq)
|
Returns the per-token accuracy comparing two strings (recall).
Args:
gold_seq (`list`): A list of gold tokens.
pred_seq (`list`): A list of predicted tokens.
Returns:
`float`: Representing the accuracy.
|
Returns the per-token accuracy comparing two strings (recall).
|
[
"Returns",
"the",
"per",
"-",
"token",
"accuracy",
"comparing",
"two",
"strings",
"(",
"recall",
")",
"."
] |
def per_token_accuracy(gold_seq, pred_seq):
num_correct = 0
for i, gold_token in enumerate(gold_seq):
if i < len(pred_seq) and pred_seq[i] == gold_token:
num_correct += 1
return float(num_correct) / len(gold_seq)
|
[
"def",
"per_token_accuracy",
"(",
"gold_seq",
",",
"pred_seq",
")",
":",
"num_correct",
"=",
"0",
"for",
"i",
",",
"gold_token",
"in",
"enumerate",
"(",
"gold_seq",
")",
":",
"if",
"i",
"<",
"len",
"(",
"pred_seq",
")",
"and",
"pred_seq",
"[",
"i",
"]",
"==",
"gold_token",
":",
"num_correct",
"+=",
"1",
"return",
"float",
"(",
"num_correct",
")",
"/",
"len",
"(",
"gold_seq",
")"
] |
Returns the per-token accuracy comparing two strings (recall).
|
[
"Returns",
"the",
"per",
"-",
"token",
"accuracy",
"comparing",
"two",
"strings",
"(",
"recall",
")",
"."
] |
[
"\"\"\" Returns the per-token accuracy comparing two strings (recall).\n\n Args:\n gold_seq (`list`): A list of gold tokens.\n pred_seq (`list`): A list of predicted tokens.\n\n Returns:\n `float`: Representing the accuracy.\n \"\"\""
] |
[
{
"param": "gold_seq",
"type": null
},
{
"param": "pred_seq",
"type": null
}
] |
{
"returns": [
{
"docstring": "Representing the accuracy.",
"docstring_tokens": [
"Representing",
"the",
"accuracy",
"."
],
"type": "`float`"
}
],
"raises": [],
"params": [
{
"identifier": "gold_seq",
"type": null,
"docstring": "A list of gold tokens.",
"docstring_tokens": [
"A",
"list",
"of",
"gold",
"tokens",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "pred_seq",
"type": null,
"docstring": "A list of predicted tokens.",
"docstring_tokens": [
"A",
"list",
"of",
"predicted",
"tokens",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def per_token_accuracy(gold_seq, pred_seq):
num_correct = 0
for i, gold_token in enumerate(gold_seq):
if i < len(pred_seq) and pred_seq[i] == gold_token:
num_correct += 1
return float(num_correct) / len(gold_seq)
| 755 | 714 |
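A minimal usage sketch for per_token_accuracy above; the token lists are invented:

    gold = ["SELECT", "name", "FROM", "users"]
    pred = ["SELECT", "id", "FROM", "users"]
    # 3 of 4 positions match, so the per-token recall is 0.75
    assert per_token_accuracy(gold, pred) == 0.75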
7a19f7bf9bc4e5d2c863142e6389d2620f89b9f3
|
young-oct/complex_sporco
|
docs/source/automodule.py
|
[
"BSD-3-Clause"
] |
Python
|
sort_by_list_order
|
<not_specific>
|
def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False,
slemap=None):
"""
Sort a list according to the order of entries in a reference list.
Parameters
----------
sortlist : list
List to be sorted
reflist : list
Reference list defining sorting order
reverse : bool, optional (default False)
Flag indicating whether to sort in reverse order
fltr : bool, optional (default False)
Flag indicating whether to filter `sortlist` to remove any entries
that are not in `reflist`
slemap : function or None, optional (default None)
Function mapping a sortlist entry to the form of an entry in
`reflist`
Returns
-------
sortedlist : list
Sorted (and possibly filtered) version of sortlist
"""
def keyfunc(entry):
if slemap is not None:
rle = slemap(entry)
if rle in reflist:
# Ordering index taken from reflist
return reflist.index(rle)
else:
# Ordering index taken from sortlist, offset
# by the length of reflist so that entries
# that are not in reflist retain their order
# in sortlist
return sortlist.index(entry) + len(reflist)
if fltr:
if slemap:
sortlist = filter(lambda x: slemap(x) in reflist, sortlist)
else:
sortlist = filter(lambda x: x in reflist, sortlist)
return sorted(sortlist, key=keyfunc, reverse=reverse)
|
Sort a list according to the order of entries in a reference list.
Parameters
----------
sortlist : list
List to be sorted
reflist : list
Reference list defining sorting order
reverse : bool, optional (default False)
Flag indicating whether to sort in reverse order
fltr : bool, optional (default False)
Flag indicating whether to filter `sortlist` to remove any entries
that are not in `reflist`
slemap : function or None, optional (default None)
Function mapping a sortlist entry to the form of an entry in
`reflist`
Returns
-------
sortedlist : list
Sorted (and possibly filtered) version of sortlist
|
Sort a list according to the order of entries in a reference list.
Parameters
Returns
sortedlist : list
Sorted (and possibly filtered) version of sortlist
|
[
"Sort",
"a",
"list",
"according",
"to",
"the",
"order",
"of",
"entries",
"in",
"a",
"reference",
"list",
".",
"Parameters",
"Returns",
"sortedlist",
":",
"list",
"Sorted",
"(",
"and",
"possibly",
"filtered",
")",
"version",
"of",
"sortlist"
] |
def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False,
slemap=None):
def keyfunc(entry):
if slemap is not None:
rle = slemap(entry)
if rle in reflist:
return reflist.index(rle)
else:
return sortlist.index(entry) + len(reflist)
if fltr:
if slemap:
sortlist = filter(lambda x: slemap(x) in reflist, sortlist)
else:
sortlist = filter(lambda x: x in reflist, sortlist)
return sorted(sortlist, key=keyfunc, reverse=reverse)
|
[
"def",
"sort_by_list_order",
"(",
"sortlist",
",",
"reflist",
",",
"reverse",
"=",
"False",
",",
"fltr",
"=",
"False",
",",
"slemap",
"=",
"None",
")",
":",
"def",
"keyfunc",
"(",
"entry",
")",
":",
"if",
"slemap",
"is",
"not",
"None",
":",
"rle",
"=",
"slemap",
"(",
"entry",
")",
"if",
"rle",
"in",
"reflist",
":",
"return",
"reflist",
".",
"index",
"(",
"rle",
")",
"else",
":",
"return",
"sortlist",
".",
"index",
"(",
"entry",
")",
"+",
"len",
"(",
"reflist",
")",
"if",
"fltr",
":",
"if",
"slemap",
":",
"sortlist",
"=",
"filter",
"(",
"lambda",
"x",
":",
"slemap",
"(",
"x",
")",
"in",
"reflist",
",",
"sortlist",
")",
"else",
":",
"sortlist",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
"in",
"reflist",
",",
"sortlist",
")",
"return",
"sorted",
"(",
"sortlist",
",",
"key",
"=",
"keyfunc",
",",
"reverse",
"=",
"reverse",
")"
] |
Sort a list according to the order of entries in a reference list.
|
[
"Sort",
"a",
"list",
"according",
"to",
"the",
"order",
"of",
"entries",
"in",
"a",
"reference",
"list",
"."
] |
[
"\"\"\"\n Sort a list according to the order of entries in a reference list.\n\n Parameters\n ----------\n sortlist : list\n List to be sorted\n reflist : list\n Reference list defining sorting order\n reverse : bool, optional (default False)\n Flag indicating whether to sort in reverse order\n fltr : bool, optional (default False)\n Flag indicating whether to filter `sortlist` to remove any entries\n that are not in `reflist`\n slemap : function or None, optional (default None)\n Function mapping a sortlist entry to the form of an entry in\n `reflist`\n\n Returns\n -------\n sortedlist : list\n Sorted (and possibly filtered) version of sortlist\n \"\"\"",
"# Ordering index taken from reflist",
"# Ordering index taken from sortlist, offset",
"# by the length of reflist so that entries",
"# that are not in reflist retain their order",
"# in sortlist"
] |
[
{
"param": "sortlist",
"type": null
},
{
"param": "reflist",
"type": null
},
{
"param": "reverse",
"type": null
},
{
"param": "fltr",
"type": null
},
{
"param": "slemap",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "sortlist",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "reflist",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "reverse",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fltr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "slemap",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False,
slemap=None):
def keyfunc(entry):
if slemap is not None:
rle = slemap(entry)
if rle in reflist:
return reflist.index(rle)
else:
return sortlist.index(entry) + len(reflist)
if fltr:
if slemap:
sortlist = filter(lambda x: slemap(x) in reflist, sortlist)
else:
sortlist = filter(lambda x: x in reflist, sortlist)
return sorted(sortlist, key=keyfunc, reverse=reverse)
| 756 | 843 |
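A minimal usage sketch for sort_by_list_order above; values are invented. The slemap path is exercised here because, as tokenized, keyfunc only binds rle when slemap is given:

    ref = ["b", "c", "a"]
    # Keep entries whose lowercased form is in ref, then order them by ref
    result = sort_by_list_order(["A", "B", "X"], ref, fltr=True, slemap=str.lower)
    assert result == ["B", "A"]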
4adce89b1de83297f03eea1a760bd3994deda41e
|
twisted/quotient
|
xquotient/exmess.py
|
[
"MIT"
] |
Python
|
isFrozen
|
<not_specific>
|
def isFrozen(status):
"""
Return a boolean indicating whether the given status name is frozen or not.
@type status: C{unicode}
@rtype: C{bool}
"""
return status.startswith(u'.')
|
Return a boolean indicating whether the given status name is frozen or not.
@type status: C{unicode}
@rtype: C{bool}
|
Return a boolean indicating whether the given status name is frozen or not.
|
[
"Return",
"a",
"boolean",
"indicating",
"whether",
"the",
"given",
"status",
"name",
"is",
"frozen",
"or",
"not",
"."
] |
def isFrozen(status):
return status.startswith(u'.')
|
[
"def",
"isFrozen",
"(",
"status",
")",
":",
"return",
"status",
".",
"startswith",
"(",
"u'.'",
")"
] |
Return a boolean indicating whether the given status name is frozen or not.
|
[
"Return",
"a",
"boolean",
"indicating",
"whether",
"the",
"given",
"status",
"name",
"is",
"frozen",
"or",
"not",
"."
] |
[
"\"\"\"\n Return a boolean indicating whether the given status name is frozen or not.\n\n @type status: C{unicode}\n @rtype: C{bool}\n \"\"\""
] |
[
{
"param": "status",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "C{bool}"
}
],
"raises": [],
"params": [
{
"identifier": "status",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def isFrozen(status):
return status.startswith(u'.')
| 757 | 163 |
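A minimal usage sketch for isFrozen above:

    assert isFrozen(u'.trash') is True    # a leading dot marks a frozen status
    assert isFrozen(u'inbox') is False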
adbae2f6f1b020c9821eec5d99b62a3704477e5d
|
ussjoin/rpi-docker-disco
|
bin/ddfscli.py
|
[
"BSD-3-Clause"
] |
Python
|
exists
| null |
def exists(program, tag):
"""Usage: tag
Check if a given tag exists.
Prints 'True' or 'False' and returns the appropriate exit status.
"""
if not program.ddfs.exists(tag):
raise Exception("False")
print("True")
|
Usage: tag
Check if a given tag exists.
Prints 'True' or 'False' and returns the appropriate exit status.
|
tag
Check if a given tag exists.
Prints 'True' or 'False' and returns the appropriate exit status.
|
[
"tag",
"Check",
"if",
"a",
"given",
"tag",
"exists",
".",
"Prints",
"'",
"True",
"'",
"or",
"'",
"False",
"'",
"and",
"returns",
"the",
"appropriate",
"exit",
"status",
"."
] |
def exists(program, tag):
if not program.ddfs.exists(tag):
raise Exception("False")
print("True")
|
[
"def",
"exists",
"(",
"program",
",",
"tag",
")",
":",
"if",
"not",
"program",
".",
"ddfs",
".",
"exists",
"(",
"tag",
")",
":",
"raise",
"Exception",
"(",
"\"False\"",
")",
"print",
"(",
"\"True\"",
")"
] |
Usage: tag
Check if a given tag exists.
|
[
"Usage",
":",
"tag",
"Check",
"if",
"a",
"given",
"tag",
"exists",
"."
] |
[
"\"\"\"Usage: tag\n\n Check if a given tag exists.\n Prints 'True' or 'False' and returns the appropriate exit status.\n \"\"\""
] |
[
{
"param": "program",
"type": null
},
{
"param": "tag",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "program",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "tag",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def exists(program, tag):
if not program.ddfs.exists(tag):
raise Exception("False")
print("True")
| 758 | 797 |
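A minimal usage sketch for exists above; the program object is a hand-rolled stand-in for the real DDFS handle:

    class _FakeDDFS:
        def exists(self, tag):
            return tag == "data:known"

    class _FakeProgram:
        ddfs = _FakeDDFS()

    exists(_FakeProgram(), "data:known")      # prints: True
    # exists(_FakeProgram(), "data:missing")  # would raise Exception("False")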
7a47dfafc1006385eb91b57c27a79608ceb605c7
|
google/init2winit
|
init2winit/utils.py
|
[
"Apache-2.0"
] |
Python
|
run_in_parallel
|
<not_specific>
|
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
"""Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
Adapted from code by mlbileschi.
Args:
function: a function.
list_of_kwargs_to_function: list of dictionary from string to argument
value. These will be passed into `function` as kwargs.
num_workers: int.
Returns:
list of return values from function.
"""
if num_workers < 1:
raise ValueError(
'Number of workers must be greater than 0. Was {}'.format(num_workers))
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
futures = []
logging.info(
'Adding %d jobs to process pool to run in %d parallel '
'threads.', len(list_of_kwargs_to_function), num_workers)
for kwargs in list_of_kwargs_to_function:
f = executor.submit(function, **kwargs)
futures.append(f)
for f in concurrent.futures.as_completed(futures):
if f.exception():
# Propagate exception to main thread.
raise f.exception()
return [f.result() for f in futures]
|
Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
Adapted from code by mlbileschi.
Args:
function: a function.
list_of_kwargs_to_function: list of dictionary from string to argument
value. These will be passed into `function` as kwargs.
num_workers: int.
Returns:
list of return values from function.
|
Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
Adapted from code by mlbileschi.
|
[
"Run",
"a",
"function",
"on",
"a",
"list",
"of",
"kwargs",
"in",
"parallel",
"with",
"ThreadPoolExecutor",
".",
"Adapted",
"from",
"code",
"by",
"mlbileschi",
"."
] |
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
if num_workers < 1:
raise ValueError(
'Number of workers must be greater than 0. Was {}'.format(num_workers))
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
futures = []
logging.info(
'Adding %d jobs to process pool to run in %d parallel '
'threads.', len(list_of_kwargs_to_function), num_workers)
for kwargs in list_of_kwargs_to_function:
f = executor.submit(function, **kwargs)
futures.append(f)
for f in concurrent.futures.as_completed(futures):
if f.exception():
raise f.exception()
return [f.result() for f in futures]
|
[
"def",
"run_in_parallel",
"(",
"function",
",",
"list_of_kwargs_to_function",
",",
"num_workers",
")",
":",
"if",
"num_workers",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Number of workers must be greater than 0. Was {}'",
".",
"format",
"(",
"num_workers",
")",
")",
"with",
"concurrent",
".",
"futures",
".",
"ThreadPoolExecutor",
"(",
"num_workers",
")",
"as",
"executor",
":",
"futures",
"=",
"[",
"]",
"logging",
".",
"info",
"(",
"'Adding %d jobs to process pool to run in %d parallel '",
"'threads.'",
",",
"len",
"(",
"list_of_kwargs_to_function",
")",
",",
"num_workers",
")",
"for",
"kwargs",
"in",
"list_of_kwargs_to_function",
":",
"f",
"=",
"executor",
".",
"submit",
"(",
"function",
",",
"**",
"kwargs",
")",
"futures",
".",
"append",
"(",
"f",
")",
"for",
"f",
"in",
"concurrent",
".",
"futures",
".",
"as_completed",
"(",
"futures",
")",
":",
"if",
"f",
".",
"exception",
"(",
")",
":",
"raise",
"f",
".",
"exception",
"(",
")",
"return",
"[",
"f",
".",
"result",
"(",
")",
"for",
"f",
"in",
"futures",
"]"
] |
Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
|
[
"Run",
"a",
"function",
"on",
"a",
"list",
"of",
"kwargs",
"in",
"parallel",
"with",
"ThreadPoolExecutor",
"."
] |
[
"\"\"\"Run a function on a list of kwargs in parallel with ThreadPoolExecutor.\n\n Adapted from code by mlbileschi.\n Args:\n function: a function.\n list_of_kwargs_to_function: list of dictionary from string to argument\n value. These will be passed into `function` as kwargs.\n num_workers: int.\n\n Returns:\n list of return values from function.\n \"\"\"",
"# Propagate exception to main thread."
] |
[
{
"param": "function",
"type": null
},
{
"param": "list_of_kwargs_to_function",
"type": null
},
{
"param": "num_workers",
"type": null
}
] |
{
"returns": [
{
"docstring": "list of return values from function.",
"docstring_tokens": [
"list",
"of",
"return",
"values",
"from",
"function",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "function",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "list_of_kwargs_to_function",
"type": null,
"docstring": "list of dictionary from string to argument\nvalue. These will be passed into `function` as kwargs.",
"docstring_tokens": [
"list",
"of",
"dictionary",
"from",
"string",
"to",
"argument",
"value",
".",
"These",
"will",
"be",
"passed",
"into",
"`",
"function",
"`",
"as",
"kwargs",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "num_workers",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import logging
import concurrent.futures
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
if num_workers < 1:
raise ValueError(
'Number of workers must be greater than 0. Was {}'.format(num_workers))
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
futures = []
logging.info(
'Adding %d jobs to process pool to run in %d parallel '
'threads.', len(list_of_kwargs_to_function), num_workers)
for kwargs in list_of_kwargs_to_function:
f = executor.submit(function, **kwargs)
futures.append(f)
for f in concurrent.futures.as_completed(futures):
if f.exception():
raise f.exception()
return [f.result() for f in futures]
| 760 | 340 |
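A minimal usage sketch for run_in_parallel above; the worker function and kwargs are invented:

    kwargs_list = [{"a": i, "b": i + 1} for i in range(4)]
    results = run_in_parallel(lambda a, b: a + b, kwargs_list, num_workers=2)
    # Results follow input order, not completion order
    assert results == [1, 3, 5, 7]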
7fa078f6c0af0bb2df6b2d5d4a7bd29f55f5bae8
|
tripincloud/MultiAgentAI
|
adv_coop_multiagent_pathfinding/search/probleme.py
|
[
"MIT"
] |
Python
|
distManhattan
|
<not_specific>
|
def distManhattan(p1, p2):
""" calcule la distance de Manhattan entre le tuple
p1 et le tuple p2
"""
(x1, y1) = p1
(x2, y2) = p2
return abs(x1-x2)+abs(y1-y2)
|
computes the Manhattan distance between the tuple
p1 and the tuple p2
|
computes the Manhattan distance between the tuple
p1 and the tuple p2
|
[
"calcule",
"la",
"distance",
"de",
"Manhattan",
"entre",
"le",
"tuple",
"p1",
"et",
"le",
"tuple",
"p2"
] |
def distManhattan(p1, p2):
(x1, y1) = p1
(x2, y2) = p2
return abs(x1-x2)+abs(y1-y2)
|
[
"def",
"distManhattan",
"(",
"p1",
",",
"p2",
")",
":",
"(",
"x1",
",",
"y1",
")",
"=",
"p1",
"(",
"x2",
",",
"y2",
")",
"=",
"p2",
"return",
"abs",
"(",
"x1",
"-",
"x2",
")",
"+",
"abs",
"(",
"y1",
"-",
"y2",
")"
] |
computes the Manhattan distance between the tuple
p1 and the tuple p2
|
[
"calcule",
"la",
"distance",
"de",
"Manhattan",
"entre",
"le",
"tuple",
"p1",
"et",
"le",
"tuple",
"p2"
] |
[
"\"\"\" calcule la distance de Manhattan entre le tuple \n p1 et le tuple p2\n \"\"\""
] |
[
{
"param": "p1",
"type": null
},
{
"param": "p2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "p1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "p2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def distManhattan(p1, p2):
(x1, y1) = p1
(x2, y2) = p2
return abs(x1-x2)+abs(y1-y2)
| 762 | 805 |
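A minimal usage sketch for distManhattan above:

    assert distManhattan((0, 0), (3, 4)) == 7   # |0-3| + |0-4|
    assert distManhattan((2, 5), (2, 1)) == 4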
77c2f6f110611b30daa2815da9ba536a68100dae
|
Smithsonian/cheby_checker
|
cheby_checker/sifter_query.py
|
[
"MIT"
] |
Python
|
_check_query_inputs
|
<not_specific>
|
def _check_query_inputs( cheby_dict_for_orbit , param_dict ):
"""
Check whether the inputs are of the allowed format
"""
# Are they both dictionaries ?
assert isinstance(cheby_dict_for_orbit , dict) and isinstance(param_dict , dict), \
' ... not dictionaries ... types = [%r, %r]' % ( type(cheby_dict_for_orbit), type(param_dict) )
# Does the cheby_dict have the correct content/format ?
# cheby.check_validity( cheby_dict_for_orbit )
# Does the param_dict have the correct keys ?
return True
|
Check whether the inputs are of the allowed format
|
Check whether the inputs are of the allowed format
|
[
"Check",
"whether",
"the",
"inputs",
"are",
"of",
"the",
"allowed",
"format"
] |
def _check_query_inputs( cheby_dict_for_orbit , param_dict ):
assert isinstance(cheby_dict_for_orbit , dict) and isinstance(param_dict , dict), \
' ... not dictionaries ... types = [%r, %r]' % ( type(cheby_dict_for_orbit), type(param_dict) )
return True
|
[
"def",
"_check_query_inputs",
"(",
"cheby_dict_for_orbit",
",",
"param_dict",
")",
":",
"assert",
"isinstance",
"(",
"cheby_dict_for_orbit",
",",
"dict",
")",
"and",
"isinstance",
"(",
"param_dict",
",",
"dict",
")",
",",
"' ... not dictionaries ... types = [%r, %r]'",
"%",
"(",
"type",
"(",
"cheby_dict_for_orbit",
")",
",",
"type",
"(",
"param_dict",
")",
")",
"return",
"True"
] |
Check whether the inputs are of the allowed format
|
[
"Check",
"whether",
"the",
"inputs",
"are",
"of",
"the",
"allowed",
"format"
] |
[
"\"\"\"\n Check whether the inputs are of the allowed format\n \"\"\"",
"# Are they both dictionaries ?",
"# Does the cheby_dict have the correct content/format ?",
"# cheby.check_validity( cheby_dict_for_orbit )",
"# Does the param_dict have the correct keys ?"
] |
[
{
"param": "cheby_dict_for_orbit",
"type": null
},
{
"param": "param_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cheby_dict_for_orbit",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "param_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _check_query_inputs( cheby_dict_for_orbit , param_dict ):
assert isinstance(cheby_dict_for_orbit , dict) and isinstance(param_dict , dict), \
' ... not dictionaries ... types = [%r, %r]' % ( type(cheby_dict_for_orbit), type(param_dict) )
return True
| 763 | 51 |
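A minimal usage sketch for _check_query_inputs above; the dictionary contents are invented:

    assert _check_query_inputs({"coeffs": []}, {"obs_code": "500"}) is True
    # _check_query_inputs([], {})  # would fail the assert with the types message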
b3d3cb471c25765cfc5920a548a7e0f03b53fa91
|
schillerlab/sc-toolbox
|
sc_toolbox/api/util/__init__.py
|
[
"MIT"
] |
Python
|
timestamp
|
<not_specific>
|
def timestamp():
"""
Custom timestamp in common EU format.
Returns:
datetime timestamp
"""
import datetime
import time
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%d-%m-%Y %H:%M:%S")
return st
|
Custom timestamp in common EU format.
Returns:
datetime timestamp
|
Custom timestamp in common EU format.
|
[
"Custom",
"timestamp",
"in",
"common",
"EU",
"format",
"."
] |
def timestamp():
import datetime
import time
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%d-%m-%Y %H:%M:%S")
return st
|
[
"def",
"timestamp",
"(",
")",
":",
"import",
"datetime",
"import",
"time",
"ts",
"=",
"time",
".",
"time",
"(",
")",
"st",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"ts",
")",
".",
"strftime",
"(",
"\"%d-%m-%Y %H:%M:%S\"",
")",
"return",
"st"
] |
Custom timestamp in common EU format.
|
[
"Custom",
"timestamp",
"in",
"common",
"EU",
"format",
"."
] |
[
"\"\"\"\n Custom timestamp in common EU format.\n\n Returns:\n datetime timestamp\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import time
import datetime
def timestamp():
import datetime
import time
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%d-%m-%Y %H:%M:%S")
return st
| 765 | 576 |
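A minimal usage sketch for timestamp above:

    st = timestamp()
    print(st)   # e.g. '05-03-2024 14:07:31' (DD-MM-YYYY HH:MM:SS)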
b56df7edc1f92380ad56693bdebc7effea57c849
|
justanothergithubber/course-materials
|
50.004 Introduction to Algorithms/document_distance_sample/docdist6.py
|
[
"MIT"
] |
Python
|
merge
|
<not_specific>
|
def merge(L,R):
"""
Given two sorted sequences L and R, return their merge.
"""
i = 0
j = 0
answer = []
while i<len(L) and j<len(R):
if L[i]<R[j]:
answer.append(L[i])
i += 1
else:
answer.append(R[j])
j += 1
if i<len(L):
answer.extend(L[i:])
if j<len(R):
answer.extend(R[j:])
return answer
|
Given two sorted sequences L and R, return their merge.
|
Given two sorted sequences L and R, return their merge.
|
[
"Given",
"two",
"sorted",
"sequences",
"L",
"and",
"R",
"return",
"their",
"merge",
"."
] |
def merge(L,R):
i = 0
j = 0
answer = []
while i<len(L) and j<len(R):
if L[i]<R[j]:
answer.append(L[i])
i += 1
else:
answer.append(R[j])
j += 1
if i<len(L):
answer.extend(L[i:])
if j<len(R):
answer.extend(R[j:])
return answer
|
[
"def",
"merge",
"(",
"L",
",",
"R",
")",
":",
"i",
"=",
"0",
"j",
"=",
"0",
"answer",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"L",
")",
"and",
"j",
"<",
"len",
"(",
"R",
")",
":",
"if",
"L",
"[",
"i",
"]",
"<",
"R",
"[",
"j",
"]",
":",
"answer",
".",
"append",
"(",
"L",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"else",
":",
"answer",
".",
"append",
"(",
"R",
"[",
"j",
"]",
")",
"j",
"+=",
"1",
"if",
"i",
"<",
"len",
"(",
"L",
")",
":",
"answer",
".",
"extend",
"(",
"L",
"[",
"i",
":",
"]",
")",
"if",
"j",
"<",
"len",
"(",
"R",
")",
":",
"answer",
".",
"extend",
"(",
"R",
"[",
"j",
":",
"]",
")",
"return",
"answer"
] |
Given two sorted sequences L and R, return their merge.
|
[
"Given",
"two",
"sorted",
"sequences",
"L",
"and",
"R",
"return",
"their",
"merge",
"."
] |
[
"\"\"\"\n Given two sorted sequences L and R, return their merge.\n \"\"\""
] |
[
{
"param": "L",
"type": null
},
{
"param": "R",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "L",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "R",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def merge(L,R):
i = 0
j = 0
answer = []
while i<len(L) and j<len(R):
if L[i]<R[j]:
answer.append(L[i])
i += 1
else:
answer.append(R[j])
j += 1
if i<len(L):
answer.extend(L[i:])
if j<len(R):
answer.extend(R[j:])
return answer
| 766 | 721 |
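A minimal usage sketch for merge above; both inputs must already be sorted:

    assert merge([1, 4, 9], [2, 3, 10]) == [1, 2, 3, 4, 9, 10]
    assert merge([], [5, 6]) == [5, 6]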
c567f6833f53f5339231bf74817bf46aa65e6237
|
DDDong2666/tum-adlr-ws20-02
|
wzk/numpy2.py
|
[
"MIT"
] |
Python
|
shape_wrapper
|
<not_specific>
|
def shape_wrapper(shape=None):
"""
Note the inconsistent usage of size / shape as function arguments in numpy.
https://stackoverflow.com/questions/44804965/numpy-size-vs-shape-in-function-arguments
-> use shape
"""
if shape is None:
return ()
elif isinstance(shape, int):
return shape,
elif isinstance(shape, tuple):
return shape
else:
raise ValueError(f"Unknown 'shape': {shape}")
|
Note the inconsistent usage of size / shape as function arguments in numpy.
https://stackoverflow.com/questions/44804965/numpy-size-vs-shape-in-function-arguments
-> use shape
|
Note the inconsistent usage of size / shape as function arguments in numpy.
|
[
"Note",
"the",
"inconsistent",
"usage",
"of",
"shape",
"/",
"shape",
"as",
"function",
"arguments",
"in",
"numpy",
"."
] |
def shape_wrapper(shape=None):
if shape is None:
return ()
elif isinstance(shape, int):
return shape,
elif isinstance(shape, tuple):
return shape
else:
raise ValueError(f"Unknown 'shape': {shape}")
|
[
"def",
"shape_wrapper",
"(",
"shape",
"=",
"None",
")",
":",
"if",
"shape",
"is",
"None",
":",
"return",
"(",
")",
"elif",
"isinstance",
"(",
"shape",
",",
"int",
")",
":",
"return",
"shape",
",",
"elif",
"isinstance",
"(",
"shape",
",",
"tuple",
")",
":",
"return",
"shape",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Unknown 'shape': {shape}\"",
")"
] |
Note the inconsistent usage of size / shape as function arguments in numpy.
|
[
"Note",
"the",
"inconsistent",
"usage",
"of",
"shape",
"/",
"shape",
"as",
"function",
"arguments",
"in",
"numpy",
"."
] |
[
"\"\"\"\n Note the inconsistent usage of shape / shape as function arguments in numpy.\n https://stackoverflow.com/questions/44804965/numpy-size-vs-shape-in-function-arguments\n -> use shape\n \"\"\""
] |
[
{
"param": "shape",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "shape",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def shape_wrapper(shape=None):
if shape is None:
return ()
elif isinstance(shape, int):
return shape,
elif isinstance(shape, tuple):
return shape
else:
raise ValueError(f"Unknown 'shape': {shape}")
| 768 | 395 |
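A minimal usage sketch for shape_wrapper above:

    assert shape_wrapper() == ()
    assert shape_wrapper(5) == (5,)
    assert shape_wrapper((2, 3)) == (2, 3)
    # shape_wrapper([2, 3]) raises ValueError: lists are not accepted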
b374e23ddf0c768ab8d7f85a3186bc4321f0a57a
|
RacimoLab/dinf
|
dinf/discriminator.py
|
[
"MIT"
] |
Python
|
from_file
|
<not_specific>
|
def from_file(cls, filename: str | pathlib.Path):
"""
Load neural network from the given file.
:param filename: The filename of the saved model.
:return: The network object.
"""
with open(filename, "rb") as f:
data = pickle.load(f)
if data.pop("format_version", -1) != cls.format_version:
raise ValueError(
f"{filename}: saved network is not compatible with this "
"version of dinf. Either train a new network or use an "
"older version of dinf."
)
expected_fields = set(map(lambda f: f.name, dataclasses.fields(cls)))
expected_fields.remove("format_version")
assert data.keys() == expected_fields
return cls(**data)
|
Load neural network from the given file.
:param filename: The filename of the saved model.
:return: The network object.
|
Load neural network from the given file.
|
[
"Load",
"neural",
"network",
"from",
"the",
"given",
"file",
"."
] |
def from_file(cls, filename: str | pathlib.Path):
with open(filename, "rb") as f:
data = pickle.load(f)
if data.pop("format_version", -1) != cls.format_version:
raise ValueError(
f"{filename}: saved network is not compatible with this "
"version of dinf. Either train a new network or use an "
"older version of dinf."
)
expected_fields = set(map(lambda f: f.name, dataclasses.fields(cls)))
expected_fields.remove("format_version")
assert data.keys() == expected_fields
return cls(**data)
|
[
"def",
"from_file",
"(",
"cls",
",",
"filename",
":",
"str",
"|",
"pathlib",
".",
"Path",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"if",
"data",
".",
"pop",
"(",
"\"format_version\"",
",",
"-",
"1",
")",
"!=",
"cls",
".",
"format_version",
":",
"raise",
"ValueError",
"(",
"f\"{filename}: saved network is not compatible with this \"",
"\"version of dinf. Either train a new network or use an \"",
"\"older version of dinf.\"",
")",
"expected_fields",
"=",
"set",
"(",
"map",
"(",
"lambda",
"f",
":",
"f",
".",
"name",
",",
"dataclasses",
".",
"fields",
"(",
"cls",
")",
")",
")",
"expected_fields",
".",
"remove",
"(",
"\"format_version\"",
")",
"assert",
"data",
".",
"keys",
"(",
")",
"==",
"expected_fields",
"return",
"cls",
"(",
"**",
"data",
")"
] |
Load neural network from the given file.
|
[
"Load",
"neural",
"network",
"from",
"the",
"given",
"file",
"."
] |
[
"\"\"\"\n Load neural network from the given file.\n\n :param filename: The filename of the saved model.\n :return: The network object.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "filename",
"type": "str | pathlib.Path"
}
] |
{
"returns": [
{
"docstring": "The network object.",
"docstring_tokens": [
"The",
"network",
"object",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": "str | pathlib.Path",
"docstring": "The filename of the saved model.",
"docstring_tokens": [
"The",
"filename",
"of",
"the",
"saved",
"model",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import dataclasses
import pathlib
import pickle
def from_file(cls, filename: str | pathlib.Path):
with open(filename, "rb") as f:
data = pickle.load(f)
if data.pop("format_version", -1) != cls.format_version:
raise ValueError(
f"{filename}: saved network is not compatible with this "
"version of dinf. Either train a new network or use an "
"older version of dinf."
)
expected_fields = set(map(lambda f: f.name, dataclasses.fields(cls)))
expected_fields.remove("format_version")
assert data.keys() == expected_fields
return cls(**data)
| 769 | 1003 |
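A minimal usage sketch for from_file above; the Network dataclass and its fields are invented, and from_file is attached as a classmethod so cls resolves:

    import dataclasses
    import pickle

    @dataclasses.dataclass
    class Network:
        format_version: int = 1
        weights: list = None
        from_file = classmethod(from_file)   # reuse the loader from the record

    with open("/tmp/net.pkl", "wb") as f:
        pickle.dump({"format_version": 1, "weights": [0.1, 0.2]}, f)
    net = Network.from_file("/tmp/net.pkl")
    # -> Network(format_version=1, weights=[0.1, 0.2])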
4e63beb5a5ee3d01737e8440cb5f94e3fde72b9b
|
josephcslater/python-control
|
external/controls.py
|
[
"BSD-3-Clause"
] |
Python
|
CountsToFloat
|
<not_specific>
|
def CountsToFloat(counts, bits=9, vmax=2.5, vmin=-2.5):
"""Convert the integer output of ADC to a floating point number by
mulitplying by dv."""
dv = (vmax-vmin)/2**bits
return dv*counts
|
Convert the integer output of ADC to a floating point number by
multiplying by dv.
|
Convert the integer output of ADC to a floating point number by
multiplying by dv.
|
[
"Convert",
"the",
"integer",
"output",
"of",
"ADC",
"to",
"a",
"floating",
"point",
"number",
"by",
"mulitplying",
"by",
"dv",
"."
] |
def CountsToFloat(counts, bits=9, vmax=2.5, vmin=-2.5):
dv = (vmax-vmin)/2**bits
return dv*counts
|
[
"def",
"CountsToFloat",
"(",
"counts",
",",
"bits",
"=",
"9",
",",
"vmax",
"=",
"2.5",
",",
"vmin",
"=",
"-",
"2.5",
")",
":",
"dv",
"=",
"(",
"vmax",
"-",
"vmin",
")",
"/",
"2",
"**",
"bits",
"return",
"dv",
"*",
"counts"
] |
Convert the integer output of ADC to a floating point number by
multiplying by dv.
|
[
"Convert",
"the",
"integer",
"output",
"of",
"ADC",
"to",
"a",
"floating",
"point",
"number",
"by",
"mulitplying",
"by",
"dv",
"."
] |
[
"\"\"\"Convert the integer output of ADC to a floating point number by\n mulitplying by dv.\"\"\""
] |
[
{
"param": "counts",
"type": null
},
{
"param": "bits",
"type": null
},
{
"param": "vmax",
"type": null
},
{
"param": "vmin",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "counts",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "bits",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "vmax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "vmin",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def CountsToFloat(counts, bits=9, vmax=2.5, vmin=-2.5):
dv = (vmax-vmin)/2**bits
return dv*counts
| 770 | 373 |
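A minimal usage sketch for CountsToFloat above; with the defaults, dv = 5 V / 512 counts:

    assert CountsToFloat(0) == 0.0
    assert CountsToFloat(256) == 2.5   # 256 * (5 / 512)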
17dda4bd6c2f4d1fa1e4028e4afa0e4f92dd7cd7
|
benjaminpope/lightkurve
|
src/lightkurve/correctors/metrics.py
|
[
"MIT"
] |
Python
|
_unique_key_for_processing_neighbors
|
<not_specific>
|
def _unique_key_for_processing_neighbors(
corrected_lc: LightCurve,
radius: float = 6000.0,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
author: tuple = ("Kepler", "K2", "SPOC"),
flux_column: str = "sap_flux",
):
"""Returns a unique key that will determine whether a cached version of a
call to `_download_and_preprocess_neighbors` can be re-used."""
return f"{corrected_lc.ra}{corrected_lc.dec}{corrected_lc.cadenceno}{radius}{min_targets}{max_targets}{author}{flux_column}{interpolate}"
|
Returns a unique key that will determine whether a cached version of a
call to `_download_and_preprocess_neighbors` can be re-used.
|
Returns a unique key that will determine whether a cached version of a
call to `_download_and_preprocess_neighbors` can be re-used.
|
[
"Returns",
"a",
"unique",
"key",
"that",
"will",
"determine",
"whether",
"a",
"cached",
"version",
"of",
"a",
"call",
"to",
"`",
"_download_and_preprocess_neighbors",
"`",
"can",
"be",
"re",
"-",
"used",
"."
] |
def _unique_key_for_processing_neighbors(
corrected_lc: LightCurve,
radius: float = 6000.0,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
author: tuple = ("Kepler", "K2", "SPOC"),
flux_column: str = "sap_flux",
):
return f"{corrected_lc.ra}{corrected_lc.dec}{corrected_lc.cadenceno}{radius}{min_targets}{max_targets}{author}{flux_column}{interpolate}"
|
[
"def",
"_unique_key_for_processing_neighbors",
"(",
"corrected_lc",
":",
"LightCurve",
",",
"radius",
":",
"float",
"=",
"6000.0",
",",
"min_targets",
":",
"int",
"=",
"30",
",",
"max_targets",
":",
"int",
"=",
"50",
",",
"interpolate",
":",
"bool",
"=",
"False",
",",
"extrapolate",
":",
"bool",
"=",
"False",
",",
"author",
":",
"tuple",
"=",
"(",
"\"Kepler\"",
",",
"\"K2\"",
",",
"\"SPOC\"",
")",
",",
"flux_column",
":",
"str",
"=",
"\"sap_flux\"",
",",
")",
":",
"return",
"f\"{corrected_lc.ra}{corrected_lc.dec}{corrected_lc.cadenceno}{radius}{min_targets}{max_targets}{author}{flux_column}{interpolate}\""
] |
Returns a unique key that will determine whether a cached version of a
call to `_download_and_preprocess_neighbors` can be re-used.
|
[
"Returns",
"a",
"unique",
"key",
"that",
"will",
"determine",
"whether",
"a",
"cached",
"version",
"of",
"a",
"call",
"to",
"`",
"_download_and_preprocess_neighbors",
"`",
"can",
"be",
"re",
"-",
"used",
"."
] |
[
"\"\"\"Returns a unique key that will determine whether a cached version of a\n call to `_download_and_preprocess_neighbors` can be re-used.\"\"\""
] |
[
{
"param": "corrected_lc",
"type": "LightCurve"
},
{
"param": "radius",
"type": "float"
},
{
"param": "min_targets",
"type": "int"
},
{
"param": "max_targets",
"type": "int"
},
{
"param": "interpolate",
"type": "bool"
},
{
"param": "extrapolate",
"type": "bool"
},
{
"param": "author",
"type": "tuple"
},
{
"param": "flux_column",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "corrected_lc",
"type": "LightCurve",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "radius",
"type": "float",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "min_targets",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "max_targets",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "interpolate",
"type": "bool",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "extrapolate",
"type": "bool",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "author",
"type": "tuple",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "flux_column",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _unique_key_for_processing_neighbors(
corrected_lc: LightCurve,
radius: float = 6000.0,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
author: tuple = ("Kepler", "K2", "SPOC"),
flux_column: str = "sap_flux",
):
return f"{corrected_lc.ra}{corrected_lc.dec}{corrected_lc.cadenceno}{radius}{min_targets}{max_targets}{author}{flux_column}{interpolate}"
| 771 | 552 |
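A minimal usage sketch for _unique_key_for_processing_neighbors above; SimpleNamespace stands in for a real lightkurve LightCurve (and for the LightCurve name the annotation needs):

    from types import SimpleNamespace
    LightCurve = SimpleNamespace   # placeholder so the annotation resolves
    lc = SimpleNamespace(ra=290.0, dec=44.5, cadenceno=(0, 1, 2))
    key = _unique_key_for_processing_neighbors(lc)
    # -> "290.044.5(0, 1, 2)6000.03050('Kepler', 'K2', 'SPOC')sap_fluxFalse"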
34d8b627a80ece42b7fd1a1e1efdae949251a7c7
|
aeroxis/sower
|
src/sower/contract.py
|
[
"MIT"
] |
Python
|
validate_contract
| null |
def validate_contract(contract):
"""
Validates the given contract. Contract must be of type 'dict'.
"""
if len(contract) != 1:
raise ValueError('Root Level of Contract must contain '\
'1 element: sower')
if 'sower' not in contract:
raise ValueError('"sower" is not the root level element '\
'in the contract.')
plan = contract.get('sower', {}).get('plan')
if not plan:
raise ValueError("A plan was not specified in the contract."\
" The 2nd level must contain a 'plan' element.")
|
Validates the given contract. Contract must be of type 'dict'.
|
Validates the given contract. Contract must be of type 'dict'.
|
[
"Validates",
"the",
"given",
"contract",
".",
"Contract",
"must",
"be",
"of",
"type",
"'",
"dict",
"'",
"."
] |
def validate_contract(contract):
if len(contract) != 1:
raise ValueError('Root Level of Contract must contain '\
'1 element: sower')
if 'sower' not in contract:
raise ValueError('"sower" is not the root level element '\
'in the contract.')
plan = contract.get('sower', {}).get('plan')
if not plan:
raise ValueError("A plan was not specified in the contract."\
" The 2nd level must contain a 'plan' element.")
|
[
"def",
"validate_contract",
"(",
"contract",
")",
":",
"if",
"len",
"(",
"contract",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Root Level of Contract must contain '",
"'1 element: sower'",
")",
"if",
"'sower'",
"not",
"in",
"contract",
":",
"raise",
"ValueError",
"(",
"'\"sower\" is not the root level element '",
"'in the contract.'",
")",
"plan",
"=",
"contract",
".",
"get",
"(",
"'sower'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'plan'",
")",
"if",
"not",
"plan",
":",
"raise",
"ValueError",
"(",
"\"A plan was not specified in the contract.\"",
"\" The 2nd level must contain a 'plan' element.\"",
")"
] |
Validates the given contract.
|
[
"Validates",
"the",
"given",
"contract",
"."
] |
[
"\"\"\"\n Validates the given contract. Contract must be of type 'dict'.\n \"\"\""
] |
[
{
"param": "contract",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "contract",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def validate_contract(contract):
if len(contract) != 1:
raise ValueError('Root Level of Contract must contain '\
'1 element: sower')
if 'sower' not in contract:
raise ValueError('"sower" is not the root level element '\
'in the contract.')
plan = contract.get('sower', {}).get('plan')
if not plan:
raise ValueError("A plan was not specified in the contract."\
" The 2nd level must contain a 'plan' element.")
| 772 | 829 |
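A minimal usage sketch for validate_contract above; the plan contents are invented:

    validate_contract({"sower": {"plan": {"steps": []}}})   # passes silently
    # validate_contract({"sower": {}})   # raises ValueError: a plan was not specified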
da0dfad533caa07d3e9dd90a8a0d2372a0c1c072
|
anetczuk/pybraingym
|
src/pybraingym/experiment.py
|
[
"MIT"
] |
Python
|
processLastReward
| null |
def processLastReward(task, agent):
"""Store last reward when episode is done.
Step is important in edge case of Q/SARSA learning
when reward is only received in last step.
"""
observation = task.getObservation()
agent.integrateObservation(observation)
agent.getAction()
reward = task.getReward() ## repeat last reward
agent.giveReward(reward)
|
Store last reward when episode is done.
Step is important in edge case of Q/SARSA learning
when reward is only received in last step.
|
Store last reward when episode is done.
Step is important in edge case of Q/SARSA learning
when reward is only received in last step.
|
[
"Store",
"last",
"reward",
"when",
"episode",
"is",
"done",
".",
"Step",
"is",
"important",
"in",
"edge",
"case",
"of",
"Q",
"/",
"SARSA",
"learning",
"when",
"reward",
"is",
"only",
"received",
"in",
"last",
"step",
"."
] |
def processLastReward(task, agent):
observation = task.getObservation()
agent.integrateObservation(observation)
agent.getAction()
reward = task.getReward()
agent.giveReward(reward)
|
[
"def",
"processLastReward",
"(",
"task",
",",
"agent",
")",
":",
"observation",
"=",
"task",
".",
"getObservation",
"(",
")",
"agent",
".",
"integrateObservation",
"(",
"observation",
")",
"agent",
".",
"getAction",
"(",
")",
"reward",
"=",
"task",
".",
"getReward",
"(",
")",
"agent",
".",
"giveReward",
"(",
"reward",
")"
] |
Store last reward when episode is done.
|
[
"Store",
"last",
"reward",
"when",
"episode",
"is",
"done",
"."
] |
[
"\"\"\"Store last reward when episode is done.\n\n Step is important in edge case of Q/SARSA learning\n when reward is only received in last step.\n \"\"\"",
"## repeat last reward"
] |
[
{
"param": "task",
"type": null
},
{
"param": "agent",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "task",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "agent",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def processLastReward(task, agent):
observation = task.getObservation()
agent.integrateObservation(observation)
agent.getAction()
reward = task.getReward()
agent.giveReward(reward)
| 773 | 440 |
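A minimal usage sketch for processLastReward above; the task and agent are hand-rolled stand-ins with the interface the helper expects:

    class _Task:
        def getObservation(self): return [0.0]
        def getReward(self): return 1.0

    class _Agent:
        def integrateObservation(self, obs): pass
        def getAction(self): return 0
        def giveReward(self, reward): self.last_reward = reward

    agent = _Agent()
    processLastReward(_Task(), agent)
    assert agent.last_reward == 1.0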
1f1b1af829e079ef5fde5e8b86c43c573f197ede
|
scottwedge/python_scripts
|
excel_rw_test/src/analysis_data.py
|
[
"MIT"
] |
Python
|
analysis_point_max_linearity
|
<not_specific>
|
def analysis_point_max_linearity(boundary_index_list, linearity_list):
"""
analysis max linearity for center and boundary area
"""
boundary_max_linearity = [0, [0, 0, 0]]
center_max_linearity = [0, [0, 0, 0]]
for i in range(len(linearity_list)):
for j in range(len(linearity_list[i])):
max_val = max(linearity_list[i][j])
max_val_index = linearity_list[i][j].index(max_val)
if i in boundary_index_list:
if max_val > boundary_max_linearity[0]:
boundary_max_linearity[0] = max_val
boundary_max_linearity[1] = [i, j, max_val_index]
else:
if max_val > center_max_linearity[0]:
center_max_linearity[0] = max_val
center_max_linearity[1] = [i, j, max_val_index]
return boundary_max_linearity, center_max_linearity
|
analysis max linearity for center and boundary area
|
analysis max linearity for center and boundary area
|
[
"analysis",
"max",
"linearity",
"for",
"center",
"and",
"boundary",
"area"
] |
def analysis_point_max_linearity(boundary_index_list, linearity_list):
boundary_max_linearity = [0, [0, 0, 0]]
center_max_linearity = [0, [0, 0, 0]]
for i in range(len(linearity_list)):
for j in range(len(linearity_list[i])):
max_val = max(linearity_list[i][j])
max_val_index = linearity_list[i][j].index(max_val)
if i in boundary_index_list:
if max_val > boundary_max_linearity[0]:
boundary_max_linearity[0] = max_val
boundary_max_linearity[1] = [i, j, max_val_index]
else:
if max_val > center_max_linearity[0]:
center_max_linearity[0] = max_val
center_max_linearity[1] = [i, j, max_val_index]
return boundary_max_linearity, center_max_linearity
|
[
"def",
"analysis_point_max_linearity",
"(",
"boundary_index_list",
",",
"linearity_list",
")",
":",
"boundary_max_linearity",
"=",
"[",
"0",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
"center_max_linearity",
"=",
"[",
"0",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"linearity_list",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"linearity_list",
"[",
"i",
"]",
")",
")",
":",
"max_val",
"=",
"max",
"(",
"linearity_list",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"max_val_index",
"=",
"linearity_list",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"index",
"(",
"max_val",
")",
"if",
"i",
"in",
"boundary_index_list",
":",
"if",
"max_val",
">",
"boundary_max_linearity",
"[",
"0",
"]",
":",
"boundary_max_linearity",
"[",
"0",
"]",
"=",
"max_val",
"boundary_max_linearity",
"[",
"1",
"]",
"=",
"[",
"i",
",",
"j",
",",
"max_val_index",
"]",
"else",
":",
"if",
"max_val",
">",
"center_max_linearity",
"[",
"0",
"]",
":",
"center_max_linearity",
"[",
"0",
"]",
"=",
"max_val",
"center_max_linearity",
"[",
"1",
"]",
"=",
"[",
"i",
",",
"j",
",",
"max_val_index",
"]",
"return",
"boundary_max_linearity",
",",
"center_max_linearity"
] |
analysis max linearity for center and boundary area
|
[
"analysis",
"max",
"linearity",
"for",
"center",
"and",
"boundary",
"area"
] |
[
"\"\"\"\n analysis max linearity for center and boundary area\n \"\"\""
] |
[
{
"param": "boundary_index_list",
"type": null
},
{
"param": "linearity_list",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "boundary_index_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "linearity_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def analysis_point_max_linearity(boundary_index_list, linearity_list):
boundary_max_linearity = [0, [0, 0, 0]]
center_max_linearity = [0, [0, 0, 0]]
for i in range(len(linearity_list)):
for j in range(len(linearity_list[i])):
max_val = max(linearity_list[i][j])
max_val_index = linearity_list[i][j].index(max_val)
if i in boundary_index_list:
if max_val > boundary_max_linearity[0]:
boundary_max_linearity[0] = max_val
boundary_max_linearity[1] = [i, j, max_val_index]
else:
if max_val > center_max_linearity[0]:
center_max_linearity[0] = max_val
center_max_linearity[1] = [i, j, max_val_index]
return boundary_max_linearity, center_max_linearity
| 774 | 785 |
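A minimal usage sketch for analysis_point_max_linearity above; the nested linearity values are invented:

    lin = [[[0.1, 0.5], [0.2, 0.3]],   # point index 0 (boundary)
           [[0.9, 0.4], [0.6, 0.7]]]   # point index 1 (center)
    boundary, center = analysis_point_max_linearity([0], lin)
    assert boundary == [0.5, [0, 0, 1]]
    assert center == [0.9, [1, 0, 0]]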
0ceb15471ca6941f1a3c2803a1bcd3575ac7f39e
|
dell/python-powerstore
|
PyPowerStore/utils/helpers.py
|
[
"Apache-2.0"
] |
Python
|
prepare_querystring
|
<not_specific>
|
def prepare_querystring(*query_arguments, **kw_query_arguments):
"""Prepare a querystring dict containing all query_arguments and
kw_query_arguments passed.
:return: Querystring dict.
:rtype: dict
"""
querystring = dict()
for argument_dict in query_arguments:
if isinstance(argument_dict, dict):
querystring.update(argument_dict)
querystring.update(kw_query_arguments)
return querystring
|
Prepare a querystring dict containing all query_arguments and
kw_query_arguments passed.
:return: Querystring dict.
:rtype: dict
|
Prepare a querystring dict containing all query_arguments and
kw_query_arguments passed.
|
[
"Prepare",
"a",
"querystring",
"dict",
"containing",
"all",
"query_arguments",
"and",
"kw_query_arguments",
"passed",
"."
] |
def prepare_querystring(*query_arguments, **kw_query_arguments):
querystring = dict()
for argument_dict in query_arguments:
if isinstance(argument_dict, dict):
querystring.update(argument_dict)
querystring.update(kw_query_arguments)
return querystring
|
[
"def",
"prepare_querystring",
"(",
"*",
"query_arguments",
",",
"**",
"kw_query_arguments",
")",
":",
"querystring",
"=",
"dict",
"(",
")",
"for",
"argument_dict",
"in",
"query_arguments",
":",
"if",
"isinstance",
"(",
"argument_dict",
",",
"dict",
")",
":",
"querystring",
".",
"update",
"(",
"argument_dict",
")",
"querystring",
".",
"update",
"(",
"kw_query_arguments",
")",
"return",
"querystring"
] |
Prepare a querystring dict containing all query_arguments and
kw_query_arguments passed.
|
[
"Prepare",
"a",
"querystring",
"dict",
"containing",
"all",
"query_arguments",
"and",
"kw_query_arguments",
"passed",
"."
] |
[
"\"\"\"Prepare a querystring dict containing all query_arguments and\n kw_query_arguments passed.\n\n :return: Querystring dict.\n :rtype: dict\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "dict"
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def prepare_querystring(*query_arguments, **kw_query_arguments):
querystring = dict()
for argument_dict in query_arguments:
if isinstance(argument_dict, dict):
querystring.update(argument_dict)
querystring.update(kw_query_arguments)
return querystring
| 776 | 714 |
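A minimal usage sketch for prepare_querystring above:

    qs = prepare_querystring({"limit": 10}, {"offset": 0}, select="id,name")
    assert qs == {"limit": 10, "offset": 0, "select": "id,name"}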
7780d1f2f72e7bad9d4e454e345f4b9b6a4c745c
|
Seiji-Armstrong/seipy
|
seipy/base.py
|
[
"MIT"
] |
Python
|
merge_two_dicts
|
<not_specific>
|
def merge_two_dicts(dict_1, dict_2):
"""
Given two dicts, return one merged dict.
"""
return {**dict_1, **dict_2}
|
Given two dicts, return one merged dict.
|
Given two dicts, return one merged dict.
|
[
"Given",
"two",
"dicts",
"return",
"one",
"merged",
"dict",
"."
] |
def merge_two_dicts(dict_1, dict_2):
return {**dict_1, **dict_2}
|
[
"def",
"merge_two_dicts",
"(",
"dict_1",
",",
"dict_2",
")",
":",
"return",
"{",
"**",
"dict_1",
",",
"**",
"dict_2",
"}"
] |
Given two dicts, return one merged dict.
|
[
"Given",
"two",
"dicts",
"return",
"one",
"merged",
"dict",
"."
] |
[
"\"\"\"\n Given two dicts, return one merged dict.\n \"\"\""
] |
[
{
"param": "dict_1",
"type": null
},
{
"param": "dict_2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "dict_1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dict_2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def merge_two_dicts(dict_1, dict_2):
return {**dict_1, **dict_2}
| 777 | 996 |
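A minimal usage sketch for merge_two_dicts above; on key clashes dict_2 wins:

    assert merge_two_dicts({"a": 1}, {"a": 2, "b": 3}) == {"a": 2, "b": 3}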
ec7518cb824ed5accdfa856c7b10574caddb892e
|
khtg/apitools
|
apitools/base/py/encoding.py
|
[
"Apache-2.0"
] |
Python
|
DictToAdditionalPropertyMessage
|
<not_specific>
|
def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False):
"""Convert the given dictionary to an AdditionalProperty message."""
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_)
|
Convert the given dictionary to an AdditionalProperty message.
|
Convert the given dictionary to an AdditionalProperty message.
|
[
"Convert",
"the",
"given",
"dictionary",
"to",
"an",
"AdditionalProperty",
"message",
"."
] |
def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False):
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_)
|
[
"def",
"DictToAdditionalPropertyMessage",
"(",
"properties",
",",
"additional_property_type",
",",
"sort_items",
"=",
"False",
")",
":",
"items",
"=",
"properties",
".",
"items",
"(",
")",
"if",
"sort_items",
":",
"items",
"=",
"sorted",
"(",
"items",
")",
"map_",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"items",
":",
"map_",
".",
"append",
"(",
"additional_property_type",
".",
"AdditionalProperty",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
")",
"return",
"additional_property_type",
"(",
"additionalProperties",
"=",
"map_",
")"
] |
Convert the given dictionary to an AdditionalProperty message.
|
[
"Convert",
"the",
"given",
"dictionary",
"to",
"an",
"AdditionalProperty",
"message",
"."
] |
[
"\"\"\"Convert the given dictionary to an AdditionalProperty message.\"\"\""
] |
[
{
"param": "properties",
"type": null
},
{
"param": "additional_property_type",
"type": null
},
{
"param": "sort_items",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "properties",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "additional_property_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sort_items",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False):
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_)
| 778 | 674 |
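A minimal usage sketch for DictToAdditionalPropertyMessage above; _Msg is a hand-rolled stand-in for an apitools message type with a nested AdditionalProperty:

    class _Msg:
        class AdditionalProperty:
            def __init__(self, key, value):
                self.key, self.value = key, value
        def __init__(self, additionalProperties):
            self.additionalProperties = additionalProperties

    msg = DictToAdditionalPropertyMessage({"b": 2, "a": 1}, _Msg, sort_items=True)
    assert [(p.key, p.value) for p in msg.additionalProperties] == [("a", 1), ("b", 2)]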
2a0d402e4d414c6749d926911e6caa80f325adfd
|
jean-edouard-boulanger/python-quickforex
|
quickforex/api.py
|
[
"MIT"
] |
Python
|
install_provider
|
None
|
def install_provider(provider: ProviderBase) -> None:
"""Install an alternative provider to query foreign exchange rates. Note that calling this function is not needed
to use the QuickForex API because a provider is installed by default.
:param provider: Installed provider.
"""
global _INSTALLED_PROVIDER
_INSTALLED_PROVIDER = provider
|
Install an alternative provider to query foreign exchange rates. Note that calling this function is not needed
to use the QuickForex API because a provider is installed by default.
:param provider: Installed provider.
|
Install an alternative provider to query foreign exchange rates. Note that calling this function is not needed
to use the QuickForex API because a provider is installed by default.
|
[
"Install",
"an",
"alternative",
"provider",
"to",
"query",
"foreign",
"exchange",
"rates",
".",
"Note",
"that",
"calling",
"this",
"function",
"is",
"not",
"needed",
"to",
"use",
"the",
"QuickForex",
"API",
"because",
"a",
"provider",
"is",
"installed",
"by",
"default",
"."
] |
def install_provider(provider: ProviderBase) -> None:
global _INSTALLED_PROVIDER
_INSTALLED_PROVIDER = provider
|
[
"def",
"install_provider",
"(",
"provider",
":",
"ProviderBase",
")",
"->",
"None",
":",
"global",
"_INSTALLED_PROVIDER",
"_INSTALLED_PROVIDER",
"=",
"provider"
] |
Install an alternative provider to query foreign exchange rates.
|
[
"Install",
"an",
"alternative",
"provider",
"to",
"query",
"foreign",
"exchange",
"rates",
"."
] |
[
"\"\"\"Install an alternative provider to query foreign exchange rates. Note that calling this function is not needed\n to use the QuickForex API because a provider is installed by default.\n\n :param provider: Installed provider.\n \"\"\""
] |
[
{
"param": "provider",
"type": "ProviderBase"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "provider",
"type": "ProviderBase",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def install_provider(provider: ProviderBase) -> None:
global _INSTALLED_PROVIDER
_INSTALLED_PROVIDER = provider
| 779 | 351 |
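A minimal sketch of installing a custom provider; DummyProvider, its method, and the import path are illustrative assumptions, since quickforex's real ProviderBase interface is richer than shown here.
from quickforex import install_provider  # assumed public import path

class DummyProvider:  # assumed to satisfy the ProviderBase interface
    def get_latest_rate(self, currency_pair):  # hypothetical method name, for illustration only
        return 1.0

install_provider(DummyProvider())  # later quickforex API calls now use this provider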
89e8813cf7f6c8a6eea7821b86c6ff1ab918f535
|
peterlisak/pycpix
|
cpix/filters.py
|
[
"MIT"
] |
Python
|
encode_bool
|
<not_specific>
|
def encode_bool(value):
"""Encode booleans to produce valid XML"""
if value:
return "true"
return "false"
|
Encode booleans to produce valid XML
|
Encode booleans to produce valid XML
|
[
"Encode",
"booleans",
"to",
"produce",
"valid",
"XML"
] |
def encode_bool(value):
if value:
return "true"
return "false"
|
[
"def",
"encode_bool",
"(",
"value",
")",
":",
"if",
"value",
":",
"return",
"\"true\"",
"return",
"\"false\""
] |
Encode booleans to produce valid XML
|
[
"Encode",
"booleans",
"to",
"produce",
"valid",
"XML"
] |
[
"\"\"\"Encode booleans to produce valid XML\"\"\""
] |
[
{
"param": "value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def encode_bool(value):
if value:
return "true"
return "false"
| 780 | 276 |
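Quick check of the record above:
assert encode_bool(True) == "true"
assert encode_bool(0) == "false"  # any falsy value serializes as "false"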
bba14ed55863badf3e6eed33baf240df3330c1fa
|
usegalaxy-no/usegalaxy
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
|
[
"MIT"
] |
Python
|
check_params
|
<not_specific>
|
def check_params(each, fields):
"""
Each dictionary parameters validation as per the rule defined in fields.
:param each: validating each dictionary
:param fields: list of dictionary which has the set of rules.
:return: tuple which has err and message
"""
msg = ""
for f in fields:
if f['name'] not in each and f["required"] is False:
continue
if not f["name"] in each and f["required"] is True:
msg = "{0} is required and must be of type: {1}".format(f['name'], f['type'])
elif not isinstance(each[f["name"]], f["type"]):
msg = "{0} must be of type: {1}. {2} ({3}) provided.".format(
f['name'], f['type'], each[f['name']], type(each[f['name']]))
elif f['name'] in each and isinstance(each[f['name']], int) and 'min' in f:
if each[f['name']] < f['min']:
msg = "{0} must be greater than or equal to: {1}".format(f['name'], f['min'])
return msg
|
Each dictionary parameters validation as per the rule defined in fields.
:param each: validating each dictionary
:param fields: list of dictionary which has the set of rules.
:return: tuple which has err and message
|
Each dictionary parameters validation as per the rule defined in fields.
|
[
"Each",
"dictionary",
"parameters",
"validation",
"as",
"per",
"the",
"rule",
"defined",
"in",
"fields",
"."
] |
def check_params(each, fields):
msg = ""
for f in fields:
if f['name'] not in each and f["required"] is False:
continue
if not f["name"] in each and f["required"] is True:
msg = "{0} is required and must be of type: {1}".format(f['name'], f['type'])
elif not isinstance(each[f["name"]], f["type"]):
msg = "{0} must be of type: {1}. {2} ({3}) provided.".format(
f['name'], f['type'], each[f['name']], type(each[f['name']]))
elif f['name'] in each and isinstance(each[f['name']], int) and 'min' in f:
if each[f['name']] < f['min']:
msg = "{0} must be greater than or equal to: {1}".format(f['name'], f['min'])
return msg
|
[
"def",
"check_params",
"(",
"each",
",",
"fields",
")",
":",
"msg",
"=",
"\"\"",
"for",
"f",
"in",
"fields",
":",
"if",
"f",
"[",
"'name'",
"]",
"not",
"in",
"each",
"and",
"f",
"[",
"\"required\"",
"]",
"is",
"False",
":",
"continue",
"if",
"not",
"f",
"[",
"\"name\"",
"]",
"in",
"each",
"and",
"f",
"[",
"\"required\"",
"]",
"is",
"True",
":",
"msg",
"=",
"\"{0} is required and must be of type: {1}\"",
".",
"format",
"(",
"f",
"[",
"'name'",
"]",
",",
"f",
"[",
"'type'",
"]",
")",
"elif",
"not",
"isinstance",
"(",
"each",
"[",
"f",
"[",
"\"name\"",
"]",
"]",
",",
"f",
"[",
"\"type\"",
"]",
")",
":",
"msg",
"=",
"\"{0} must be of type: {1}. {2} ({3}) provided.\"",
".",
"format",
"(",
"f",
"[",
"'name'",
"]",
",",
"f",
"[",
"'type'",
"]",
",",
"each",
"[",
"f",
"[",
"'name'",
"]",
"]",
",",
"type",
"(",
"each",
"[",
"f",
"[",
"'name'",
"]",
"]",
")",
")",
"elif",
"f",
"[",
"'name'",
"]",
"in",
"each",
"and",
"isinstance",
"(",
"each",
"[",
"f",
"[",
"'name'",
"]",
"]",
",",
"int",
")",
"and",
"'min'",
"in",
"f",
":",
"if",
"each",
"[",
"f",
"[",
"'name'",
"]",
"]",
"<",
"f",
"[",
"'min'",
"]",
":",
"msg",
"=",
"\"{0} must be greater than or equal to: {1}\"",
".",
"format",
"(",
"f",
"[",
"'name'",
"]",
",",
"f",
"[",
"'min'",
"]",
")",
"return",
"msg"
] |
Each dictionary parameters validation as per the rule defined in fields.
|
[
"Each",
"dictionary",
"parameters",
"validation",
"as",
"per",
"the",
"rule",
"defined",
"in",
"fields",
"."
] |
[
"\"\"\"\n Each dictionary parameters validation as per the rule defined in fields.\n :param each: validating each dictionary\n :param fields: list of dictionary which has the set of rules.\n :return: tuple which has err and message\n \"\"\""
] |
[
{
"param": "each",
"type": null
},
{
"param": "fields",
"type": null
}
] |
{
"returns": [
{
"docstring": "tuple which has err and message",
"docstring_tokens": [
"tuple",
"which",
"has",
"err",
"and",
"message"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "each",
"type": null,
"docstring": "validating each dictionary",
"docstring_tokens": [
"validating",
"each",
"dictionary"
],
"default": null,
"is_optional": null
},
{
"identifier": "fields",
"type": null,
"docstring": "list of dictionary which has the set of rules.",
"docstring_tokens": [
"list",
"of",
"dictionary",
"which",
"has",
"the",
"set",
"of",
"rules",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def check_params(each, fields):
msg = ""
for f in fields:
if f['name'] not in each and f["required"] is False:
continue
if not f["name"] in each and f["required"] is True:
msg = "{0} is required and must be of type: {1}".format(f['name'], f['type'])
elif not isinstance(each[f["name"]], f["type"]):
msg = "{0} must be of type: {1}. {2} ({3}) provided.".format(
f['name'], f['type'], each[f['name']], type(each[f['name']]))
elif f['name'] in each and isinstance(each[f['name']], int) and 'min' in f:
if each[f['name']] < f['min']:
msg = "{0} must be greater than or equal to: {1}".format(f['name'], f['min'])
return msg
| 781 | 54 |
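A usage sketch for the validator above, with a hypothetical rule set:
fields = [
    {"name": "attribute", "type": str, "required": True},
    {"name": "count", "type": int, "required": False, "min": 1},
]
print(check_params({"attribute": "BootMode", "count": 0}, fields))
# -> count must be greater than or equal to: 1
print(check_params({"attribute": "BootMode", "count": 2}, fields))  # -> "" (no error)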
1809d8d1eb0abb802156531a59e70755902f5041
|
kbvatral/pyplanning
|
pyplanning/solvers/graphplan.py
|
[
"MIT"
] |
Python
|
check_mutex
|
<not_specific>
|
def check_mutex(level: Level, action_set, action):
"""
Determines whether an action is mutex with any of the actions
in the given set of actions at the given level
"""
for a in action_set:
if frozenset([action, a]) in level.action_mutex:
return True
return False
|
Determines whether an action is mutex with any of the actions
in the given set of actions at the given level
|
Determines whether an action is mutex with any of the actions
in the given set of actions at the given level
|
[
"Determines",
"whether",
"an",
"action",
"is",
"mutex",
"with",
"any",
"of",
"the",
"actions",
"in",
"the",
"given",
"set",
"of",
"actions",
"at",
"the",
"given",
"level"
] |
def check_mutex(level: Level, action_set, action):
for a in action_set:
if frozenset([action, a]) in level.action_mutex:
return True
return False
|
[
"def",
"check_mutex",
"(",
"level",
":",
"Level",
",",
"action_set",
",",
"action",
")",
":",
"for",
"a",
"in",
"action_set",
":",
"if",
"frozenset",
"(",
"[",
"action",
",",
"a",
"]",
")",
"in",
"level",
".",
"action_mutex",
":",
"return",
"True",
"return",
"False"
] |
Determines whether an action is mutex with any of the actions
in the given set of actions at the given level
|
[
"Determines",
"whether",
"an",
"action",
"is",
"mutex",
"with",
"any",
"of",
"the",
"actions",
"in",
"the",
"given",
"set",
"of",
"actions",
"at",
"the",
"given",
"level"
] |
[
"\"\"\"\n Determines whether an action is mutex with any of the actions\n in the given set of actions at the given level\n \"\"\""
] |
[
{
"param": "level",
"type": "Level"
},
{
"param": "action_set",
"type": null
},
{
"param": "action",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "level",
"type": "Level",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "action_set",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "action",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def check_mutex(level: Level, action_set, action):
for a in action_set:
if frozenset([action, a]) in level.action_mutex:
return True
return False
| 782 | 854 |
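Illustrative usage; the Level class here is a minimal stand-in with only the action_mutex attribute the function reads, not pyplanning's full class.
class Level:  # minimal stand-in satisfying the annotation; the real class is richer
    def __init__(self, action_mutex):
        self.action_mutex = action_mutex

level = Level(action_mutex={frozenset(['a1', 'a2'])})
print(check_mutex(level, {'a2', 'a3'}, 'a1'))  # True: a1 and a2 are mutex
print(check_mutex(level, {'a3'}, 'a1'))        # False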
538477018064d427c16182598e17640089834e03
|
laszlokiraly/LearningAlgorithms
|
ch08/timing.py
|
[
"MIT"
] |
Python
|
pq_add
|
<not_specific>
|
def pq_add(N, num):
"""Run a single trial of num heap add requests."""
return 1000*min(timeit.repeat(stmt='''
v = 0
for i in range({}):
pq.put(v)
v = (v + 137)%{}'''.format(num, N),
setup = '''
import queue
pq = queue.PriorityQueue()
for i in range(0,{},2):
pq.put(i, i)'''.format(N), repeat=5, number=1))
|
Run a single trial of num heap add requests.
|
Run a single trial of num heap add requests.
|
[
"Run",
"a",
"single",
"trial",
"of",
"num",
"heap",
"add",
"requests",
"."
] |
def pq_add(N, num):
return 1000*min(timeit.repeat(stmt='''
v = 0
for i in range({}):
pq.put(v)
v = (v + 137)%{}'''.format(num, N),
setup = '''
import queue
pq = queue.PriorityQueue()
for i in range(0,{},2):
pq.put(i, i)'''.format(N), repeat=5, number=1))
|
[
"def",
"pq_add",
"(",
"N",
",",
"num",
")",
":",
"return",
"1000",
"*",
"min",
"(",
"timeit",
".",
"repeat",
"(",
"stmt",
"=",
"'''\nv = 0\nfor i in range({}):\n pq.put(v)\n v = (v + 137)%{}'''",
".",
"format",
"(",
"num",
",",
"N",
")",
",",
"setup",
"=",
"'''\nimport queue\npq = queue.PriorityQueue()\nfor i in range(0,{},2):\n pq.put(i, i)'''",
".",
"format",
"(",
"N",
")",
",",
"repeat",
"=",
"5",
",",
"number",
"=",
"1",
")",
")"
] |
Run a single trial of num heap add requests.
|
[
"Run",
"a",
"single",
"trial",
"of",
"num",
"heap",
"add",
"requests",
"."
] |
[
"\"\"\"Run a single trial of num heap add requests.\"\"\""
] |
[
{
"param": "N",
"type": null
},
{
"param": "num",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "N",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "num",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import timeit
def pq_add(N, num):
return 1000*min(timeit.repeat(stmt='''
v = 0
for i in range({}):
pq.put(v)
v = (v + 137)%{}'''.format(num, N),
setup = '''
import queue
pq = queue.PriorityQueue()
for i in range(0,{},2):
pq.put(i, i)'''.format(N), repeat=5, number=1))
| 783 | 74 |
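Example call: best-of-5 wall time, in milliseconds, for 1000 put() calls against a queue pre-seeded with 10000 entries.
print('{:.3f} ms'.format(pq_add(10000, 1000)))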
c7c9c0cc240b12f161fe3991edb66a3d8838e7cd
|
CroatianMeteorNetwork/MeteorRadiometer
|
Python/Data Filtering/2-stage-fit_radiometer.py
|
[
"MIT"
] |
Python
|
saveCSV
| null |
def saveCSV(file_name, x_data, y_data):
""" Save a CSV file with filtered data. """
with open(file_name, 'w') as f:
for i in range(len(x_data)):
f.write('{:.6f}'.format(x_data[i]) + ',' + '{:06.6f}'.format(y_data[i]) + '\n')
|
Save a CSV file with filtered data.
|
Save a CSV file with filtered data.
|
[
"Save",
"a",
"CSV",
"file",
"with",
"filtered",
"data",
"."
] |
def saveCSV(file_name, x_data, y_data):
with open(file_name, 'w') as f:
for i in range(len(x_data)):
f.write('{:.6f}'.format(x_data[i]) + ',' + '{:06.6f}'.format(y_data[i]) + '\n')
|
[
"def",
"saveCSV",
"(",
"file_name",
",",
"x_data",
",",
"y_data",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x_data",
")",
")",
":",
"f",
".",
"write",
"(",
"'{:.6f}'",
".",
"format",
"(",
"x_data",
"[",
"i",
"]",
")",
"+",
"','",
"+",
"'{:06.6f}'",
".",
"format",
"(",
"y_data",
"[",
"i",
"]",
")",
"+",
"'\\n'",
")"
] |
Save a CSV file with filtered data.
|
[
"Save",
"a",
"CSV",
"file",
"with",
"filtered",
"data",
"."
] |
[
"\"\"\" Save a CSV file with filtered data. \"\"\""
] |
[
{
"param": "file_name",
"type": null
},
{
"param": "x_data",
"type": null
},
{
"param": "y_data",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "file_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "x_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "y_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def saveCSV(file_name, x_data, y_data):
with open(file_name, 'w') as f:
for i in range(len(x_data)):
f.write('{:.6f}'.format(x_data[i]) + ',' + '{:06.6f}'.format(y_data[i]) + '\n')
| 785 | 492 |
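Example call; each line of the output file is a comma-separated x,y pair:
xs = [0.0, 0.1, 0.2]
ys = [1.0, 0.5, 0.25]
saveCSV('filtered.csv', xs, ys)  # writes '0.000000,1.000000' etc., one pair per line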
1516b22f847258bb1ee9aa15360b85c1e481c114
|
cholmcc/jalien_py
|
examples/alien.py
|
[
"BSD-3-Clause"
] |
Python
|
runShellCMD
|
<not_specific>
|
def runShellCMD(INPUT: str = '', captureout: bool = True):
"""Run shell command in subprocess; if exists, print stdout and stderr"""
if not INPUT: return
sh_cmd = re.sub(r'^!', '', INPUT)
if captureout:
args = sh_cmd
shcmd_out = subprocess.run(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True, env = os.environ)
else:
args = shlex.split(sh_cmd)
shcmd_out = subprocess.run(args, env = os.environ)
stdout = shcmd_out.stdout
if stdout: print(stdout.decode(), flush = True)
stderr = shcmd_out.stderr
if stderr: print(stderr.decode(), flush = True)
|
Run shell command in subprocess; if exists, print stdout and stderr
|
Run shell command in subprocess; if exists, print stdout and stderr
|
[
"Run",
"shell",
"command",
"in",
"subprocess",
";",
"if",
"exists",
"print",
"stdout",
"and",
"stderr"
] |
def runShellCMD(INPUT: str = '', captureout: bool = True):
if not INPUT: return
sh_cmd = re.sub(r'^!', '', INPUT)
if captureout:
args = sh_cmd
shcmd_out = subprocess.run(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True, env = os.environ)
else:
args = shlex.split(sh_cmd)
shcmd_out = subprocess.run(args, env = os.environ)
stdout = shcmd_out.stdout
if stdout: print(stdout.decode(), flush = True)
stderr = shcmd_out.stderr
if stderr: print(stderr.decode(), flush = True)
|
[
"def",
"runShellCMD",
"(",
"INPUT",
":",
"str",
"=",
"''",
",",
"captureout",
":",
"bool",
"=",
"True",
")",
":",
"if",
"not",
"INPUT",
":",
"return",
"sh_cmd",
"=",
"re",
".",
"sub",
"(",
"r'^!'",
",",
"''",
",",
"INPUT",
")",
"if",
"captureout",
":",
"args",
"=",
"sh_cmd",
"shcmd_out",
"=",
"subprocess",
".",
"run",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
",",
"env",
"=",
"os",
".",
"environ",
")",
"else",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"sh_cmd",
")",
"shcmd_out",
"=",
"subprocess",
".",
"run",
"(",
"args",
",",
"env",
"=",
"os",
".",
"environ",
")",
"stdout",
"=",
"shcmd_out",
".",
"stdout",
"if",
"stdout",
":",
"print",
"(",
"stdout",
".",
"decode",
"(",
")",
",",
"flush",
"=",
"True",
")",
"stderr",
"=",
"shcmd_out",
".",
"stderr",
"if",
"stderr",
":",
"print",
"(",
"stderr",
".",
"decode",
"(",
")",
",",
"flush",
"=",
"True",
")"
] |
Run shell command in subprocess; if exists, print stdout and stderr
|
[
"Run",
"shell",
"command",
"in",
"subprocess",
";",
"if",
"exists",
"print",
"stdout",
"and",
"stderr"
] |
[
"\"\"\"Run shell command in subprocess; if exists, print stdout and stderr\"\"\""
] |
[
{
"param": "INPUT",
"type": "str"
},
{
"param": "captureout",
"type": "bool"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "INPUT",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "captureout",
"type": "bool",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
import re
import os
import shlex
def runShellCMD(INPUT: str = '', captureout: bool = True):
if not INPUT: return
sh_cmd = re.sub(r'^!', '', INPUT)
if captureout:
args = sh_cmd
shcmd_out = subprocess.run(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True, env = os.environ)
else:
args = shlex.split(sh_cmd)
shcmd_out = subprocess.run(args, env = os.environ)
stdout = shcmd_out.stdout
if stdout: print(stdout.decode(), flush = True)
stderr = shcmd_out.stderr
if stderr: print(stderr.decode(), flush = True)
| 786 | 548 |
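Usage sketch (assumes a POSIX shell is available; the leading '!' is optional and stripped by the regex):
runShellCMD('!echo captured')                  # output captured via PIPE, then printed
runShellCMD('echo direct', captureout=False)   # child writes straight to the terminal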
23fedc8bf16edec4584867d3bf953328240c5431
|
mjhoptics/opticalglass
|
opticalglass/glass.py
|
[
"BSD-3-Clause"
] |
Python
|
calc_glass_constants
|
<not_specific>
|
def calc_glass_constants(nd, nF, nC, *partials):
"""Given central, blue and red refractive indices, calculate Vd and PFd.
Args:
nd, nF, nC: refractive indices at central, short and long wavelengths
partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5
Returns:
V-number and relative partial dispersion from F to d
If `partials` is present, the return values include the central wavelength
index and the relative partial dispersion between the 2 refractive indices
provided from `partials`.
"""
dFC = nF-nC
vd = (nd - 1.0)/dFC
PFd = (nF-nd)/dFC
if len(partials) == 2:
n4, n5 = partials
P45 = (n4-n5)/dFC
return nd, vd, PFd, P45
return vd, PFd
|
Given central, blue and red refractive indices, calculate Vd and PFd.
Args:
nd, nF, nC: refractive indices at central, short and long wavelengths
partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5
Returns:
V-number and relative partial dispersion from F to d
If `partials` is present, the return values include the central wavelength
index and the relative partial dispersion between the 2 refractive indices
provided from `partials`.
|
Given central, blue and red refractive indices, calculate Vd and PFd.
|
[
"Given",
"central",
"blue",
"and",
"red",
"refractive",
"indices",
"calculate",
"Vd",
"and",
"PFd",
"."
] |
def calc_glass_constants(nd, nF, nC, *partials):
dFC = nF-nC
vd = (nd - 1.0)/dFC
PFd = (nF-nd)/dFC
if len(partials) == 2:
n4, n5 = partials
P45 = (n4-n5)/dFC
return nd, vd, PFd, P45
return vd, PFd
|
[
"def",
"calc_glass_constants",
"(",
"nd",
",",
"nF",
",",
"nC",
",",
"*",
"partials",
")",
":",
"dFC",
"=",
"nF",
"-",
"nC",
"vd",
"=",
"(",
"nd",
"-",
"1.0",
")",
"/",
"dFC",
"PFd",
"=",
"(",
"nF",
"-",
"nd",
")",
"/",
"dFC",
"if",
"len",
"(",
"partials",
")",
"==",
"2",
":",
"n4",
",",
"n5",
"=",
"partials",
"P45",
"=",
"(",
"n4",
"-",
"n5",
")",
"/",
"dFC",
"return",
"nd",
",",
"vd",
",",
"PFd",
",",
"P45",
"return",
"vd",
",",
"PFd"
] |
Given central, blue and red refractive indices, calculate Vd and PFd.
|
[
"Given",
"central",
"blue",
"and",
"red",
"refractive",
"indices",
"calculate",
"Vd",
"and",
"PFd",
"."
] |
[
"\"\"\"Given central, blue and red refractive indices, calculate Vd and PFd.\n \n Args:\n nd, nF, nC: refractive indices at central, short and long wavelengths\n partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5\n \n Returns:\n V-number and relative partial dispersion from F to d\n\n If `partials` is present, the return values include the central wavelength\n index and the relative partial dispersion between the 2 refractive indices\n provided from `partials`.\n \"\"\""
] |
[
{
"param": "nd",
"type": null
},
{
"param": "nF",
"type": null
},
{
"param": "nC",
"type": null
}
] |
{
"returns": [
{
"docstring": "V-number and relative partial dispersion from F to d",
"docstring_tokens": [
"V",
"-",
"number",
"and",
"relative",
"partial",
"dispersion",
"from",
"F",
"to",
"d"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "nd",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "nF",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "nC",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "nd, nF, nC",
"type": null,
"docstring": "refractive indices at central, short and long wavelengths",
"docstring_tokens": [
"refractive",
"indices",
"at",
"central",
"short",
"and",
"long",
"wavelengths"
],
"default": null,
"is_optional": null
},
{
"identifier": "partials",
"type": null,
"docstring": "if present, 2 ref indxs, n4 and n5, wl4 < wl5",
"docstring_tokens": [
"if",
"present",
"2",
"ref",
"indxs",
"n4",
"and",
"n5",
"wl4",
"<",
"wl5"
],
"default": null,
"is_optional": false
}
],
"others": []
}
|
def calc_glass_constants(nd, nF, nC, *partials):
dFC = nF-nC
vd = (nd - 1.0)/dFC
PFd = (nF-nd)/dFC
if len(partials) == 2:
n4, n5 = partials
P45 = (n4-n5)/dFC
return nd, vd, PFd, P45
return vd, PFd
| 787 | 204 |
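Worked example with indicative N-BK7-like indices (approximate values, for illustration only):
nd, nF, nC = 1.5168, 1.52238, 1.51432
vd, PFd = calc_glass_constants(nd, nF, nC)
print(round(vd, 2), round(PFd, 3))  # roughly 64.12 and 0.692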
ed39114305258133c3a70ec078b6229653f72e54
|
HotMaps/HotMaps-toolbox-service
|
api/app/decorators/restplus.py
|
[
"Apache-2.0"
] |
Python
|
handle_false_parameters
|
<not_specific>
|
def handle_false_parameters(error):
'''
decorator called with an error caused by wrong parameters
:param error -- the called error:
:return:
'''
message = 'Missing Parameter: ' + str(error)
response = {
"message": message,
"error": {
"message": message,
"status": "531",
"statusText": "PARAMETERS"
}
}
return response, 531
|
decorator called with an error caused by wrong parameters
:param error -- the called error:
:return:
|
decorator called with an error caused by wrong parameters
:param error -- the called error:
:return.
|
[
"decorator",
"called",
"with",
"an",
"error",
"caused",
"by",
"wrong",
"parameters",
":",
"param",
"error",
"--",
"the",
"called",
"error",
":",
":",
"return",
"."
] |
def handle_false_parameters(error):
message = 'Missing Parameter: ' + str(error)
response = {
"message": message,
"error": {
"message": message,
"status": "531",
"statusText": "PARAMETERS"
}
}
return response, 531
|
[
"def",
"handle_false_parameters",
"(",
"error",
")",
":",
"message",
"=",
"'Missing Parameter: '",
"+",
"str",
"(",
"error",
")",
"response",
"=",
"{",
"\"message\"",
":",
"message",
",",
"\"error\"",
":",
"{",
"\"message\"",
":",
"message",
",",
"\"status\"",
":",
"\"531\"",
",",
"\"statusText\"",
":",
"\"PARAMETERS\"",
"}",
"}",
"return",
"response",
",",
"531"
] |
decorator called with an error caused by wrong parameters
:param error -- the called error:
:return:
|
[
"decorator",
"called",
"with",
"an",
"error",
"caused",
"by",
"wrong",
"parameters",
":",
"param",
"error",
"--",
"the",
"called",
"error",
":",
":",
"return",
":"
] |
[
"'''\n decorator called with an error caused by wrong parameters\n :param error -- the called error:\n :return:\n '''"
] |
[
{
"param": "error",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "error",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def handle_false_parameters(error):
message = 'Missing Parameter: ' + str(error)
response = {
"message": message,
"error": {
"message": message,
"status": "531",
"statusText": "PARAMETERS"
}
}
return response, 531
| 788 | 220 |
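Example: the handler turns an exception into a 531 response body:
body, status = handle_false_parameters(KeyError('year'))
print(status)           # 531
print(body['message'])  # Missing Parameter: 'year'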
9962c4555aa4a617fb3be41f22ed28fdbf5345a1
|
shihjames/sc-projects
|
stanCode_projects/name_searching_system/babynames.py
|
[
"MIT"
] |
Python
|
add_data_for_name
| null |
def add_data_for_name(name_data, year, rank, name):
"""
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
"""
# Create new dictionary for new name
if name not in name_data:
name_data[name] = {}
# Renew ranking of a certain new
if year in name_data[name]:
if int(rank) < int(name_data[name][year]):
name_data[name][year] = rank
# Add new year and ranking of a certain name
else:
name_data[name][year] = rank
|
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
|
Adds the given year and rank to the associated name in the name_data dict.
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
|
[
"Adds",
"the",
"given",
"year",
"and",
"rank",
"to",
"the",
"associated",
"name",
"in",
"the",
"name_data",
"dict",
".",
"This",
"function",
"modifies",
"the",
"name_data",
"dict",
"to",
"store",
"the",
"provided",
"name",
"year",
"and",
"rank",
".",
"This",
"function",
"does",
"not",
"return",
"any",
"values",
"."
] |
def add_data_for_name(name_data, year, rank, name):
if name not in name_data:
name_data[name] = {}
if year in name_data[name]:
if int(rank) < int(name_data[name][year]):
name_data[name][year] = rank
else:
name_data[name][year] = rank
|
[
"def",
"add_data_for_name",
"(",
"name_data",
",",
"year",
",",
"rank",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"name_data",
":",
"name_data",
"[",
"name",
"]",
"=",
"{",
"}",
"if",
"year",
"in",
"name_data",
"[",
"name",
"]",
":",
"if",
"int",
"(",
"rank",
")",
"<",
"int",
"(",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
")",
":",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"=",
"rank",
"else",
":",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"=",
"rank"
] |
Adds the given year and rank to the associated name in the name_data dict.
|
[
"Adds",
"the",
"given",
"year",
"and",
"rank",
"to",
"the",
"associated",
"name",
"in",
"the",
"name_data",
"dict",
"."
] |
[
"\"\"\"\n Adds the given year and rank to the associated name in the name_data dict.\n\n Input:\n name_data (dict): dict holding baby name data\n year (str): the year of the data entry to add\n rank (str): the rank of the data entry to add\n name (str): the name of the data entry to add\n\n Output:\n This function modifies the name_data dict to store the provided\n name, year, and rank. This function does not return any values.\n\n \"\"\"",
"# Create new dictionary for new name",
"# Renew ranking of a certain new",
"# Add new year and ranking of a certain name"
] |
[
{
"param": "name_data",
"type": null
},
{
"param": "year",
"type": null
},
{
"param": "rank",
"type": null
},
{
"param": "name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "name_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "year",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rank",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_data_for_name(name_data, year, rank, name):
if name not in name_data:
name_data[name] = {}
if year in name_data[name]:
if int(rank) < int(name_data[name][year]):
name_data[name][year] = rank
else:
name_data[name][year] = rank
| 789 | 415 |
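Example showing that a better (numerically lower) rank overwrites an existing entry:
name_data = {}
add_data_for_name(name_data, '2010', '895', 'Kaylen')
add_data_for_name(name_data, '2010', '702', 'Kaylen')  # lower rank wins
print(name_data)  # {'Kaylen': {'2010': '702'}}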
a0ee11ee105b1079cb27917bb535af34fb0515f2
|
610yilingliu/TheThousandAutumn_ocr
|
to_bw.py
|
[
"MIT"
] |
Python
|
random_str
|
<not_specific>
|
def random_str():
'''
Return a 6 char random string.
'''
seed = 'qwertyuiopasdfghjklzxcvbnm1234567890'
string = ''
for i in range(6):
current_pos = random.random()
char = seed[int(len(seed) * current_pos)]
string += char
return string
|
Return a 6 char random string.
|
Return a 6 char random string.
|
[
"Return",
"a",
"6",
"char",
"random",
"string",
"."
] |
def random_str():
seed = 'qwertyuiopasdfghjklzxcvbnm1234567890'
string = ''
for i in range(6):
current_pos = random.random()
char = seed[int(len(seed) * current_pos)]
string += char
return string
|
[
"def",
"random_str",
"(",
")",
":",
"seed",
"=",
"'qwertyuiopasdfghjklzxcvbnm1234567890'",
"string",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"6",
")",
":",
"current_pos",
"=",
"random",
".",
"random",
"(",
")",
"char",
"=",
"seed",
"[",
"int",
"(",
"len",
"(",
"seed",
")",
"*",
"current_pos",
")",
"]",
"string",
"+=",
"char",
"return",
"string"
] |
Return a 6 char random string.
|
[
"Return",
"a",
"6",
"char",
"random",
"string",
"."
] |
[
"'''\n Return a 6 char random string.\n '''"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import random
def random_str():
seed = 'qwertyuiopasdfghjklzxcvbnm1234567890'
string = ''
for i in range(6):
current_pos = random.random()
char = seed[int(len(seed) * current_pos)]
string += char
return string
| 790 | 874 |
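Example call (output varies per run):
print(random_str())  # e.g. 'k3x9qa' -- six characters drawn from [a-z0-9]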
ace5a7b06907dcf5ea1ded19274e7564ce2a846b
|
JuroOravec/knwldg
|
common/common/common/util.py
|
[
"MIT"
] |
Python
|
lot2dol
|
<not_specific>
|
def lot2dol(lot):
'''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
|
Convert list of (key, value) tuples to dict of lists
|
Convert list of (key, value) tuples to dict of lists
|
[
"Convert",
"list",
"of",
"(",
"key",
"value",
")",
"tuples",
"to",
"dict",
"of",
"lists"
] |
def lot2dol(lot):
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
|
[
"def",
"lot2dol",
"(",
"lot",
")",
":",
"dol",
"=",
"{",
"}",
"for",
"k",
",",
"val",
"in",
"lot",
":",
"if",
"k",
"not",
"in",
"dol",
":",
"dol",
"[",
"k",
"]",
"=",
"[",
"]",
"dol",
"[",
"k",
"]",
".",
"append",
"(",
"val",
")",
"return",
"dol"
] |
Convert list of (key, value) tuples to dict of lists
|
[
"Convert",
"list",
"of",
"(",
"key",
"value",
")",
"tuples",
"to",
"dict",
"of",
"lists"
] |
[
"'''Convert list of (key, value) tuples to dict of lists'''"
] |
[
{
"param": "lot",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "lot",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def lot2dol(lot):
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
| 791 | 211 |
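Example call:
pairs = [('a', 1), ('b', 2), ('a', 3)]
print(lot2dol(pairs))  # {'a': [1, 3], 'b': [2]}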
775af68e5258be56cf5e5cd9f804b6d235c0fd17
|
LaudateCorpus1/code-align-evals-data
|
human_eval/636bffed-7f29-4c7b-a5fd-7823b8e412f5.py
|
[
"MIT"
] |
Python
|
is_bored
|
<not_specific>
|
def is_bored(S):
"""
You'll be given a string of words, and your task is to count the number
of boredoms. A boredom is a sentence that starts with the word "I".
Sentences are delimited by '.', '?' or '!'.
For example:
>>> is_bored("Hello world")
0
>>> is_bored("The sky is blue. The sun is shining. I love this weather")
1
"""
#[SOLUTION]
import re
sentences = re.split(r'[.?!]\s*', S)
return sum(sentence[0:2] == 'I ' for sentence in sentences)
|
You'll be given a string of words, and your task is to count the number
of boredoms. A boredom is a sentence that starts with the word "I".
Sentences are delimited by '.', '?' or '!'.
For example:
>>> is_bored("Hello world")
0
>>> is_bored("The sky is blue. The sun is shining. I love this weather")
1
|
You'll be given a string of words, and your task is to count the number
of boredoms. A boredom is a sentence that starts with the word "I".
|
[
"You",
"'",
"ll",
"be",
"given",
"a",
"string",
"of",
"words",
"and",
"your",
"task",
"is",
"to",
"count",
"the",
"number",
"of",
"boredoms",
".",
"A",
"boredom",
"is",
"a",
"sentence",
"that",
"starts",
"with",
"the",
"word",
"\"",
"I",
"\"",
"."
] |
def is_bored(S):
import re
sentences = re.split(r'[.?!]\s*', S)
return sum(sentence[0:2] == 'I ' for sentence in sentences)
|
[
"def",
"is_bored",
"(",
"S",
")",
":",
"import",
"re",
"sentences",
"=",
"re",
".",
"split",
"(",
"r'[.?!]\\s*'",
",",
"S",
")",
"return",
"sum",
"(",
"sentence",
"[",
"0",
":",
"2",
"]",
"==",
"'I '",
"for",
"sentence",
"in",
"sentences",
")"
] |
You'll be given a string of words, and your task is to count the number
of boredoms.
|
[
"You",
"'",
"ll",
"be",
"given",
"a",
"string",
"of",
"words",
"and",
"your",
"task",
"is",
"to",
"count",
"the",
"number",
"of",
"boredoms",
"."
] |
[
"\"\"\"\n You'll be given a string of words, and your task is to count the number\n of boredoms. A boredom is a sentence that starts with the word \"I\".\n Sentences are delimited by '.', '?' or '!'.\n \n For example:\n >>> is_bored(\"Hello world\")\n 0\n >>> is_bored(\"The sky is blue. The sun is shining. I love this weather\")\n 1\n \"\"\"",
"#[SOLUTION]"
] |
[
{
"param": "S",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "S",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def is_bored(S):
import re
sentences = re.split(r'[.?!]\s*', S)
return sum(sentence[0:2] == 'I ' for sentence in sentences)
| 792 | 47 |
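The doctest examples from the record, plus one extra case, as plain assertions:
assert is_bored('Hello world') == 0
assert is_bored('The sky is blue. The sun is shining. I love this weather') == 1
assert is_bored('I came. I saw. I conquered!') == 3  # every sentence starts with 'I '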
32f19d35dc48c685bb6ad79830a75d66e2d7b142
|
streanger/clients_scanner
|
clients_scanner/clients_scanner.py
|
[
"MIT"
] |
Python
|
store_devices_status
|
<not_specific>
|
def store_devices_status():
'''store actual status for current network devices'''
print('store_status_thread works')
time_between = 120 # [s]
while True:
time.sleep(time_between)
# save info to file
return None
|
store actual status for current network devices
|
store actual status for current network devices
|
[
"store",
"actual",
"status",
"for",
"current",
"network",
"devices"
] |
def store_devices_status():
print('store_status_thread works')
time_between = 120
while True:
time.sleep(time_between)
return None
|
[
"def",
"store_devices_status",
"(",
")",
":",
"print",
"(",
"'store_status_thread works'",
")",
"time_between",
"=",
"120",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"time_between",
")",
"return",
"None"
] |
store actual status for current network devices
|
[
"store",
"actual",
"status",
"for",
"current",
"network",
"devices"
] |
[
"'''store actual status for current network devices'''",
"# [s]",
"# save info to file"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import time
def store_devices_status():
print('store_status_thread works')
time_between = 120
while True:
time.sleep(time_between)
return None
| 793 | 105 |
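Because the function loops forever, a caller would typically run it on a daemon thread (a sketch, not part of the original record):
import threading
threading.Thread(target=store_devices_status, daemon=True).start()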
3d9c0948cb083ebf199640f10dd9f2b1cd4eae58
|
ColtonUnruh/Pyformer
|
source_code/pymphony/game_functions.py
|
[
"MIT"
] |
Python
|
bullet_murder
| null |
def bullet_murder(bullets, walls):
""" check the bullet class to see if any of them have hit any walls. If they have, kill them. """
for bullet in bullets.copy():
for wall in walls:
if wall.rect.colliderect(bullet.rect):
bullets.remove(bullet)
|
check the bullet class to see if any of them have hit any walls. If they have, kill them.
|
check the bullet class to see if any of them have hit any walls. If they have, kill them.
|
[
"check",
"the",
"bullet",
"class",
"to",
"see",
"if",
"any",
"of",
"them",
"have",
"hit",
"any",
"walls",
".",
"If",
"they",
"have",
"kill",
"them",
"."
] |
def bullet_murder(bullets, walls):
for bullet in bullets.copy():
for wall in walls:
if wall.rect.colliderect(bullet.rect):
bullets.remove(bullet)
|
[
"def",
"bullet_murder",
"(",
"bullets",
",",
"walls",
")",
":",
"for",
"bullet",
"in",
"bullets",
".",
"copy",
"(",
")",
":",
"for",
"wall",
"in",
"walls",
":",
"if",
"wall",
".",
"rect",
".",
"colliderect",
"(",
"bullet",
".",
"rect",
")",
":",
"bullets",
".",
"remove",
"(",
"bullet",
")"
] |
check the bullet class to see if any of them have hit any walls.
|
[
"check",
"the",
"bullet",
"class",
"to",
"see",
"if",
"any",
"of",
"them",
"have",
"hit",
"any",
"walls",
"."
] |
[
"\"\"\" check the bullet class to see if any of them have hit any walls. If they have, kill them. \"\"\""
] |
[
{
"param": "bullets",
"type": null
},
{
"param": "walls",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "bullets",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "walls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def bullet_murder(bullets, walls):
for bullet in bullets.copy():
for wall in walls:
if wall.rect.colliderect(bullet.rect):
bullets.remove(bullet)
| 795 | 905 |
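Illustrative usage; the Rect stub below implements just enough of pygame.Rect's colliderect() to exercise the function without pygame installed.
class Rect:  # minimal axis-aligned-box stand-in for pygame.Rect
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h
    def colliderect(self, other):
        return (self.x < other.x + other.w and other.x < self.x + self.w and
                self.y < other.y + other.h and other.y < self.y + self.h)

class Sprite:  # anything with a .rect attribute works
    def __init__(self, rect):
        self.rect = rect

bullets = [Sprite(Rect(0, 0, 2, 2)), Sprite(Rect(50, 50, 2, 2))]
walls = [Sprite(Rect(1, 1, 10, 10))]
bullet_murder(bullets, walls)
print(len(bullets))  # 1 -- the overlapping bullet was removed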
9cf1f25b9ba8f2cc00695c5573fe306e68132de6
|
skive-it/graph_def_editor
|
examples/coco_example.py
|
[
"Apache-2.0"
] |
Python
|
_graph_has_op
|
<not_specific>
|
def _graph_has_op(g, op_name):
# type: (tf.Graph, str) -> bool
"""
A method that really ought to be part of `tf.Graph`. Returns true of the
indicated graph has an op by the indicated name.
"""
all_ops_in_graph = g.get_operations()
return any(op_name == o.name for o in all_ops_in_graph)
|
A method that really ought to be part of `tf.Graph`. Returns true of the
indicated graph has an op by the indicated name.
|
A method that really ought to be part of `tf.Graph`. Returns true of the
indicated graph has an op by the indicated name.
|
[
"A",
"method",
"that",
"really",
"ought",
"to",
"be",
"part",
"of",
"`",
"tf",
".",
"Graph",
"`",
".",
"Returns",
"true",
"of",
"the",
"indicated",
"graph",
"has",
"an",
"op",
"by",
"the",
"indicated",
"name",
"."
] |
def _graph_has_op(g, op_name):
all_ops_in_graph = g.get_operations()
return any(op_name == o.name for o in all_ops_in_graph)
|
[
"def",
"_graph_has_op",
"(",
"g",
",",
"op_name",
")",
":",
"all_ops_in_graph",
"=",
"g",
".",
"get_operations",
"(",
")",
"return",
"any",
"(",
"op_name",
"==",
"o",
".",
"name",
"for",
"o",
"in",
"all_ops_in_graph",
")"
] |
A method that really ought to be part of `tf.Graph`.
|
[
"A",
"method",
"that",
"really",
"ought",
"to",
"be",
"part",
"of",
"`",
"tf",
".",
"Graph",
"`",
"."
] |
[
"# type: (tf.Graph, str) -> bool",
"\"\"\"\n A method that really ought to be part of `tf.Graph`. Returns true of the\n indicated graph has an op by the indicated name.\n \"\"\""
] |
[
{
"param": "g",
"type": null
},
{
"param": "op_name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "g",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "op_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _graph_has_op(g, op_name):
all_ops_in_graph = g.get_operations()
return any(op_name == o.name for o in all_ops_in_graph)
| 797 | 966 |
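Usage sketch, assuming TensorFlow is installed and using the TF1-style graph API the record targets:
import tensorflow.compat.v1 as tf
g = tf.Graph()
with g.as_default():
    tf.constant(1.0, name='my_const')
print(_graph_has_op(g, 'my_const'))  # True
print(_graph_has_op(g, 'missing'))   # False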
14483027d120169b3905b86f0083348a4dbb71c2
|
jdchristensen/attic
|
attic/helpers.py
|
[
"BSD-3-Clause"
] |
Python
|
format_file_size
|
<not_specific>
|
def format_file_size(v):
"""Format file size into a human friendly format
"""
if abs(v) > 10**12:
return '%.2f TB' % (v / 10**12)
elif abs(v) > 10**9:
return '%.2f GB' % (v / 10**9)
elif abs(v) > 10**6:
return '%.2f MB' % (v / 10**6)
elif abs(v) > 10**3:
return '%.2f kB' % (v / 10**3)
else:
return '%d B' % v
|
Format file size into a human friendly format
|
Format file size into a human friendly format
|
[
"Format",
"file",
"size",
"into",
"a",
"human",
"friendly",
"format"
] |
def format_file_size(v):
if abs(v) > 10**12:
return '%.2f TB' % (v / 10**12)
elif abs(v) > 10**9:
return '%.2f GB' % (v / 10**9)
elif abs(v) > 10**6:
return '%.2f MB' % (v / 10**6)
elif abs(v) > 10**3:
return '%.2f kB' % (v / 10**3)
else:
return '%d B' % v
|
[
"def",
"format_file_size",
"(",
"v",
")",
":",
"if",
"abs",
"(",
"v",
")",
">",
"10",
"**",
"12",
":",
"return",
"'%.2f TB'",
"%",
"(",
"v",
"/",
"10",
"**",
"12",
")",
"elif",
"abs",
"(",
"v",
")",
">",
"10",
"**",
"9",
":",
"return",
"'%.2f GB'",
"%",
"(",
"v",
"/",
"10",
"**",
"9",
")",
"elif",
"abs",
"(",
"v",
")",
">",
"10",
"**",
"6",
":",
"return",
"'%.2f MB'",
"%",
"(",
"v",
"/",
"10",
"**",
"6",
")",
"elif",
"abs",
"(",
"v",
")",
">",
"10",
"**",
"3",
":",
"return",
"'%.2f kB'",
"%",
"(",
"v",
"/",
"10",
"**",
"3",
")",
"else",
":",
"return",
"'%d B'",
"%",
"v"
] |
Format file size into a human friendly format
|
[
"Format",
"file",
"size",
"into",
"a",
"human",
"friendly",
"format"
] |
[
"\"\"\"Format file size into a human friendly format\n \"\"\""
] |
[
{
"param": "v",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "v",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def format_file_size(v):
if abs(v) > 10**12:
return '%.2f TB' % (v / 10**12)
elif abs(v) > 10**9:
return '%.2f GB' % (v / 10**9)
elif abs(v) > 10**6:
return '%.2f MB' % (v / 10**6)
elif abs(v) > 10**3:
return '%.2f kB' % (v / 10**3)
else:
return '%d B' % v
| 798 | 814 |
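Example calls; note the thresholds are decimal (powers of 10), not binary:
for n in (512, 5000, 5 * 10**6, 5 * 10**9, 2 * 10**13):
    print(format_file_size(n))
# 512 B / 5.00 kB / 5.00 MB / 5.00 GB / 20.00 TB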
d1181d524399d947392b0c43b9101ec2dfe2c70c
|
kimd113/cs257
|
books/books.py
|
[
"MIT"
] |
Python
|
filter_by_year
|
<not_specific>
|
def filter_by_year(year,filtered_list):
''' filter by the input of year '''
if year != None:
try:
if int(year[0]) and len(year[0]) == 4 and int(year[1]) and len(year[1]) == 4:
new_list=[]
for row in filtered_list:
if year[0] <= row[1] and year[1] >= row[1]:
new_list.append(row)
return new_list
else:
exit()
except:
print('Please enter a proper 4-digit integer for both year values')
exit()
else:
return filtered_list
|
filter by the input of year
|
filter by the input of year
|
[
"filter",
"by",
"the",
"input",
"of",
"year"
] |
def filter_by_year(year,filtered_list):
if year != None:
try:
if int(year[0]) and len(year[0]) == 4 and int(year[1]) and len(year[1]) == 4:
new_list=[]
for row in filtered_list:
if year[0] <= row[1] and year[1] >= row[1]:
new_list.append(row)
return new_list
else:
exit()
except:
print('Please enter a proper 4-digit integer for both year values')
exit()
else:
return filtered_list
|
[
"def",
"filter_by_year",
"(",
"year",
",",
"filtered_list",
")",
":",
"if",
"year",
"!=",
"None",
":",
"try",
":",
"if",
"int",
"(",
"year",
"[",
"0",
"]",
")",
"and",
"len",
"(",
"year",
"[",
"0",
"]",
")",
"==",
"4",
"and",
"int",
"(",
"year",
"[",
"1",
"]",
")",
"and",
"len",
"(",
"year",
"[",
"1",
"]",
")",
"==",
"4",
":",
"new_list",
"=",
"[",
"]",
"for",
"row",
"in",
"filtered_list",
":",
"if",
"year",
"[",
"0",
"]",
"<=",
"row",
"[",
"1",
"]",
"and",
"year",
"[",
"1",
"]",
">=",
"row",
"[",
"1",
"]",
":",
"new_list",
".",
"append",
"(",
"row",
")",
"return",
"new_list",
"else",
":",
"exit",
"(",
")",
"except",
":",
"print",
"(",
"'Please enter a proper 4-digit integer for both year values'",
")",
"exit",
"(",
")",
"else",
":",
"return",
"filtered_list"
] |
filter by the input of year
|
[
"filter",
"by",
"the",
"input",
"of",
"year"
] |
[
"''' filter by the input of year '''"
] |
[
{
"param": "year",
"type": null
},
{
"param": "filtered_list",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "year",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filtered_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def filter_by_year(year,filtered_list):
if year != None:
try:
if int(year[0]) and len(year[0]) == 4 and int(year[1]) and len(year[1]) == 4:
new_list=[]
for row in filtered_list:
if year[0] <= row[1] and year[1] >= row[1]:
new_list.append(row)
return new_list
else:
exit()
except:
print('Please enter a proper 4-digit integer for both year values')
exit()
else:
return filtered_list
| 799 | 918 |
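Example calls; note the range check compares year strings lexicographically, which matches numeric order only because all values are 4-digit years:
rows = [['Beloved', '1987'], ['Dune', '1965'], ['Kindred', '1979']]
print(filter_by_year(['1970', '1990'], rows))  # [['Beloved', '1987'], ['Kindred', '1979']]
print(filter_by_year(None, rows))              # returned unchanged when no range is given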