Dataset schema (field: type, observed range):

hexsha: string (length 40-40)
repo: string (length 5-121)
path: string (length 4-227)
license: list
language: string (1 distinct value: Python)
identifier: string (length 1-107)
return_type: string (length 2-237)
original_string: string (length 75-13.4k)
original_docstring: string (length 13-12.9k)
docstring: string (length 13-2.57k)
docstring_tokens: list
code: string (length 23-1.88k)
code_tokens: list
short_docstring: string (length 1-1.32k)
short_docstring_tokens: list
comment: list
parameters: list
docstring_params: dict
code_with_imports: string (length 23-1.88k)
idxs: int64 (0-611k)
cluster: int64 (0-1.02k)
5cd59d8c6069cf4fe01378b719a4312d3e68fe26
itsnubby/korok
sableye/_leg_sableye.py
[ "Apache-2.0" ]
Python
_check_supported
<not_specific>
def _check_supported(label, support_dict):
    """
    See if we can handle it.
    """
    if label in support_dict:
        return True
    else:
        return False
See if we can handle it.
See if we can handle it.
[ "See", "if", "we", "can", "handle", "it", "." ]
def _check_supported(label, support_dict):
    if label in support_dict:
        return True
    else:
        return False
[ "def", "_check_supported", "(", "label", ",", "support_dict", ")", ":", "if", "label", "in", "support_dict", ":", "return", "True", "else", ":", "return", "False" ]
See if we can handle it.
[ "See", "if", "we", "can", "handle", "it", "." ]
[ "\"\"\"\n See if we can handle it.\n \"\"\"" ]
[ { "param": "label", "type": null }, { "param": "support_dict", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "label", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "support_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _check_supported(label, support_dict):
    if label in support_dict:
        return True
    else:
        return False
914
561
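A minimal usage sketch for _check_supported, assuming the function from the row above is in scope; the support table here is hypothetical.

    supported_formats = {"wav": None, "mp3": None}  # hypothetical support dict
    print(_check_supported("wav", supported_formats))   # True
    print(_check_supported("flac", supported_formats))  # False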
bbba474fb98839b7d23b959539e8f0962c3d908a
shaypal5/palpyutil
utilp/func/str.py
[ "MIT" ]
Python
str_replace_by_dict
str
def str_replace_by_dict(string: str, rep_dict: dict) -> str:
    """Replaces multiple source-destination substrings by a given dict.

    Parameters
    ----------
    string : str
        The string to replace substrings in.
    rep_dict : dict
        A dictionary of source-destination mappings to use for replacement.

    Returns
    -------
    str
        The resulting string after substring replacements.

    Example
    -------
    >>> rep_dict = {'cat': 'dog', 'a': 'b'}
    >>> str_replace_by_dict('my cat is a friend', rep_dict)
    'my dog is b friend'
    """
    rep = dict((re.escape(k), v) for k, v in rep_dict.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
Replaces multiple source-destination substrings by a given dict.

Parameters
----------
string : str
    The string to replace substrings in.
rep_dict : dict
    A dictionary of source-destination mappings to use for replacement.

Returns
-------
str
    The resulting string after substring replacements.

Example
-------
>>> rep_dict = {'cat': 'dog', 'a': 'b'}
>>> str_replace_by_dict('my cat is a friend', rep_dict)
'my dog is b friend'
Replaces multiple source-destination substrings by a given dict. Parameters string : str The string to replace substrings in. rep_dict : dict A dictionary of source-destination mappings to use for replacement. Returns str The resulting string after substring replacements. Example
[ "Replaces", "multiple", "source", "-", "destination", "substrings", "by", "a", "given", "dict", ".", "Parameters", "string", ":", "str", "The", "string", "to", "replace", "substrings", "in", ".", "rep_dict", ":", "dict", "A", "dictionary", "of", "source", "-", "destination", "mappings", "to", "use", "for", "replacement", ".", "Returns", "str", "The", "resulting", "string", "after", "substring", "replacements", ".", "Example" ]
def str_replace_by_dict(string: str, rep_dict: dict) -> str:
    rep = dict((re.escape(k), v) for k, v in rep_dict.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
[ "def", "str_replace_by_dict", "(", "string", ":", "str", ",", "rep_dict", ":", "dict", ")", "->", "str", ":", "rep", "=", "dict", "(", "(", "re", ".", "escape", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "rep_dict", ".", "items", "(", ")", ")", "pattern", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "rep", ".", "keys", "(", ")", ")", ")", "return", "pattern", ".", "sub", "(", "lambda", "m", ":", "rep", "[", "re", ".", "escape", "(", "m", ".", "group", "(", "0", ")", ")", "]", ",", "string", ")" ]
Replaces multiple source-destination substrings by a given dict.
[ "Replaces", "multiple", "source", "-", "destination", "substrings", "by", "a", "given", "dict", "." ]
[ "\"\"\"Replaces multiple source-destination substrings by a given dict.\n\n Parameters\n ----------\n string : str\n The string to replace substrings in.\n rep_dict : dict\n A dictionary of source-destination mappings to use for replacement.\n\n Returns\n -------\n str\n The resulting string after substring replacements.\n\n Example\n -------\n >>> rep_dict = {'cat': 'dog', 'a': 'b'}\n >>> str_replace_by_dict('my cat is a friend', rep_dict)\n 'my dog is b friend'\n \"\"\"" ]
[ { "param": "string", "type": "str" }, { "param": "rep_dict", "type": "dict" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "rep_dict", "type": "dict", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def str_replace_by_dict(string: str, rep_dict: dict) -> str:
    rep = dict((re.escape(k), v) for k, v in rep_dict.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
915
699
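A usage sketch for str_replace_by_dict, assuming the function and its `import re` are in scope. Note that the alternation pattern is built in dict order, so when two keys could match at the same position, the one listed first wins.

    rep = {"cat": "dog", "a": "b"}
    print(str_replace_by_dict("my cat is a friend", rep))  # 'my dog is b friend'
    # Order matters for overlapping keys: {"ab": "X", "a": "Y"} maps "ab" -> "X",
    # while {"a": "Y", "ab": "X"} maps "ab" -> "Yb".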
ef768e168a68e6fa10ee3fd3a82c092d6e7fdf6c
AarynnCarter/Eureka
eureka/lib/util.py
[ "MIT" ]
Python
trim
<not_specific>
def trim(data, meta):
    """
    Removes the edges of the data arrays

    Args:
        dat: Data object
        md: Metadata object

    Returns:
        subdata arrays with trimmed edges depending on xwindow and ywindow which have been set in the S3 ecf
    """
    data.subdata = data.data[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.suberr = data.err[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subdq = data.dq[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subwave = data.wave[meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subv0 = data.v0[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    meta.subny = meta.ywindow[1] - meta.ywindow[0]
    meta.subnx = meta.xwindow[1] - meta.xwindow[0]
    return data, meta
Removes the edges of the data arrays

Args:
    dat: Data object
    md: Metadata object

Returns:
    subdata arrays with trimmed edges depending on xwindow and ywindow which have been set in the S3 ecf
Removes the edges of the data arrays
[ "Removes", "the", "edges", "of", "the", "data", "arrays" ]
def trim(data, meta):
    data.subdata = data.data[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.suberr = data.err[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subdq = data.dq[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subwave = data.wave[meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subv0 = data.v0[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    meta.subny = meta.ywindow[1] - meta.ywindow[0]
    meta.subnx = meta.xwindow[1] - meta.xwindow[0]
    return data, meta
[ "def", "trim", "(", "data", ",", "meta", ")", ":", "data", ".", "subdata", "=", "data", ".", "data", "[", ":", ",", "meta", ".", "ywindow", "[", "0", "]", ":", "meta", ".", "ywindow", "[", "1", "]", ",", "meta", ".", "xwindow", "[", "0", "]", ":", "meta", ".", "xwindow", "[", "1", "]", "]", "data", ".", "suberr", "=", "data", ".", "err", "[", ":", ",", "meta", ".", "ywindow", "[", "0", "]", ":", "meta", ".", "ywindow", "[", "1", "]", ",", "meta", ".", "xwindow", "[", "0", "]", ":", "meta", ".", "xwindow", "[", "1", "]", "]", "data", ".", "subdq", "=", "data", ".", "dq", "[", ":", ",", "meta", ".", "ywindow", "[", "0", "]", ":", "meta", ".", "ywindow", "[", "1", "]", ",", "meta", ".", "xwindow", "[", "0", "]", ":", "meta", ".", "xwindow", "[", "1", "]", "]", "data", ".", "subwave", "=", "data", ".", "wave", "[", "meta", ".", "ywindow", "[", "0", "]", ":", "meta", ".", "ywindow", "[", "1", "]", ",", "meta", ".", "xwindow", "[", "0", "]", ":", "meta", ".", "xwindow", "[", "1", "]", "]", "data", ".", "subv0", "=", "data", ".", "v0", "[", ":", ",", "meta", ".", "ywindow", "[", "0", "]", ":", "meta", ".", "ywindow", "[", "1", "]", ",", "meta", ".", "xwindow", "[", "0", "]", ":", "meta", ".", "xwindow", "[", "1", "]", "]", "meta", ".", "subny", "=", "meta", ".", "ywindow", "[", "1", "]", "-", "meta", ".", "ywindow", "[", "0", "]", "meta", ".", "subnx", "=", "meta", ".", "xwindow", "[", "1", "]", "-", "meta", ".", "xwindow", "[", "0", "]", "return", "data", ",", "meta" ]
Removes the edges of the data arrays
[ "Removes", "the", "edges", "of", "the", "data", "arrays" ]
[ "\"\"\"\n Removes the edges of the data arrays\n\n Args:\n dat: Data object\n md: Metadata object\n\n Returns:\n subdata arrays with trimmed edges depending on xwindow and ywindow which have been set in the S3 ecf\n \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "meta", "type": null } ]
{ "returns": [ { "docstring": "subdata arrays with trimmed edges depending on xwindow and ywindow which have been set in the S3 ecf", "docstring_tokens": [ "subdata", "arrays", "with", "trimmed", "edges", "depending", "on", "xwindow", "and", "ywindow", "which", "have", "been", "set", "in", "the", "S3", "ecf" ], "type": null } ], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "meta", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "dat", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "md", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def trim(data, meta):
    data.subdata = data.data[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.suberr = data.err[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subdq = data.dq[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subwave = data.wave[meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    data.subv0 = data.v0[:, meta.ywindow[0]:meta.ywindow[1], meta.xwindow[0]:meta.xwindow[1]]
    meta.subny = meta.ywindow[1] - meta.ywindow[0]
    meta.subnx = meta.xwindow[1] - meta.xwindow[0]
    return data, meta
916
382
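A hedged sketch of calling trim; Eureka's real Data and Metadata classes are stood in for with SimpleNamespace, and the window values are made up.

    import numpy as np
    from types import SimpleNamespace

    data = SimpleNamespace(data=np.zeros((2, 10, 10)), err=np.zeros((2, 10, 10)),
                           dq=np.zeros((2, 10, 10)), wave=np.zeros((10, 10)),
                           v0=np.zeros((2, 10, 10)))
    meta = SimpleNamespace(ywindow=[2, 8], xwindow=[1, 9])
    data, meta = trim(data, meta)
    print(data.subdata.shape, meta.subny, meta.subnx)  # (2, 6, 8) 6 8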
93c8f2d44280aa2850f91de41235144d5fbe7f51
benrxv/pybbm
pybb/util.py
[ "BSD-2-Clause" ]
Python
rstrip_str
<not_specific>
def rstrip_str(user, str):
    """
    Replace strings with spaces (tabs, etc..) only with newlines
    Remove blank line at the end
    """
    if user.is_staff:
        return str
    return '\n'.join([s.rstrip() for s in str.splitlines()])
Replace strings with spaces (tabs, etc..) only with newlines Remove blank line at the end
Replace strings with spaces (tabs, etc..) only with newlines Remove blank line at the end
[ "Replace", "strings", "with", "spaces", "(", "tabs", "etc", "..", ")", "only", "with", "newlines", "Remove", "blank", "line", "at", "the", "end" ]
def rstrip_str(user, str):
    if user.is_staff:
        return str
    return '\n'.join([s.rstrip() for s in str.splitlines()])
[ "def", "rstrip_str", "(", "user", ",", "str", ")", ":", "if", "user", ".", "is_staff", ":", "return", "str", "return", "'\\n'", ".", "join", "(", "[", "s", ".", "rstrip", "(", ")", "for", "s", "in", "str", ".", "splitlines", "(", ")", "]", ")" ]
Replace strings with spaces (tabs, etc..) only with newlines Remove blank line at the end
[ "Replace", "strings", "with", "spaces", "(", "tabs", "etc", "..", ")", "only", "with", "newlines", "Remove", "blank", "line", "at", "the", "end" ]
[ "\"\"\"\n Replace strings with spaces (tabs, etc..) only with newlines\n Remove blank line at the end\n \"\"\"" ]
[ { "param": "user", "type": null }, { "param": "str", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "user", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "str", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def rstrip_str(user, str):
    if user.is_staff:
        return str
    return '\n'.join([s.rstrip() for s in str.splitlines()])
917
36
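A usage sketch, assuming rstrip_str is in scope; the user objects are hypothetical stand-ins for Django users with an is_staff attribute.

    from types import SimpleNamespace

    member = SimpleNamespace(is_staff=False)
    staff = SimpleNamespace(is_staff=True)
    text = "line one   \nline two\t\n"
    print(repr(rstrip_str(member, text)))  # 'line one\nline two' -- trailing whitespace stripped
    print(repr(rstrip_str(staff, text)))   # unchanged for staff users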
3f73db4ff0eb5c8b5fdeb8738792d974867ed602
coree/Multimodal-Toolkit
multimodal_transformers/data/data_utils.py
[ "MIT" ]
Python
agg_text_columns_func
<not_specific>
def agg_text_columns_func(empty_row_values, replace_text, texts):
    """replace empty texts or remove empty text str from a list of text str"""
    processed_texts = []
    for text in texts.astype('str'):
        if text not in empty_row_values:
            processed_texts.append(text)
        else:
            if replace_text is not None:
                processed_texts.append(replace_text)
    return processed_texts
replace empty texts or remove empty text str from a list of text str
replace empty texts or remove empty text str from a list of text str
[ "replace", "empty", "texts", "or", "remove", "empty", "text", "str", "from", "a", "list", "of", "text", "str" ]
def agg_text_columns_func(empty_row_values, replace_text, texts):
    processed_texts = []
    for text in texts.astype('str'):
        if text not in empty_row_values:
            processed_texts.append(text)
        else:
            if replace_text is not None:
                processed_texts.append(replace_text)
    return processed_texts
[ "def", "agg_text_columns_func", "(", "empty_row_values", ",", "replace_text", ",", "texts", ")", ":", "processed_texts", "=", "[", "]", "for", "text", "in", "texts", ".", "astype", "(", "'str'", ")", ":", "if", "text", "not", "in", "empty_row_values", ":", "processed_texts", ".", "append", "(", "text", ")", "else", ":", "if", "replace_text", "is", "not", "None", ":", "processed_texts", ".", "append", "(", "replace_text", ")", "return", "processed_texts" ]
replace empty texts or remove empty text str from a list of text str
[ "replace", "empty", "texts", "or", "remove", "empty", "text", "str", "from", "a", "list", "of", "text", "str" ]
[ "\"\"\"replace empty texts or remove empty text str from a list of text str\"\"\"" ]
[ { "param": "empty_row_values", "type": null }, { "param": "replace_text", "type": null }, { "param": "texts", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "empty_row_values", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "replace_text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "texts", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def agg_text_columns_func(empty_row_values, replace_text, texts):
    processed_texts = []
    for text in texts.astype('str'):
        if text not in empty_row_values:
            processed_texts.append(text)
        else:
            if replace_text is not None:
                processed_texts.append(replace_text)
    return processed_texts
918
150
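A usage sketch assuming the function is in scope; texts must support .astype('str'), so a NumPy array (or pandas Series) works.

    import numpy as np

    texts = np.array(["hello", "nan", "world"])
    empty = {"nan", ""}
    print(agg_text_columns_func(empty, None, texts))       # empty entries dropped
    print(agg_text_columns_func(empty, "[EMPTY]", texts))  # empty entries replaced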
ee3e3316cdc6bc58fd1909a3fb35134fd0d34a95
guerler/kc-align
kcalign/__init__.py
[ "AFL-3.0" ]
Python
join_extract_DNA_seq
<not_specific>
def join_extract_DNA_seq(seq, homologs, tab):
    """
    Same as extract_DNA_seq() but for when the gene of interests is split
    between two different reading frames.
    """
    Dseqs = []
    for homolog in homologs:
        shift = homolog[1]
        Pseq = seq[homolog[1]:].translate(table=tab)
        check = 0
        for i in range(len(Pseq)):
            if str(Pseq[i:i+len(homolog[0])]) == homolog[0]:
                check = 1
                break
        if check == 1:
            Dseqs.append(seq[i*3+shift:(i+len(homolog[0]))*3+shift])
        else:
            return 1
    return Dseqs[0]+Dseqs[1]
Same as extract_DNA_seq() but for when the gene of interests is split between two different reading frames.
Same as extract_DNA_seq() but for when the gene of interests is split between two different reading frames.
[ "Same", "as", "extract_DNA_seq", "()", "but", "for", "when", "the", "gene", "of", "interests", "is", "split", "between", "two", "different", "reading", "frames", "." ]
def join_extract_DNA_seq(seq, homologs, tab):
    Dseqs = []
    for homolog in homologs:
        shift = homolog[1]
        Pseq = seq[homolog[1]:].translate(table=tab)
        check = 0
        for i in range(len(Pseq)):
            if str(Pseq[i:i+len(homolog[0])]) == homolog[0]:
                check = 1
                break
        if check == 1:
            Dseqs.append(seq[i*3+shift:(i+len(homolog[0]))*3+shift])
        else:
            return 1
    return Dseqs[0]+Dseqs[1]
[ "def", "join_extract_DNA_seq", "(", "seq", ",", "homologs", ",", "tab", ")", ":", "Dseqs", "=", "[", "]", "for", "homolog", "in", "homologs", ":", "shift", "=", "homolog", "[", "1", "]", "Pseq", "=", "seq", "[", "homolog", "[", "1", "]", ":", "]", ".", "translate", "(", "table", "=", "tab", ")", "check", "=", "0", "for", "i", "in", "range", "(", "len", "(", "Pseq", ")", ")", ":", "if", "str", "(", "Pseq", "[", "i", ":", "i", "+", "len", "(", "homolog", "[", "0", "]", ")", "]", ")", "==", "homolog", "[", "0", "]", ":", "check", "=", "1", "break", "if", "check", "==", "1", ":", "Dseqs", ".", "append", "(", "seq", "[", "i", "*", "3", "+", "shift", ":", "(", "i", "+", "len", "(", "homolog", "[", "0", "]", ")", ")", "*", "3", "+", "shift", "]", ")", "else", ":", "return", "1", "return", "Dseqs", "[", "0", "]", "+", "Dseqs", "[", "1", "]" ]
Same as extract_DNA_seq() but for when the gene of interests is split between two different reading frames.
[ "Same", "as", "extract_DNA_seq", "()", "but", "for", "when", "the", "gene", "of", "interests", "is", "split", "between", "two", "different", "reading", "frames", "." ]
[ "\"\"\"\n Same as extract_DNA_seq() but for when the gene of interests is split\n between two different reading frames.\n \"\"\"" ]
[ { "param": "seq", "type": null }, { "param": "homologs", "type": null }, { "param": "tab", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "homologs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "tab", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def join_extract_DNA_seq(seq, homologs, tab):
    Dseqs = []
    for homolog in homologs:
        shift = homolog[1]
        Pseq = seq[homolog[1]:].translate(table=tab)
        check = 0
        for i in range(len(Pseq)):
            if str(Pseq[i:i+len(homolog[0])]) == homolog[0]:
                check = 1
                break
        if check == 1:
            Dseqs.append(seq[i*3+shift:(i+len(homolog[0]))*3+shift])
        else:
            return 1
    return Dseqs[0]+Dseqs[1]
919
943
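A toy invocation, assuming the function is in scope and Biopython is installed (seq must support .translate(table=...)). For simplicity both hypothetical homolog fragments sit in the same frame; real inputs would use two different frame shifts.

    from Bio.Seq import Seq

    seq = Seq("ATGAAAGGGTTTCCC")        # translates to 'MKGFP' with table 1
    homologs = [("MK", 0), ("FP", 0)]   # (protein fragment, frame shift) pairs
    print(join_extract_DNA_seq(seq, homologs, tab=1))  # ATGAAATTTCCC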
4036103bbcdf43818de93c4a553f384c9fd803fe
aws-samples/aws-medialive-mediapackage-cloudwatch-dashboard
create_CW_dashboard.py
[ "Apache-2.0" ]
Python
update_status_code_range_2xx4xx_metric
<not_specific>
def update_status_code_range_2xx4xx_metric(mp_endpoint_names):
    """Update the metrics of the "Status Code Range (sum)" dashboard widget"""
    results = []
    for mp_name in mp_endpoint_names:
        endpoints = mp_endpoint_names[mp_name]
        for endpoint in endpoints:
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "2xx"]
            results.append(entry)
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "4xx",
                     {"yAxis": "right"}]
            results.append(entry)
    return results
Update the metrics of the "Status Code Range (sum)" dashboard widget
Update the metrics of the "Status Code Range (sum)" dashboard widget
[ "Update", "the", "metrics", "of", "the", "\"", "Status", "Code", "Range", "(", "sum", ")", "\"", "dashboard", "widget" ]
def update_status_code_range_2xx4xx_metric(mp_endpoint_names):
    results = []
    for mp_name in mp_endpoint_names:
        endpoints = mp_endpoint_names[mp_name]
        for endpoint in endpoints:
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "2xx"]
            results.append(entry)
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "4xx",
                     {"yAxis": "right"}]
            results.append(entry)
    return results
[ "def", "update_status_code_range_2xx4xx_metric", "(", "mp_endpoint_names", ")", ":", "results", "=", "[", "]", "for", "mp_name", "in", "mp_endpoint_names", ":", "endpoints", "=", "mp_endpoint_names", "[", "mp_name", "]", "for", "endpoint", "in", "endpoints", ":", "entry", "=", "[", "\"AWS/MediaPackage\"", ",", "\"EgressRequestCount\"", ",", "\"Channel\"", ",", "mp_name", ",", "\"OriginEndpoint\"", ",", "endpoint", ",", "\"StatusCodeRange\"", ",", "\"2xx\"", "]", "results", ".", "append", "(", "entry", ")", "entry", "=", "[", "\"AWS/MediaPackage\"", ",", "\"EgressRequestCount\"", ",", "\"Channel\"", ",", "mp_name", ",", "\"OriginEndpoint\"", ",", "endpoint", ",", "\"StatusCodeRange\"", ",", "\"4xx\"", ",", "{", "\"yAxis\"", ":", "\"right\"", "}", "]", "results", ".", "append", "(", "entry", ")", "return", "results" ]
Update the metrics of the "Status Code Range (sum)" dashboard widget
[ "Update", "the", "metrics", "of", "the", "\"", "Status", "Code", "Range", "(", "sum", ")", "\"", "dashboard", "widget" ]
[ "\"\"\"Update the metrics of the \"Status Code Range (sum)\" dashboard widget\"\"\"" ]
[ { "param": "mp_endpoint_names", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mp_endpoint_names", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def update_status_code_range_2xx4xx_metric(mp_endpoint_names):
    results = []
    for mp_name in mp_endpoint_names:
        endpoints = mp_endpoint_names[mp_name]
        for endpoint in endpoints:
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "2xx"]
            results.append(entry)
            entry = ["AWS/MediaPackage", "EgressRequestCount", "Channel", mp_name,
                     "OriginEndpoint", endpoint, "StatusCodeRange", "4xx",
                     {"yAxis": "right"}]
            results.append(entry)
    return results
920
955
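A usage sketch with hypothetical MediaPackage channel and endpoint names; each endpoint yields one 2xx metric and one 4xx metric plotted on the right y-axis.

    mp_endpoint_names = {"my-channel": ["hls-endpoint", "dash-endpoint"]}
    for metric in update_status_code_range_2xx4xx_metric(mp_endpoint_names):
        print(metric)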
5a99d2ca83dd91021028710c4811c993917389d7
meewa1/BrukerGUI
MRIAssimilator/brukerData.py
[ "MIT" ]
Python
is_valid_dir
<not_specific>
def is_valid_dir(parser, arg):
    """
Check if arg is a valid directory that already exists on the file system.

Parameters
----------
parser : argparse object
arg : str

Returns
-------
arg
    """
    arg = os.path.abspath(arg)
    if not os.path.isdir(arg):
        parser.error("The file %s is not a directory!" % arg)
    elif not os.path.exists(arg):
        parser.error("The directory %s does not exist!" % arg)
    else:
        return arg
Check if arg is a valid directory that already exists on the file system.

Parameters
----------
parser : argparse object
arg : str

Returns
-------
arg
Check if arg is a valid directory that already exists on the file system. Parameters parser : argparse object arg : str Returns arg
[ "Check", "if", "arg", "is", "a", "valid", "directory", "that", "already", "exists", "on", "the", "file", "system", ".", "Parameters", "parser", ":", "argparse", "object", "arg", ":", "str", "Returns", "arg" ]
def is_valid_dir(parser, arg):
    arg = os.path.abspath(arg)
    if not os.path.isdir(arg):
        parser.error("The file %s is not a directory!" % arg)
    elif not os.path.exists(arg):
        parser.error("The directory %s does not exist!" % arg)
    else:
        return arg
[ "def", "is_valid_dir", "(", "parser", ",", "arg", ")", ":", "arg", "=", "os", ".", "path", ".", "abspath", "(", "arg", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"The file %s is not a directory!\"", "%", "arg", ")", "elif", "not", "os", ".", "path", ".", "exists", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"The directory %s does not exist!\"", "%", "arg", ")", "else", ":", "return", "arg" ]
Check if arg is a valid directory that already exists on the file system.
[ "Check", "if", "arg", "is", "a", "valid", "directory", "that", "already", "exists", "on", "the", "file", "system", "." ]
[ "\"\"\"\nCheck if arg is a valid directory that already exists on the file system.\n\nParameters\n----------\nparser : argparse object\narg : str\n\nReturns\n-------\narg\n \"\"\"" ]
[ { "param": "parser", "type": null }, { "param": "arg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "arg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def is_valid_dir(parser, arg):
    arg = os.path.abspath(arg)
    if not os.path.isdir(arg):
        parser.error("The file %s is not a directory!" % arg)
    elif not os.path.exists(arg):
        parser.error("The directory %s does not exist!" % arg)
    else:
        return arg
921
41
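Because the helper takes the parser as its first argument, it is typically wired in as an argparse type via a lambda, as in this sketch (assuming the function and its `import os` are in scope). Note that the os.path.exists branch is effectively unreachable, since os.path.isdir already returns False for paths that don't exist.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("indir", type=lambda a: is_valid_dir(parser, a))
    args = parser.parse_args(["."])  # '.' exists, so parsing succeeds
    print(args.indir)                # absolute path of the current directory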
4f422b735f731220c2b4e793549518b862e51185
graydenshand/flask_boot
src/flask_batteries/commands/webpack.py
[ "MIT" ]
Python
build
null
def build(bail):
    """
    Compile static assets with Webpack

    If --bail is passed, exit with an error code if compilation fails.
    This is useful for CI tests.
    """
    if bail:
        proc = subprocess.run(["npx", "webpack", "--bail"])
        if proc.returncode != 0:
            exit(1)
    else:
        subprocess.run(["npx", "webpack"])
Compile static assets with Webpack If --bail is passed, exit with an error code if compilation fails. This is useful for CI tests.
Compile static assets with Webpack If --bail is passed, exit with an error code if compilation fails. This is useful for CI tests.
[ "Compile", "static", "assets", "with", "Webpack", "If", "--", "bail", "is", "passed", "exit", "with", "an", "error", "code", "if", "compilation", "fails", ".", "This", "is", "useful", "for", "CI", "tests", "." ]
def build(bail):
    if bail:
        proc = subprocess.run(["npx", "webpack", "--bail"])
        if proc.returncode != 0:
            exit(1)
    else:
        subprocess.run(["npx", "webpack"])
[ "def", "build", "(", "bail", ")", ":", "if", "bail", ":", "proc", "=", "subprocess", ".", "run", "(", "[", "\"npx\"", ",", "\"webpack\"", ",", "\"--bail\"", "]", ")", "if", "proc", ".", "returncode", "!=", "0", ":", "exit", "(", "1", ")", "else", ":", "subprocess", ".", "run", "(", "[", "\"npx\"", ",", "\"webpack\"", "]", ")" ]
Compile static assets with Webpack If --bail is passed, exit with an error code if compilation fails.
[ "Compile", "static", "assets", "with", "Webpack", "If", "--", "bail", "is", "passed", "exit", "with", "an", "error", "code", "if", "compilation", "fails", "." ]
[ "\"\"\"\n Compile static assets with Webpack\n\n If --bail is passed, exit with an error code if compilation fails.\n This is useful for CI tests.\n \"\"\"" ]
[ { "param": "bail", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "bail", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess

def build(bail):
    if bail:
        proc = subprocess.run(["npx", "webpack", "--bail"])
        if proc.returncode != 0:
            exit(1)
    else:
        subprocess.run(["npx", "webpack"])
922
68
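A hedged sketch of invoking build directly; it shells out to npx, so Node.js and webpack must be installed in the working directory for it to succeed.

    build(bail=False)  # runs `npx webpack`
    build(bail=True)   # runs `npx webpack --bail`, exiting 1 if compilation fails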
5244e776c280a204ee06d9152495856d51d592d2
google-cloud-sdk-unofficial/google-cloud-sdk
lib/googlecloudsdk/command_lib/container/flags.py
[ "Apache-2.0" ]
Python
AddDataplaneV2Flag
null
def AddDataplaneV2Flag(parser, hidden=False):
  """Adds --enable-dataplane-v2 boolean flag."""
  help_text = """
Enables the new eBPF dataplane for GKE clusters that is required for
network security, scalability and visibility features.
"""
  parser.add_argument(
      '--enable-dataplane-v2',
      action='store_true',
      help=help_text,
      hidden=hidden)
Adds --enable-dataplane-v2 boolean flag.
-enable-dataplane-v2 boolean flag.
[ "-", "enable", "-", "dataplane", "-", "v2", "boolean", "flag", "." ]
def AddDataplaneV2Flag(parser, hidden=False):
  help_text = """
Enables the new eBPF dataplane for GKE clusters that is required for
network security, scalability and visibility features.
"""
  parser.add_argument(
      '--enable-dataplane-v2',
      action='store_true',
      help=help_text,
      hidden=hidden)
[ "def", "AddDataplaneV2Flag", "(", "parser", ",", "hidden", "=", "False", ")", ":", "help_text", "=", "\"\"\"\nEnables the new eBPF dataplane for GKE clusters that is required for\nnetwork security, scalability and visibility features.\n\"\"\"", "parser", ".", "add_argument", "(", "'--enable-dataplane-v2'", ",", "action", "=", "'store_true'", ",", "help", "=", "help_text", ",", "hidden", "=", "hidden", ")" ]
Adds --enable-dataplane-v2 boolean flag.
[ "Adds", "--", "enable", "-", "dataplane", "-", "v2", "boolean", "flag", "." ]
[ "\"\"\"Adds --enable-dataplane-v2 boolean flag.\"\"\"" ]
[ { "param": "parser", "type": null }, { "param": "hidden", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "hidden", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def AddDataplaneV2Flag(parser, hidden=False):
  help_text = """
Enables the new eBPF dataplane for GKE clusters that is required for
network security, scalability and visibility features.
"""
  parser.add_argument(
      '--enable-dataplane-v2',
      action='store_true',
      help=help_text,
      hidden=hidden)
923
61
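A usage sketch with a plain argparse parser standing in for the gcloud argument parser (the real flag is presumably registered on gcloud container commands).

    import argparse

    parser = argparse.ArgumentParser()
    AddDataplaneV2Flag(parser)
    args = parser.parse_args(["--enable-dataplane-v2"])
    print(args.enable_dataplane_v2)  # True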
136293e5a83c07b85d2536f9b53fc2029feb3692
EmmaRenauld/dwi_ml
dwi_ml/data/processing/space/neighborhood.py
[ "MIT" ]
Python
add_args_neighborhood
null
def add_args_neighborhood(p):
    """
    Optional arguments that should be added to an argparser to use a
    neighborhood.
    """
    n = p.add_mutually_exclusive_group()
    n.add_argument(
        '--sphere_radius', type=float,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition lies on a sphere. It will be a "
             "list of 6 positions \n(up, down, left, right, behind, in front) "
             "at exactly given radius around each point \nof the streamlines, "
             "in voxel space.")
    n.add_argument(
        '--grid_radius', type=int,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition uses a list of points similar to "
             "the original voxel grid \naround each point of the streamlines. "
             "Ex: with radius 1, that's 27 points. \nWith radius 2, that's "
             "125 points. Radius is in voxel space.")
Optional arguments that should be added to an argparser to use a neighborhood.
Optional arguments that should be added to an argparser to use a neighborhood.
[ "Optional", "arguments", "that", "should", "be", "added", "to", "an", "argparser", "to", "use", "a", "neighborhood", "." ]
def add_args_neighborhood(p):
    n = p.add_mutually_exclusive_group()
    n.add_argument(
        '--sphere_radius', type=float,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition lies on a sphere. It will be a "
             "list of 6 positions \n(up, down, left, right, behind, in front) "
             "at exactly given radius around each point \nof the streamlines, "
             "in voxel space.")
    n.add_argument(
        '--grid_radius', type=int,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition uses a list of points similar to "
             "the original voxel grid \naround each point of the streamlines. "
             "Ex: with radius 1, that's 27 points. \nWith radius 2, that's "
             "125 points. Radius is in voxel space.")
[ "def", "add_args_neighborhood", "(", "p", ")", ":", "n", "=", "p", ".", "add_mutually_exclusive_group", "(", ")", "n", ".", "add_argument", "(", "'--sphere_radius'", ",", "type", "=", "float", ",", "help", "=", "\"If set, a neighborhood will be added to the input information. \"", "\"This neighborhood \\ndefinition lies on a sphere. It will be a \"", "\"list of 6 positions \\n(up, down, left, right, behind, in front) \"", "\"at exactly given radius around each point \\nof the streamlines, \"", "\"in voxel space.\"", ")", "n", ".", "add_argument", "(", "'--grid_radius'", ",", "type", "=", "int", ",", "help", "=", "\"If set, a neighborhood will be added to the input information. \"", "\"This neighborhood \\ndefinition uses a list of points similar to \"", "\"the original voxel grid \\naround each point of the streamlines. \"", "\"Ex: with radius 1, that's 27 points. \\nWith radius 2, that's \"", "\"125 points. Radius is in voxel space.\"", ")" ]
Optional arguments that should be added to an argparser to use a neighborhood.
[ "Optional", "arguments", "that", "should", "be", "added", "to", "an", "argparser", "to", "use", "a", "neighborhood", "." ]
[ "\"\"\"\n Optional arguments that should be added to an argparser to use a\n neighborhood.\n \"\"\"" ]
[ { "param": "p", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "p", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_args_neighborhood(p):
    n = p.add_mutually_exclusive_group()
    n.add_argument(
        '--sphere_radius', type=float,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition lies on a sphere. It will be a "
             "list of 6 positions \n(up, down, left, right, behind, in front) "
             "at exactly given radius around each point \nof the streamlines, "
             "in voxel space.")
    n.add_argument(
        '--grid_radius', type=int,
        help="If set, a neighborhood will be added to the input information. "
             "This neighborhood \ndefinition uses a list of points similar to "
             "the original voxel grid \naround each point of the streamlines. "
             "Ex: with radius 1, that's 27 points. \nWith radius 2, that's "
             "125 points. Radius is in voxel space.")
924
422
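A usage sketch assuming the function is in scope:

    import argparse

    p = argparse.ArgumentParser()
    add_args_neighborhood(p)
    args = p.parse_args(["--sphere_radius", "1.5"])
    print(args.sphere_radius, args.grid_radius)  # 1.5 None
    # Passing both --sphere_radius and --grid_radius errors out, since the
    # two options live in a mutually exclusive group.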
d2e400dc5a2051432b13a879650431533e0736c5
zhlu9890/relation_engine_importers
src/modelseed/generate_genome_helpers.py
[ "MIT" ]
Python
generate_genome
null
def generate_genome(genbank):
    """
    Generate an import row for the genome with a link to the organism taxon.
    """
    row = {
        '_key': genbank.id,
        'name': genbank.name,
        'description': genbank.description,
        'molecule_type': genbank.annotations.get('molecule_type', ''),
        'topology': genbank.annotations.get('topology', ''),
        'data_file_division': genbank.annotations.get('data_file_division', ''),
        'date': genbank.annotations.get('date', ''),
        'accessions': genbank.annotations.get('accessions', []),
        'sequence_version': genbank.annotations.get('sequence_version', ''),
        'source': genbank.annotations.get('source', ''),
        'dbxrefs': genbank.dbxrefs,
        'organism_name': genbank.annotations.get('organism', ''),
        'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),
        'comment': genbank.annotations.get('comment', ''),
        'annotation_data': {}
    }
    annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})
    for (key, val) in annot_data.items():
        row['annotation_data'][key] = val
    yield row
Generate an import row for the genome with a link to the organism taxon.
Generate an import row for the genome with a link to the organism taxon.
[ "Generate", "an", "import", "row", "for", "the", "genome", "with", "a", "link", "to", "the", "organism", "taxon", "." ]
def generate_genome(genbank):
    row = {
        '_key': genbank.id,
        'name': genbank.name,
        'description': genbank.description,
        'molecule_type': genbank.annotations.get('molecule_type', ''),
        'topology': genbank.annotations.get('topology', ''),
        'data_file_division': genbank.annotations.get('data_file_division', ''),
        'date': genbank.annotations.get('date', ''),
        'accessions': genbank.annotations.get('accessions', []),
        'sequence_version': genbank.annotations.get('sequence_version', ''),
        'source': genbank.annotations.get('source', ''),
        'dbxrefs': genbank.dbxrefs,
        'organism_name': genbank.annotations.get('organism', ''),
        'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),
        'comment': genbank.annotations.get('comment', ''),
        'annotation_data': {}
    }
    annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})
    for (key, val) in annot_data.items():
        row['annotation_data'][key] = val
    yield row
[ "def", "generate_genome", "(", "genbank", ")", ":", "row", "=", "{", "'_key'", ":", "genbank", ".", "id", ",", "'name'", ":", "genbank", ".", "name", ",", "'description'", ":", "genbank", ".", "description", ",", "'molecule_type'", ":", "genbank", ".", "annotations", ".", "get", "(", "'molecule_type'", ",", "''", ")", ",", "'topology'", ":", "genbank", ".", "annotations", ".", "get", "(", "'topology'", ",", "''", ")", ",", "'data_file_division'", ":", "genbank", ".", "annotations", ".", "get", "(", "'data_file_division'", ",", "''", ")", ",", "'date'", ":", "genbank", ".", "annotations", ".", "get", "(", "'date'", ",", "''", ")", ",", "'accessions'", ":", "genbank", ".", "annotations", ".", "get", "(", "'accessions'", ",", "[", "]", ")", ",", "'sequence_version'", ":", "genbank", ".", "annotations", ".", "get", "(", "'sequence_version'", ",", "''", ")", ",", "'source'", ":", "genbank", ".", "annotations", ".", "get", "(", "'source'", ",", "''", ")", ",", "'dbxrefs'", ":", "genbank", ".", "dbxrefs", ",", "'organism_name'", ":", "genbank", ".", "annotations", ".", "get", "(", "'organism'", ",", "''", ")", ",", "'taxonomy'", ":", "', '", ".", "join", "(", "genbank", ".", "annotations", ".", "get", "(", "'taxonomy'", ",", "''", ")", ")", ",", "'comment'", ":", "genbank", ".", "annotations", ".", "get", "(", "'comment'", ",", "''", ")", ",", "'annotation_data'", ":", "{", "}", "}", "annot_data", "=", "genbank", ".", "annotations", ".", "get", "(", "'structured_comment'", ",", "{", "}", ")", ".", "get", "(", "'Genome-Annotation-Data'", ",", "{", "}", ")", "for", "(", "key", ",", "val", ")", "in", "annot_data", ".", "items", "(", ")", ":", "row", "[", "'annotation_data'", "]", "[", "key", "]", "=", "val", "yield", "row" ]
Generate an import row for the genome with a link to the organism taxon.
[ "Generate", "an", "import", "row", "for", "the", "genome", "with", "a", "link", "to", "the", "organism", "taxon", "." ]
[ "\"\"\"\n Generate an import row for the genome with a link to the organism taxon.\n \"\"\"" ]
[ { "param": "genbank", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "genbank", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generate_genome(genbank):
    row = {
        '_key': genbank.id,
        'name': genbank.name,
        'description': genbank.description,
        'molecule_type': genbank.annotations.get('molecule_type', ''),
        'topology': genbank.annotations.get('topology', ''),
        'data_file_division': genbank.annotations.get('data_file_division', ''),
        'date': genbank.annotations.get('date', ''),
        'accessions': genbank.annotations.get('accessions', []),
        'sequence_version': genbank.annotations.get('sequence_version', ''),
        'source': genbank.annotations.get('source', ''),
        'dbxrefs': genbank.dbxrefs,
        'organism_name': genbank.annotations.get('organism', ''),
        'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),
        'comment': genbank.annotations.get('comment', ''),
        'annotation_data': {}
    }
    annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})
    for (key, val) in annot_data.items():
        row['annotation_data'][key] = val
    yield row
925
25
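A usage sketch with a SimpleNamespace standing in for the GenBank record (presumably a Biopython SeqRecord) that the real pipeline passes in; all field values here are made up.

    from types import SimpleNamespace

    gb = SimpleNamespace(
        id="NC_000913.3", name="NC_000913", description="E. coli K-12",
        dbxrefs=[],
        annotations={"organism": "Escherichia coli",
                     "taxonomy": ["Bacteria", "Proteobacteria"]},
    )
    row = next(generate_genome(gb))
    print(row["_key"], row["organism_name"], row["taxonomy"])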
197edacc60a18c9ef01f008816c6c91a96513ef6
vyshakhgnair/dffml
dffml/util/entrypoint.py
[ "MIT" ]
Python
base_entry_point
<not_specific>
def base_entry_point(entrypoint, *args):
    """
    Any class which subclasses from Entrypoint needs this decorator applied to
    it. The decorator sets the ENTRYPOINT and ENTRY_POINT_NAME properties on
    the class.

    This allows the load() classmethod to be called to load subclasses of the
    class being decorated. This is how the subclasses get loaded via the
    entry point system by calling BaseClass.load().

    ENTRY_POINT_NAME corresponds to the command line argument and config file
    reference to the class. It comes from all arguments after the entrypoint
    argument (first argument) is a list which would turn into an command line
    argument if it were joined with hyphens.

    Examples
    --------

    >>> from dffml import base_entry_point, Entrypoint
    >>>
    >>> @base_entry_point('dffml.entrypoint', 'entrypoint')
    ... class BaseEntrypointSubclassClass(Entrypoint): pass

    .. code-block:: python

        entry_points={
            # dffml.entrypoint = ENTRYPOINT
            'dffml.entrypoint': [
                'mylabel = module.path.to:EntrypointSubclassClass',
            ]
        }
    """
    def add_entry_point_and_name(cls):
        cls.ENTRYPOINT = entrypoint
        cls.ENTRY_POINT_NAME = list(args)
        return cls

    return add_entry_point_and_name
Any class which subclasses from Entrypoint needs this decorator applied to it. The decorator sets the ENTRYPOINT and ENTRY_POINT_NAME properties on the class.

This allows the load() classmethod to be called to load subclasses of the class being decorated. This is how the subclasses get loaded via the entry point system by calling BaseClass.load().

ENTRY_POINT_NAME corresponds to the command line argument and config file reference to the class. It comes from all arguments after the entrypoint argument (first argument) is a list which would turn into an command line argument if it were joined with hyphens.

Examples
--------

>>> from dffml import base_entry_point, Entrypoint
>>>
>>> @base_entry_point('dffml.entrypoint', 'entrypoint')
... class BaseEntrypointSubclassClass(Entrypoint): pass

.. code-block:: python

    entry_points={
        # dffml.entrypoint = ENTRYPOINT
        'dffml.entrypoint': [
            'mylabel = module.path.to:EntrypointSubclassClass',
        ]
    }
Any class which subclasses from Entrypoint needs this decorator applied to it. The decorator sets the ENTRYPOINT and ENTRY_POINT_NAME properties on the class. This allows the load() classmethod to be called to load subclasses of the class being decorated. This is how the subclasses get loaded via the entry point system by calling BaseClass.load(). ENTRY_POINT_NAME corresponds to the command line argument and config file reference to the class. It comes from all arguments after the entrypoint argument (first argument) is a list which would turn into an command line argument if it were joined with hyphens. Examples code-block:: python
[ "Any", "class", "which", "subclasses", "from", "Entrypoint", "needs", "this", "decorator", "applied", "to", "it", ".", "The", "decorator", "sets", "the", "ENTRYPOINT", "and", "ENTRY_POINT_NAME", "properties", "on", "the", "class", ".", "This", "allows", "the", "load", "()", "classmethod", "to", "be", "called", "to", "load", "subclasses", "of", "the", "class", "being", "decorated", ".", "This", "is", "how", "the", "subclasses", "get", "loaded", "via", "the", "entry", "point", "system", "by", "calling", "BaseClass", ".", "load", "()", ".", "ENTRY_POINT_NAME", "corresponds", "to", "the", "command", "line", "argument", "and", "config", "file", "reference", "to", "the", "class", ".", "It", "comes", "from", "all", "arguments", "after", "the", "entrypoint", "argument", "(", "first", "argument", ")", "is", "a", "list", "which", "would", "turn", "into", "an", "command", "line", "argument", "if", "it", "were", "joined", "with", "hyphens", ".", "Examples", "code", "-", "block", "::", "python" ]
def base_entry_point(entrypoint, *args):
    def add_entry_point_and_name(cls):
        cls.ENTRYPOINT = entrypoint
        cls.ENTRY_POINT_NAME = list(args)
        return cls

    return add_entry_point_and_name
[ "def", "base_entry_point", "(", "entrypoint", ",", "*", "args", ")", ":", "def", "add_entry_point_and_name", "(", "cls", ")", ":", "cls", ".", "ENTRYPOINT", "=", "entrypoint", "cls", ".", "ENTRY_POINT_NAME", "=", "list", "(", "args", ")", "return", "cls", "return", "add_entry_point_and_name" ]
Any class which subclasses from Entrypoint needs this decorator applied to it.
[ "Any", "class", "which", "subclasses", "from", "Entrypoint", "needs", "this", "decorator", "applied", "to", "it", "." ]
[ "\"\"\"\n Any class which subclasses from Entrypoint needs this decorator applied to\n it. The decorator sets the ENTRYPOINT and ENTRY_POINT_NAME properties on\n the class.\n\n This allows the load() classmethod to be called to load subclasses of the\n class being decorated. This is how the subclasses get loaded via the\n entry point system by calling BaseClass.load().\n\n ENTRY_POINT_NAME corresponds to the command line argument and config file\n reference to the class. It comes from all arguments after the entrypoint\n argument (first argument) is a list which would turn into an command line\n argument if it were joined with hyphens.\n\n Examples\n --------\n\n >>> from dffml import base_entry_point, Entrypoint\n >>>\n >>> @base_entry_point('dffml.entrypoint', 'entrypoint')\n ... class BaseEntrypointSubclassClass(Entrypoint): pass\n\n .. code-block:: python\n\n entry_points={\n # dffml.entrypoint = ENTRYPOINT\n 'dffml.entrypoint': [\n 'mylabel = module.path.to:EntrypointSubclassClass',\n ]\n }\n \"\"\"" ]
[ { "param": "entrypoint", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "entrypoint", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def base_entry_point(entrypoint, *args):
    def add_entry_point_and_name(cls):
        cls.ENTRYPOINT = entrypoint
        cls.ENTRY_POINT_NAME = list(args)
        return cls

    return add_entry_point_and_name
926
714
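The decorator only sets two class attributes, so it can be exercised without the rest of dffml; here a plain class stands in for an Entrypoint subclass.

    @base_entry_point("dffml.mytype", "my", "type")
    class MyBase:
        pass

    print(MyBase.ENTRYPOINT)        # dffml.mytype
    print(MyBase.ENTRY_POINT_NAME)  # ['my', 'type']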
12ea706896f78b7b6f865724da89e737d01ef07b
Apstra/aos-ansible
library/aos.py
[ "Apache-2.0" ]
Python
validate_vni_ranges
<not_specific>
def validate_vni_ranges(ranges):
    """
    Validate VNI ranges provided are valid and properly formatted
    :param ranges: list
    :return: list
    """
    errors = []
    for vni_range in ranges:
        if not isinstance(vni_range, list):
            errors.append("Invalid range: must be a list")
        elif len(vni_range) != 2:
            errors.append("Invalid range: must be a list of 2 members")
        elif any(map(lambda r: not isinstance(r, int), vni_range)):
            errors.append("Invalid range: Expected integer values")
        elif vni_range[1] <= vni_range[0]:
            errors.append("Invalid range: 2nd element must be bigger than 1st")
        elif vni_range[0] < 4095 or vni_range[1] > 16777213:
            errors.append("Invalid range: must be a valid range between 4096"
                          " and 16777214")
    return errors
Validate VNI ranges provided are valid and properly formatted

:param ranges: list
:return: list
Validate VNI ranges provided are valid and properly formatted
[ "Validate", "VNI", "ranges", "provided", "are", "valid", "and", "properly", "formatted" ]
def validate_vni_ranges(ranges):
    errors = []
    for vni_range in ranges:
        if not isinstance(vni_range, list):
            errors.append("Invalid range: must be a list")
        elif len(vni_range) != 2:
            errors.append("Invalid range: must be a list of 2 members")
        elif any(map(lambda r: not isinstance(r, int), vni_range)):
            errors.append("Invalid range: Expected integer values")
        elif vni_range[1] <= vni_range[0]:
            errors.append("Invalid range: 2nd element must be bigger than 1st")
        elif vni_range[0] < 4095 or vni_range[1] > 16777213:
            errors.append("Invalid range: must be a valid range between 4096"
                          " and 16777214")
    return errors
[ "def", "validate_vni_ranges", "(", "ranges", ")", ":", "errors", "=", "[", "]", "for", "vni_range", "in", "ranges", ":", "if", "not", "isinstance", "(", "vni_range", ",", "list", ")", ":", "errors", ".", "append", "(", "\"Invalid range: must be a list\"", ")", "elif", "len", "(", "vni_range", ")", "!=", "2", ":", "errors", ".", "append", "(", "\"Invalid range: must be a list of 2 members\"", ")", "elif", "any", "(", "map", "(", "lambda", "r", ":", "not", "isinstance", "(", "r", ",", "int", ")", ",", "vni_range", ")", ")", ":", "errors", ".", "append", "(", "\"Invalid range: Expected integer values\"", ")", "elif", "vni_range", "[", "1", "]", "<=", "vni_range", "[", "0", "]", ":", "errors", ".", "append", "(", "\"Invalid range: 2nd element must be bigger than 1st\"", ")", "elif", "vni_range", "[", "0", "]", "<", "4095", "or", "vni_range", "[", "1", "]", ">", "16777213", ":", "errors", ".", "append", "(", "\"Invalid range: must be a valid range between 4096\"", "\" and 16777214\"", ")", "return", "errors" ]
Validate VNI ranges provided are valid and properly formatted
[ "Validate", "VNI", "ranges", "provided", "are", "valid", "and", "properly", "formatted" ]
[ "\"\"\"\n Validate VNI ranges provided are valid and properly formatted\n :param ranges: list\n :return: list\n \"\"\"" ]
[ { "param": "ranges", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "ranges", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def validate_vni_ranges(ranges):
    errors = []
    for vni_range in ranges:
        if not isinstance(vni_range, list):
            errors.append("Invalid range: must be a list")
        elif len(vni_range) != 2:
            errors.append("Invalid range: must be a list of 2 members")
        elif any(map(lambda r: not isinstance(r, int), vni_range)):
            errors.append("Invalid range: Expected integer values")
        elif vni_range[1] <= vni_range[0]:
            errors.append("Invalid range: 2nd element must be bigger than 1st")
        elif vni_range[0] < 4095 or vni_range[1] > 16777213:
            errors.append("Invalid range: must be a valid range between 4096"
                          " and 16777214")
    return errors
927
527
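A usage sketch assuming the function is in scope:

    print(validate_vni_ranges([[5000, 6000]]))   # [] -- valid
    print(validate_vni_ranges([[6000, 5000]]))   # 2nd element must be bigger than 1st
    print(validate_vni_ranges([[100, 200], 7]))  # out-of-range and non-list errors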
972c8ccbf607ede880733f6c9666f2e076b77d60
yasinalm/gramme
train_mono.py
[ "MIT" ]
Python
disp_to_depth
<not_specific>
def disp_to_depth(disp):
    """Convert network's sigmoid output into depth prediction
    The formula for this conversion is given in the 'additional considerations'
    section of the paper.
    """
    # Disp is not scaled in DispResNet.
    min_depth = 0.1
    max_depth = 100.0
    min_disp = 1 / max_depth
    max_disp = 1 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1 / scaled_disp
    # disp = disp.clamp(min=1e-3)
    # depth = 1./disp
    return depth
Convert network's sigmoid output into depth prediction The formula for this conversion is given in the 'additional considerations' section of the paper.
Convert network's sigmoid output into depth prediction The formula for this conversion is given in the 'additional considerations' section of the paper.
[ "Convert", "network", "'", "s", "sigmoid", "output", "into", "depth", "prediction", "The", "formula", "for", "this", "conversion", "is", "given", "in", "the", "'", "additional", "considerations", "'", "section", "of", "the", "paper", "." ]
def disp_to_depth(disp):
    min_depth = 0.1
    max_depth = 100.0
    min_disp = 1 / max_depth
    max_disp = 1 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1 / scaled_disp
    return depth
[ "def", "disp_to_depth", "(", "disp", ")", ":", "min_depth", "=", "0.1", "max_depth", "=", "100.0", "min_disp", "=", "1", "/", "max_depth", "max_disp", "=", "1", "/", "min_depth", "scaled_disp", "=", "min_disp", "+", "(", "max_disp", "-", "min_disp", ")", "*", "disp", "depth", "=", "1", "/", "scaled_disp", "return", "depth" ]
Convert network's sigmoid output into depth prediction The formula for this conversion is given in the 'additional considerations' section of the paper.
[ "Convert", "network", "'", "s", "sigmoid", "output", "into", "depth", "prediction", "The", "formula", "for", "this", "conversion", "is", "given", "in", "the", "'", "additional", "considerations", "'", "section", "of", "the", "paper", "." ]
[ "\"\"\"Convert network's sigmoid output into depth prediction\n The formula for this conversion is given in the 'additional considerations'\n section of the paper.\n \"\"\"", "# Disp is not scaled in DispResNet.", "# disp = disp.clamp(min=1e-3)", "# depth = 1./disp" ]
[ { "param": "disp", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "disp", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def disp_to_depth(disp):
    min_depth = 0.1
    max_depth = 100.0
    min_disp = 1 / max_depth
    max_disp = 1 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1 / scaled_disp
    return depth
928
204
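A usage sketch with NumPy; with min/max depths of 0.1 and 100, a disparity of 0 maps to the maximum depth and 1 to the minimum.

    import numpy as np

    disp = np.array([0.0, 0.5, 1.0])  # sigmoid outputs in [0, 1]
    print(disp_to_depth(disp))        # [100.0, ~0.2, 0.1]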
f1c92b49fa15a2fe1966ff883d8c696c2b5cf9c7
rand-projects/fisb-decode
db/location/createsuadb.py
[ "BSD-2-Clause-Patent" ]
Python
addToCollection
null
def addToCollection(db, id, feature):
    """Add a new item to the collection, or append item to existing collection.

    Will add ``feature`` to the collection if it doesn't exist. If
    it does, will append it to the existing item. I.e.: add the
    feature to the ``FeatureCollection`` ``features`` list.

    Args:
        db (object): Handle to database.
        id (str): Name of object to add. Any dashes in the name should be
            removed because the NOTAM-D SUA will remove them.
        feature (dict): Dictionary containing the SUA item to add.
    """
    # Some (many) items have more than a single entry. In that case we append
    # the new entry to the old.
    obj = db.SUA.find_one({'_id': id})

    if obj is not None:
        # Add this feature set and replace.
        obj['features'].append(feature)
        db.SUA.replace_one({'_id': id}, obj, upsert=True)
        print('Adding to:', id)
    else:
        # Add a totally new item.
        db.SUA.insert_one({'_id': id, 'type': 'FeatureCollection',
                           'features': [feature]})
Add a new item to the collection, or append item to existing collection.

Will add ``feature`` to the collection if it doesn't exist. If it does, will append it to the existing item. I.e.: add the feature to the ``FeatureCollection`` ``features`` list.

Args:
    db (object): Handle to database.
    id (str): Name of object to add. Any dashes in the name should be removed because the NOTAM-D SUA will remove them.
    feature (dict): Dictionary containing the SUA item to add.
Add a new item to the collection, or append item to existing collection. Will add ``feature`` to the collection if it doesn't exist. If it does, will append it to the existing item.
[ "Add", "a", "new", "item", "to", "the", "collection", "or", "append", "item", "to", "existing", "collection", ".", "Will", "add", "`", "`", "feature", "`", "`", "to", "the", "collection", "if", "it", "doesn", "'", "t", "exist", ".", "If", "it", "does", "will", "append", "it", "to", "the", "existing", "item", "." ]
def addToCollection(db, id, feature):
    obj = db.SUA.find_one({'_id': id})

    if obj is not None:
        obj['features'].append(feature)
        db.SUA.replace_one({'_id': id}, obj, upsert=True)
        print('Adding to:', id)
    else:
        db.SUA.insert_one({'_id': id, 'type': 'FeatureCollection',
                           'features': [feature]})
[ "def", "addToCollection", "(", "db", ",", "id", ",", "feature", ")", ":", "obj", "=", "db", ".", "SUA", ".", "find_one", "(", "{", "'_id'", ":", "id", "}", ")", "if", "obj", "is", "not", "None", ":", "obj", "[", "'features'", "]", ".", "append", "(", "feature", ")", "db", ".", "SUA", ".", "replace_one", "(", "{", "'_id'", ":", "id", "}", ",", "obj", ",", "upsert", "=", "True", ")", "print", "(", "'Adding to:'", ",", "id", ")", "else", ":", "db", ".", "SUA", ".", "insert_one", "(", "{", "'_id'", ":", "id", ",", "'type'", ":", "'FeatureCollection'", ",", "'features'", ":", "[", "feature", "]", "}", ")" ]
Add a new item to the collection, or append item to existing collection.
[ "Add", "a", "new", "item", "to", "the", "collection", "or", "append", "item", "to", "existing", "collection", "." ]
[ "\"\"\"Add a new item to the collection, or append item to existing collection.\n\n Will add ``feature`` to the collection if it doesn't exist. If\n it does, will append it to the existing item. I.e.: add the\n feature to the ``FeatureCollection`` ``features`` list.\n\n Args:\n db (object): Handle to database.\n id (str): Name of object to add. Any dashes in the name should be \n removed because the NOTAM-D SUA will remove them.\n feature (dict): Dictionary containing the SUA item to add.\n \"\"\"", "# Some (many) items have more than a single entry. In that case we append", "# the new entry to the old.", "# Add this feature set and replace.", "# Add a totally new item." ]
[ { "param": "db", "type": null }, { "param": "id", "type": null }, { "param": "feature", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "db", "type": null, "docstring": "Handle to database.", "docstring_tokens": [ "Handle", "to", "database", "." ], "default": null, "is_optional": false }, { "identifier": "id", "type": null, "docstring": "Name of object to add. Any dashes in the name should be\nremoved because the NOTAM-D SUA will remove them.", "docstring_tokens": [ "Name", "of", "object", "to", "add", ".", "Any", "dashes", "in", "the", "name", "should", "be", "removed", "because", "the", "NOTAM", "-", "D", "SUA", "will", "remove", "them", "." ], "default": null, "is_optional": false }, { "identifier": "feature", "type": null, "docstring": "Dictionary containing the SUA item to add.", "docstring_tokens": [ "Dictionary", "containing", "the", "SUA", "item", "to", "add", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def addToCollection(db, id, feature): obj = db.SUA.find_one({'_id': id}) if obj is not None: obj['features'].append(feature) db.SUA.replace_one({'_id': id}, obj, upsert=True) print('Adding to:', id) else: db.SUA.insert_one({'_id': id, 'type': 'FeatureCollection', 'features': [feature]})
929
257
1d7b11bb157253100daf5c42009803376396a7c4
EliahKagan/old-practice-snapshot
main/smallest-string-with-swaps/smallest-string-with-swaps-bfs-inplace.py
[ "0BSD" ]
Python
components
null
def components(adj: List[List[int]]): """Yields components of a graph. Finds them via BFS.""" vis = [False] * len(adj) for start in range(len(adj)): if vis[start]: continue vis[start] = True component = [start] with contextlib.suppress(IndexError): for i in itertools.count(): for dest in adj[component[i]]: if vis[dest]: continue vis[dest] = True component.append(dest) yield component
Yields components of a graph. Finds them via BFS.
Yields components of a graph. Finds them via BFS.
[ "Yields", "components", "of", "a", "graph", ".", "Finds", "them", "via", "BFS", "." ]
def components(adj: List[List[int]]): vis = [False] * len(adj) for start in range(len(adj)): if vis[start]: continue vis[start] = True component = [start] with contextlib.suppress(IndexError): for i in itertools.count(): for dest in adj[component[i]]: if vis[dest]: continue vis[dest] = True component.append(dest) yield component
[ "def", "components", "(", "adj", ":", "List", "[", "List", "[", "int", "]", "]", ")", ":", "vis", "=", "[", "False", "]", "*", "len", "(", "adj", ")", "for", "start", "in", "range", "(", "len", "(", "adj", ")", ")", ":", "if", "vis", "[", "start", "]", ":", "continue", "vis", "[", "start", "]", "=", "True", "component", "=", "[", "start", "]", "with", "contextlib", ".", "suppress", "(", "IndexError", ")", ":", "for", "i", "in", "itertools", ".", "count", "(", ")", ":", "for", "dest", "in", "adj", "[", "component", "[", "i", "]", "]", ":", "if", "vis", "[", "dest", "]", ":", "continue", "vis", "[", "dest", "]", "=", "True", "component", ".", "append", "(", "dest", ")", "yield", "component" ]
Yields components of a graph.
[ "Yields", "components", "of", "a", "graph", "." ]
[ "\"\"\"Yields components of a graph. Finds them via BFS.\"\"\"" ]
[ { "param": "adj", "type": "List[List[int]]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "adj", "type": "List[List[int]]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from typing import List import contextlib import itertools def components(adj: List[List[int]]): vis = [False] * len(adj) for start in range(len(adj)): if vis[start]: continue vis[start] = True component = [start] with contextlib.suppress(IndexError): for i in itertools.count(): for dest in adj[component[i]]: if vis[dest]: continue vis[dest] = True component.append(dest) yield component
930
532
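A minimal usage sketch for components (the adjacency list is illustrative; the function from the record above, with its imports, is assumed to be in scope):

adj = [[1], [0], [3], [2], []]   # edges 0-1 and 2-3; node 4 is isolated
print(list(components(adj)))     # [[0, 1], [2, 3], [4]]

The suppressed IndexError is what terminates the inner BFS walk once every queued vertex in the current component has been expanded.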
e83e297015f4f5815d966163601c2418de82469c
markhend/demo
cicd/code/image_factory.py
[ "Apache-2.0" ]
Python
_init_local
null
def _init_local(cls, from_dir): """ Init from local git checkout, copy local code into image. Just set `copy_dir` to the local root of this git checkout. Livedebug is automatically enabled because we use `copy_dir`. """ cls.copy_dir = cls.context cls.copy_url = None cls.copy_branch = None cls.path_map = None cls.git_env = { "GIT_SHA1": os.environ.get("GIT_SHA1") or \ cls._shell(f"git -C {from_dir} rev-parse HEAD"), "GIT_BRANCH": os.environ.get("GIT_BRANCH") or \ cls._shell(f"git -C {from_dir} rev-parse --abbrev-ref HEAD"), }
Init from local git checkout, copy local code into image. Just set `copy_dir` to the local root of this git checkout. Livedebug is automatically enabled because we use `copy_dir`.
Init from local git checkout, copy local code into image. Just set `copy_dir` to the local root of this git checkout. Livedebug is automatically enabled because we use `copy_dir`.
[ "Init", "from", "local", "git", "checkout", "copy", "local", "code", "into", "image", ".", "Just", "set", "`", "copy_dir", "`", "to", "the", "local", "root", "of", "this", "git", "checkout", ".", "Livedebug", "is", "automatically", "enabled", "because", "we", "use", "`", "copy_dir", "`", "." ]
def _init_local(cls, from_dir): cls.copy_dir = cls.context cls.copy_url = None cls.copy_branch = None cls.path_map = None cls.git_env = { "GIT_SHA1": os.environ.get("GIT_SHA1") or \ cls._shell(f"git -C {from_dir} rev-parse HEAD"), "GIT_BRANCH": os.environ.get("GIT_BRANCH") or \ cls._shell(f"git -C {from_dir} rev-parse --abbrev-ref HEAD"), }
[ "def", "_init_local", "(", "cls", ",", "from_dir", ")", ":", "cls", ".", "copy_dir", "=", "cls", ".", "context", "cls", ".", "copy_url", "=", "None", "cls", ".", "copy_branch", "=", "None", "cls", ".", "path_map", "=", "None", "cls", ".", "git_env", "=", "{", "\"GIT_SHA1\"", ":", "os", ".", "environ", ".", "get", "(", "\"GIT_SHA1\"", ")", "or", "cls", ".", "_shell", "(", "f\"git -C {from_dir} rev-parse HEAD\"", ")", ",", "\"GIT_BRANCH\"", ":", "os", ".", "environ", ".", "get", "(", "\"GIT_BRANCH\"", ")", "or", "cls", ".", "_shell", "(", "f\"git -C {from_dir} rev-parse --abbrev-ref HEAD\"", ")", ",", "}" ]
Init from local git checkout, copy local code into image.
[ "Init", "from", "local", "git", "checkout", "copy", "local", "code", "into", "image", "." ]
[ "\"\"\"\n Init from local git checkout, copy local code into image.\n Just set `copy_dir` to the local root of this git checkout.\n Livedebug is automatically enabled because we use `copy_dir`.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "from_dir", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "from_dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def _init_local(cls, from_dir): cls.copy_dir = cls.context cls.copy_url = None cls.copy_branch = None cls.path_map = None cls.git_env = { "GIT_SHA1": os.environ.get("GIT_SHA1") or \ cls._shell(f"git -C {from_dir} rev-parse HEAD"), "GIT_BRANCH": os.environ.get("GIT_BRANCH") or \ cls._shell(f"git -C {from_dir} rev-parse --abbrev-ref HEAD"), }
931
766
7b5bd6f8aa3d24c31be8c884850cb2ca556550fc
kellyhb/LISAexoplanets
python/gwTools.py
[ "MIT" ]
Python
aNmin
<not_specific>
def aNmin(ecc): """Check whether ecc == 0 and return either 2 or 1. """ if ecc == 0: return(2) else: return(1)
Check whether ecc == 0 and return either 2 or 1.
Check whether ecc == 0 and return either 2 or 1.
[ "Check", "whether", "ecc", "==", "0", "and", "return", "either", "2", "or", "1", "." ]
def aNmin(ecc): if ecc == 0: return(2) else: return(1)
[ "def", "aNmin", "(", "ecc", ")", ":", "if", "ecc", "==", "0", ":", "return", "(", "2", ")", "else", ":", "return", "(", "1", ")" ]
Check whether ecc == 0 and return either 2 or 1.
[ "Check", "whether", "ecc", "==", "0", "and", "return", "either", "2", "or", "1", "." ]
[ "\"\"\"Check whether ecc == 0 and return either 2 or 1.\n    \"\"\"" ]
[ { "param": "ecc", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ecc", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def aNmin(ecc): if ecc == 0: return(2) else: return(1)
932
360
79412ad4772d8e039983c05481ed4a3fd1fafadd
stevenc987/namex
api/namex/services/nro/utils.py
[ "Apache-2.0" ]
Python
row_to_dict
<not_specific>
def row_to_dict(row): """ This takes a row from a resultset and returns a dict with the same structure :param row: :return: dict """ return {key: value for (key, value) in row.items()}
This takes a row from a resultset and returns a dict with the same structure :param row: :return: dict
This takes a row from a resultset and returns a dict with the same structure
[ "This", "takes", "a", "row", "from", "a", "resultset", "and", "returns", "a", "dict", "with", "the", "same", "structure" ]
def row_to_dict(row): return {key: value for (key, value) in row.items()}
[ "def", "row_to_dict", "(", "row", ")", ":", "return", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "row", ".", "items", "(", ")", "}" ]
This takes a row from a resultset and returns a dict with the same structure
[ "This", "takes", "a", "row", "from", "a", "resultset", "and", "returns", "a", "dict", "with", "the", "same", "structure" ]
[ "\"\"\"\n This takes a row from a resultset and returns a dict with the same structure\n :param row:\n :return: dict\n \"\"\"" ]
[ { "param": "row", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "row", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def row_to_dict(row): return {key: value for (key, value) in row.items()}
933
89
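A quick sketch for row_to_dict; a plain dict stands in for a SQLAlchemy-style result row, since the function only relies on .items() (the field names are hypothetical):

# assumes row_to_dict() from the record above is in scope
row = {"nr_num": "NR 1234567", "state": "DRAFT"}
print(row_to_dict(row))  # {'nr_num': 'NR 1234567', 'state': 'DRAFT'}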
9041d037d0f15908e29ce6f0be9fb88815a32570
jupyterlab-bot/jupyter_releaser
jupyter_releaser/npm.py
[ "BSD-3-Clause" ]
Python
extract_package
<not_specific>
def extract_package(path): """Get the package json info from the tarball""" fid = tarfile.open(path) data = fid.extractfile("package/package.json").read() data = json.loads(data.decode("utf-8")) fid.close() return data
Get the package json info from the tarball
Get the package json info from the tarball
[ "Get", "the", "package", "json", "info", "from", "the", "tarball" ]
def extract_package(path): fid = tarfile.open(path) data = fid.extractfile("package/package.json").read() data = json.loads(data.decode("utf-8")) fid.close() return data
[ "def", "extract_package", "(", "path", ")", ":", "fid", "=", "tarfile", ".", "open", "(", "path", ")", "data", "=", "fid", ".", "extractfile", "(", "\"package/package.json\"", ")", ".", "read", "(", ")", "data", "=", "json", ".", "loads", "(", "data", ".", "decode", "(", "\"utf-8\"", ")", ")", "fid", ".", "close", "(", ")", "return", "data" ]
Get the package json info from the tarball
[ "Get", "the", "package", "json", "info", "from", "the", "tarball" ]
[ "\"\"\"Get the package json info from the tarball\"\"\"" ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import tarfile import json def extract_package(path): fid = tarfile.open(path) data = fid.extractfile("package/package.json").read() data = json.loads(data.decode("utf-8")) fid.close() return data
934
200
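A self-contained sketch for extract_package, building a throwaway npm-style tarball on disk first (the package name and version are illustrative; extract_package() from the record above is assumed to be in scope):

import io
import json
import tarfile
import tempfile

tmp = tempfile.NamedTemporaryFile(suffix=".tgz", delete=False)
tmp.close()
payload = json.dumps({"name": "demo-pkg", "version": "1.2.3"}).encode("utf-8")
with tarfile.open(tmp.name, "w:gz") as tar:
    info = tarfile.TarInfo("package/package.json")  # the layout npm pack uses
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))
print(extract_package(tmp.name))  # {'name': 'demo-pkg', 'version': '1.2.3'}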
b2b913a6e42ddb34d9037e17a24726a454ad3937
benjisympa/HierarchicalRNN
model.py
[ "MIT" ]
Python
sort_sequences
<not_specific>
def sort_sequences(inputs, lengths): """sort_sequences Sort sequences according to lengths descendingly. :param inputs (Tensor): input sequences, size [L, B, D] :param lengths (Tensor): length of each sequence, size [B] """ lengths_sorted, sorted_idx = lengths.sort(descending=True) _, unsorted_idx = sorted_idx.sort() return inputs[:, sorted_idx, :], lengths_sorted, unsorted_idx
sort_sequences Sort sequences according to lengths descendingly. :param inputs (Tensor): input sequences, size [L, B, D] :param lengths (Tensor): length of each sequence, size [B]
sort_sequences Sort sequences according to lengths descendingly.
[ "sort_sequences", "Sort", "sequences", "according", "to", "lengths", "descendingly", "." ]
def sort_sequences(inputs, lengths): lengths_sorted, sorted_idx = lengths.sort(descending=True) _, unsorted_idx = sorted_idx.sort() return inputs[:, sorted_idx, :], lengths_sorted, unsorted_idx
[ "def", "sort_sequences", "(", "inputs", ",", "lengths", ")", ":", "lengths_sorted", ",", "sorted_idx", "=", "lengths", ".", "sort", "(", "descending", "=", "True", ")", "_", ",", "unsorted_idx", "=", "sorted_idx", ".", "sort", "(", ")", "return", "inputs", "[", ":", ",", "sorted_idx", ",", ":", "]", ",", "lengths_sorted", ",", "unsorted_idx" ]
sort_sequences Sort sequences according to lengths descendingly.
[ "sort_sequences", "Sort", "sequences", "according", "to", "lengths", "descendingly", "." ]
[ "\"\"\"sort_sequences\n Sort sequences according to lengths descendingly.\n\n :param inputs (Tensor): input sequences, size [L, B, D]\n :param lengths (Tensor): length of each sequence, size [B]\n \"\"\"" ]
[ { "param": "inputs", "type": null }, { "param": "lengths", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "inputs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "lengths", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "(Tensor)", "type": null, "docstring": "length of each sequence, size [B]", "docstring_tokens": [ "length", "of", "each", "sequence", "size", "[", "B", "]" ], "default": null, "is_optional": false } ], "others": [] }
def sort_sequences(inputs, lengths): lengths_sorted, sorted_idx = lengths.sort(descending=True) _, unsorted_idx = sorted_idx.sort() return inputs[:, sorted_idx, :], lengths_sorted, unsorted_idx
936
348
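A sketch for sort_sequences, assuming PyTorch tensors as the docstring implies (the shapes are illustrative); unsorted_idx is what lets you undo the reordering after packed-RNN processing:

import torch

# assumes sort_sequences() from the record above is in scope
inputs = torch.randn(5, 3, 4)       # [L, B, D]
lengths = torch.tensor([2, 5, 3])   # [B]
sorted_inputs, lengths_sorted, unsorted_idx = sort_sequences(inputs, lengths)
print(lengths_sorted)               # tensor([5, 3, 2])
restored = sorted_inputs[:, unsorted_idx, :]
assert torch.equal(restored, inputs)  # original batch order recovered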
4a1c1b1ff53cf1e4e1767c2757adea2ad022d425
aldoram5/NLP-Utils
utils/string_utils.py
[ "MIT" ]
Python
normalize
<not_specific>
def normalize(line, accepted_chars='abcdefghijklmnopqrstuvwxyz '): """ Return only the subset of chars from accepted_chars. This helps keep the model relatively small by ignoring punctuation, infrequent symbols, etc. """ return [c.lower() for c in line if c.lower() in accepted_chars]
Return only the subset of chars from accepted_chars. This helps keep the model relatively small by ignoring punctuation, infrequent symbols, etc.
Return only the subset of chars from accepted_chars. This helps keep the model relatively small by ignoring punctuation, infrequent symbols, etc.
[ "Return", "only", "the", "subset", "of", "chars", "from", "accepted_chars", ".", "This", "helps", "keep", "the", "model", "relatively", "small", "by", "ignoring", "punctuation", "infrequent", "symbols", "etc", "." ]
def normalize(line, accepted_chars='abcdefghijklmnopqrstuvwxyz '): return [c.lower() for c in line if c.lower() in accepted_chars]
[ "def", "normalize", "(", "line", ",", "accepted_chars", "=", "'abcdefghijklmnopqrstuvwxyz '", ")", ":", "return", "[", "c", ".", "lower", "(", ")", "for", "c", "in", "line", "if", "c", ".", "lower", "(", ")", "in", "accepted_chars", "]" ]
Return only the subset of chars from accepted_chars.
[ "Return", "only", "the", "subset", "of", "chars", "from", "accepted_chars", "." ]
[ "\"\"\"\n Return only the subset of chars from accepted_chars.\n This helps keep the model relatively small by ignoring punctuation, \n infrequenty symbols, etc.\n \"\"\"" ]
[ { "param": "line", "type": null }, { "param": "accepted_chars", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "accepted_chars", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def normalize(line, accepted_chars='abcdefghijklmnopqrstuvwxyz '): return [c.lower() for c in line if c.lower() in accepted_chars]
937
225
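A quick check for normalize (assuming the function above is in scope): characters outside the accepted set are simply dropped, and the result is a list of lowercase characters rather than a string:

chars = normalize("Hello, World!")
print(chars)           # ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']
print("".join(chars))  # 'hello world'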
928a7fa1e275cf4154494ceda205aff10584b1a1
MyHerket/TrivalentGraphs
strat2.py
[ "MIT" ]
Python
build_sub_O2
<not_specific>
def build_sub_O2(Graph): """ For each white node in Graph, we create a copy of Graph and perform O2 in that node. Return a list of the graphs obtained. """ White = Graph.white() sub_list = [] for index, n in enumerate(White): temp_g = Graph.O2(n) temp_g.labeling(index+1) sub_list.append(temp_g) return(sub_list)
For each white node in Graph, we create a copy of Graph and perform O2 in that node. Return a list of the graphs obtained.
For each white node in Graph, we create a copy of Graph and perform O2 in that node. Return a list of the graphs obtained.
[ "For", "each", "white", "node", "in", "Graph", "we", "create", "a", "copy", "of", "Graph", "and", "perform", "O2", "in", "that", "node", ".", "Return", "a", "list", "of", "the", "graphs", "obtained", "." ]
def build_sub_O2(Graph): White = Graph.white() sub_list = [] for index, n in enumerate(White): temp_g = Graph.O2(n) temp_g.labeling(index+1) sub_list.append(temp_g) return(sub_list)
[ "def", "build_sub_O2", "(", "Graph", ")", ":", "White", "=", "Graph", ".", "white", "(", ")", "sub_list", "=", "[", "]", "for", "index", ",", "n", "in", "enumerate", "(", "White", ")", ":", "temp_g", "=", "Graph", ".", "O2", "(", "n", ")", "temp_g", ".", "labeling", "(", "index", "+", "1", ")", "sub_list", ".", "append", "(", "temp_g", ")", "return", "(", "sub_list", ")" ]
For each white node in Graph, we create a copy of Graph and perform O2 in that node.
[ "For", "each", "white", "node", "in", "Graph", "we", "create", "a", "copy", "of", "Graph", "and", "perform", "O2", "in", "that", "node", "." ]
[ "\"\"\"\n\t\tFor each white node in Graph, we create a copy of Graph and perform O2 in that node.\n\t\tReturn a list of the graphs obtained.\n\t\"\"\"" ]
[ { "param": "Graph", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "Graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def build_sub_O2(Graph): White = Graph.white() sub_list = [] for index, n in enumerate(White): temp_g = Graph.O2(n) temp_g.labeling(index+1) sub_list.append(temp_g) return(sub_list)
938
106
c99e2d8f0bcacec5a861c540bf48983e7a33556c
dhinakg/BitSTAR
api/git.py
[ "Apache-2.0" ]
Python
git_branch
<not_specific>
def git_branch(): '''Get the current git branch.''' with open(".git/HEAD") as file: head_string = file.read() head_split = head_string.split(": ") if len(head_split) == 2: branch = head_split[1].split("/", 2)[-1] msg = branch.strip() else: msg = "UNKNOWN" return msg
Get the current git branch.
Get the current git branch.
[ "Get", "the", "current", "git", "branch", "." ]
def git_branch(): with open(".git/HEAD") as file: head_string = file.read() head_split = head_string.split(": ") if len(head_split) == 2: branch = head_split[1].split("/", 2)[-1] msg = branch.strip() else: msg = "UNKNOWN" return msg
[ "def", "git_branch", "(", ")", ":", "with", "open", "(", "\".git/HEAD\"", ")", "as", "file", ":", "head_string", "=", "file", ".", "read", "(", ")", "head_split", "=", "head_string", ".", "split", "(", "\": \"", ")", "if", "len", "(", "head_split", ")", "==", "2", ":", "branch", "=", "head_split", "[", "1", "]", ".", "split", "(", "\"/\"", ",", "2", ")", "[", "-", "1", "]", "msg", "=", "branch", ".", "strip", "(", ")", "else", ":", "msg", "=", "\"UNKNOWN\"", "return", "msg" ]
Get the current git branch.
[ "Get", "the", "current", "git", "branch", "." ]
[ "'''Get the current git branch.'''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def git_branch(): with open(".git/HEAD") as file: head_string = file.read() head_split = head_string.split(": ") if len(head_split) == 2: branch = head_split[1].split("/", 2)[-1] msg = branch.strip() else: msg = "UNKNOWN" return msg
939
609
75b8cc218b2a12c43aad51e4cb7c09ad4184fd46
ukdtom/ExportTools
Contents/Code/misc.py
[ "Unlicense" ]
Python
ConvertSize
<not_specific>
def ConvertSize(SizeAsString): ''' converts a byte count to the most readable string ''' size = float(SizeAsString) if (size == 0): return '0B' size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size, 1024))) p = math.pow(1024, i) s = round(size / p, 2) return '%s %s' % (s, size_name[i])
converts a byte count to the most readable string
converts a byte count to the most readable string
[ "converts", "a", "byte", "count", "to", "the", "most", "readable", "string" ]
def ConvertSize(SizeAsString): size = float(SizeAsString) if (size == 0): return '0B' size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size, 1024))) p = math.pow(1024, i) s = round(size / p, 2) return '%s %s' % (s, size_name[i])
[ "def", "ConvertSize", "(", "SizeAsString", ")", ":", "size", "=", "float", "(", "SizeAsString", ")", "if", "(", "size", "==", "0", ")", ":", "return", "'0B'", "size_name", "=", "(", "\"B\"", ",", "\"KB\"", ",", "\"MB\"", ",", "\"GB\"", ",", "\"TB\"", ",", "\"PB\"", ",", "\"EB\"", ",", "\"ZB\"", ",", "\"YB\"", ")", "i", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log", "(", "size", ",", "1024", ")", ")", ")", "p", "=", "math", ".", "pow", "(", "1024", ",", "i", ")", "s", "=", "round", "(", "size", "/", "p", ",", "2", ")", "return", "'%s %s'", "%", "(", "s", ",", "size_name", "[", "i", "]", ")" ]
converts a byte count to the most readable string
[ "converts", "a", "byte", "count", "to", "the", "most", "readable", "string" ]
[ "''' converts a byte count to the most readable string '''" ]
[ { "param": "SizeAsString", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "SizeAsString", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def ConvertSize(SizeAsString): size = float(SizeAsString) if (size == 0): return '0B' size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size, 1024))) p = math.pow(1024, i) s = round(size / p, 2) return '%s %s' % (s, size_name[i])
940
406
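A numeric sketch for ConvertSize (function assumed to be in scope); note the input is a string holding a byte count, and the prefixes are binary:

print(ConvertSize("0"))        # 0B
print(ConvertSize("1536"))     # 1.5 KB
print(ConvertSize("1000000"))  # 976.56 KB  (10**6 is still below 1024**2)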
56bc9baf476c8b3359edf82f43174a85b6fc6986
jevandezande/scripts
qgrep/helper.py
[ "MIT" ]
Python
check_program
<not_specific>
def check_program(file_name): """ Takes the name of an output file and determines what program wrote (or reads) them :param file_name: name of the output file :return: string of the program or None """ programs = { '* O R C A *': 'orca', 'Welcome to Q-Chem': 'qchem', 'PSI4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Psi4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Northwest Computational Chemistry Package (NWChem)': 'nwchem', '#ZMATRIX': 'zmatrix', '* CFOUR Coupled-Cluster techniques for Computational Chemistry *': 'cfour', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', "----- GAMESS execution script 'rungms' -----": 'gamess', 'N A T U R A L A T O M I C O R B I T A L A N D': 'nbo', 'Entering Gaussian System, Link 0=g09': 'gaussian', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', # Printed after input file 'BAGEL - Freshly leavened quantum chemistry': 'bagel', } program = None with open(file_name) as f: for i in range(200): line = f.readline().strip() if line in programs: program = programs[line] break return program
Takes the name of an output file and determines what program wrote (or reads) them :param file_name: name of the output file :return: string of the program or None
Takes the name of an output file and determines what program wrote (or reads) them
[ "Takes", "the", "name", "of", "an", "output", "file", "and", "determines", "what", "program", "wrote", "(", "or", "reads", ")", "them" ]
def check_program(file_name): programs = { '* O R C A *': 'orca', 'Welcome to Q-Chem': 'qchem', 'PSI4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Psi4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Northwest Computational Chemistry Package (NWChem)': 'nwchem', '#ZMATRIX': 'zmatrix', '* CFOUR Coupled-Cluster techniques for Computational Chemistry *': 'cfour', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', "----- GAMESS execution script 'rungms' -----": 'gamess', 'N A T U R A L A T O M I C O R B I T A L A N D': 'nbo', 'Entering Gaussian System, Link 0=g09': 'gaussian', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', 'BAGEL - Freshly leavened quantum chemistry': 'bagel', } program = None with open(file_name) as f: for i in range(200): line = f.readline().strip() if line in programs: program = programs[line] break return program
[ "def", "check_program", "(", "file_name", ")", ":", "programs", "=", "{", "'* O R C A *'", ":", "'orca'", ",", "'Welcome to Q-Chem'", ":", "'qchem'", ",", "'PSI4: An Open-Source Ab Initio Electronic Structure Package'", ":", "'psi4'", ",", "'Psi4: An Open-Source Ab Initio Electronic Structure Package'", ":", "'psi4'", ",", "'Northwest Computational Chemistry Package (NWChem)'", ":", "'nwchem'", ",", "'#ZMATRIX'", ":", "'zmatrix'", ",", "'* CFOUR Coupled-Cluster techniques for Computational Chemistry *'", ":", "'cfour'", ",", "'*** PROGRAM SYSTEM MOLPRO ***'", ":", "'molpro'", ",", "\"----- GAMESS execution script 'rungms' -----\"", ":", "'gamess'", ",", "'N A T U R A L A T O M I C O R B I T A L A N D'", ":", "'nbo'", ",", "'Entering Gaussian System, Link 0=g09'", ":", "'gaussian'", ",", "'*** PROGRAM SYSTEM MOLPRO ***'", ":", "'molpro'", ",", "'BAGEL - Freshly leavened quantum chemistry'", ":", "'bagel'", ",", "}", "program", "=", "None", "with", "open", "(", "file_name", ")", "as", "f", ":", "for", "i", "in", "range", "(", "200", ")", ":", "line", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", "if", "line", "in", "programs", ":", "program", "=", "programs", "[", "line", "]", "break", "return", "program" ]
Takes the name of an output file and determines what program wrote (or reads) them
[ "Takes", "the", "name", "of", "an", "output", "file", "and", "determines", "what", "program", "wrote", "(", "or", "reads", ")", "them" ]
[ "\"\"\"\n Takes the name of an output file and determines what program wrote (or\n reads) them\n :param file_name: name of the output file\n :return: string of the program or None\n \"\"\"", "# Printed after input file" ]
[ { "param": "file_name", "type": null } ]
{ "returns": [ { "docstring": "string of the program or None", "docstring_tokens": [ "string", "of", "the", "program", "or", "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": "name of the output file", "docstring_tokens": [ "name", "of", "the", "output", "file" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_program(file_name): programs = { '* O R C A *': 'orca', 'Welcome to Q-Chem': 'qchem', 'PSI4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Psi4: An Open-Source Ab Initio Electronic Structure Package': 'psi4', 'Northwest Computational Chemistry Package (NWChem)': 'nwchem', '#ZMATRIX': 'zmatrix', '* CFOUR Coupled-Cluster techniques for Computational Chemistry *': 'cfour', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', "----- GAMESS execution script 'rungms' -----": 'gamess', 'N A T U R A L A T O M I C O R B I T A L A N D': 'nbo', 'Entering Gaussian System, Link 0=g09': 'gaussian', '*** PROGRAM SYSTEM MOLPRO ***': 'molpro', 'BAGEL - Freshly leavened quantum chemistry': 'bagel', } program = None with open(file_name) as f: for i in range(200): line = f.readline().strip() if line in programs: program = programs[line] break return program
941
352
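A self-contained sketch for check_program, writing a fake output file whose banner line matches one of the dictionary keys (the banner text and file name are illustrative):

import tempfile

# assumes check_program() from the record above is in scope
with tempfile.NamedTemporaryFile("w", suffix=".out", delete=False) as f:
    f.write("job header\nWelcome to Q-Chem\n...\n")
    path = f.name
print(check_program(path))  # qchem

Only the first 200 lines are scanned, so a banner buried deeper in the file makes the function return None.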
6601e32aa8f0485388f38492f7e221944ab845e1
BCCN-Prog/materials
testing/deceivingly_simple/maxima.py
[ "MIT" ]
Python
find_maxima
<not_specific>
def find_maxima(x): """Find local maxima of x. Example: >>> x = [1, 2, 3, 2, 4, 3] >>> find_maxima(x) [2, 4] Input arguments: x -- 1D list of real numbers Output: idx -- list of indices of the local maxima in x """ if type(x) != type([]): message = 'Input argument must be a list, got %d instead' % type(x) raise TypeError(message) idx = [] for i in range(len(x)): # `i` is a local maximum if the signal decreases before and after it if x[i-1] < x[i] and x[i+1] < x[i]: idx.append(i) return idx # NOTE for the curious: the code above could be written using # list comprehension as # return [i for i in range(len(x)) if x[i-1]<x[i] and x[i+1]<x[i]] # not that this would solve the bugs ;-)
Find local maxima of x. Example: >>> x = [1, 2, 3, 2, 4, 3] >>> find_maxima(x) [2, 4] Input arguments: x -- 1D list of real numbers Output: idx -- list of indices of the local maxima in x
Find local maxima of x.
[ "Find", "local", "maxima", "of", "x", "." ]
def find_maxima(x): if type(x) != type([]): message = 'Input argument must be a list, got %d instead' % type(x) raise TypeError(message) idx = [] for i in range(len(x)): if x[i-1] < x[i] and x[i+1] < x[i]: idx.append(i) return idx
[ "def", "find_maxima", "(", "x", ")", ":", "if", "type", "(", "x", ")", "!=", "type", "(", "[", "]", ")", ":", "message", "=", "'Input argument must be a list, got %d instead'", "%", "type", "(", "x", ")", "raise", "TypeError", "(", "message", ")", "idx", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "x", ")", ")", ":", "if", "x", "[", "i", "-", "1", "]", "<", "x", "[", "i", "]", "and", "x", "[", "i", "+", "1", "]", "<", "x", "[", "i", "]", ":", "idx", ".", "append", "(", "i", ")", "return", "idx" ]
Find local maxima of x.
[ "Find", "local", "maxima", "of", "x", "." ]
[ "\"\"\"Find local maxima of x.\n\n Example:\n >>> x = [1, 2, 3, 2, 4, 3]\n >>> find_maxima(x)\n [2, 4]\n\n Input arguments:\n x -- 1D list of real numbers\n\n Output:\n idx -- list of indices of the local maxima in x\n \"\"\"", "# `i` is a local maximum if the signal decreases before and after it", "# NOTE for the curious: the code above could be written using", "# list comprehension as", "# return [i for i in range(len(x)) if x[i-1]<x[i] and x[i+1]<x[i]]", "# not that this would solve the bugs ;-)" ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def find_maxima(x): if type(x) != type([]): message = 'Input argument must be a list, got %d instead' % type(x) raise TypeError(message) idx = [] for i in range(len(x)): if x[i-1] < x[i] and x[i+1] < x[i]: idx.append(i) return idx
942
153
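The record keeps the exercise's deliberate bugs intact; a quick sketch (function assumed to be in scope) shows both the docstring example working and the boundary failure when the last element is a maximum:

print(find_maxima([1, 2, 3, 2, 4, 3]))  # [2, 4] -- indices, as documented
try:
    find_maxima([1, 3, 2, 4])           # maximum at the last index...
except IndexError as err:
    print("boundary bug:", err)         # x[i + 1] runs past the end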
8c0c5e9f521e05240cc26e8b26d66571ca85f75b
Ligio/hacc-ozmo
custom_components/deebot/__init__.py
[ "MIT" ]
Python
services_to_strings
<not_specific>
def services_to_strings(services, service_to_string): """Convert SUPPORT_* service bitmask to list of service strings.""" strings = [] for service in service_to_string: if service & services: strings.append(service_to_string[service]) return strings
Convert SUPPORT_* service bitmask to list of service strings.
Convert SUPPORT_* service bitmask to list of service strings.
[ "Convert", "SUPPORT_", "*", "service", "bitmask", "to", "list", "of", "service", "strings", "." ]
def services_to_strings(services, service_to_string): strings = [] for service in service_to_string: if service & services: strings.append(service_to_string[service]) return strings
[ "def", "services_to_strings", "(", "services", ",", "service_to_string", ")", ":", "strings", "=", "[", "]", "for", "service", "in", "service_to_string", ":", "if", "service", "&", "services", ":", "strings", ".", "append", "(", "service_to_string", "[", "service", "]", ")", "return", "strings" ]
Convert SUPPORT_* service bitmask to list of service strings.
[ "Convert", "SUPPORT_", "*", "service", "bitmask", "to", "list", "of", "service", "strings", "." ]
[ "\"\"\"Convert SUPPORT_* service bitmask to list of service strings.\"\"\"" ]
[ { "param": "services", "type": null }, { "param": "service_to_string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "services", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "service_to_string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def services_to_strings(services, service_to_string): strings = [] for service in service_to_string: if service & services: strings.append(service_to_string[service]) return strings
943
459
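A sketch for services_to_strings with hypothetical SUPPORT_* bit flags in the style the record assumes:

# assumes services_to_strings() from the record above is in scope
SUPPORT_TURN_ON = 0x1
SUPPORT_FAN_SPEED = 0x2
SUPPORT_LOCATE = 0x4
SERVICE_TO_STRING = {
    SUPPORT_TURN_ON: "turn_on",
    SUPPORT_FAN_SPEED: "fan_speed",
    SUPPORT_LOCATE: "locate",
}
print(services_to_strings(SUPPORT_TURN_ON | SUPPORT_LOCATE, SERVICE_TO_STRING))
# ['turn_on', 'locate']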
3e4b51d2fcf3cb70b12a389dddc8845b785fb954
monotasker/plugin_sqlite_backup
modules/plugin_sqlite_backup.py
[ "Apache-2.0" ]
Python
do_zip_except_sqlite
<not_specific>
def do_zip_except_sqlite(target_dir, file_name): """ Compress all db files not ending in .sqlite and copy to the target_dir. By default the backup is saved to myapp/backup/. If successful, the function returns the full path of the backup file. If unsuccessful, it returns False. """ try: zip = zipfile.ZipFile(file_name, 'w', zipfile.ZIP_DEFLATED) rootlen = len(target_dir) + 1 #print rootlen for base, dirs, files in os.walk(target_dir): #print dir filelist = [] for file in files: if file.find('.sqlite', len(file) - 7) == -1: fn = os.path.join(base, file) zip.write(fn, fn[rootlen:]) filelist.append(fn) zip.close() return filelist except Exception: print(traceback.format_exc(5)) return False
Compress all db files not ending in .sqlite and copy to the target_dir. By default the backup is saved to myapp/backup/. If successful, the function returns the full path of the backup file. If unsuccessful, it returns False.
Compress all db files not ending in .sqlite and copy to the target_dir. By default the backup is saved to myapp/backup/. If successful, the function returns the full path of the backup file. If unsuccessful, it returns False.
[ "Compress", "all", "db", "files", "not", "ending", "in", ".", "sqlite", "and", "copy", "to", "the", "target_dir", ".", "By", "default", "the", "backup", "is", "saved", "to", "myapp", "/", "backup", "/", ".", "If", "successful", "the", "function", "returns", "the", "full", "path", "of", "the", "backup", "file", ".", "If", "unsuccessful", "it", "returns", "False", "." ]
def do_zip_except_sqlite(target_dir, file_name): try: zip = zipfile.ZipFile(file_name, 'w', zipfile.ZIP_DEFLATED) rootlen = len(target_dir) + 1 for base, dirs, files in os.walk(target_dir): filelist = [] for file in files: if file.find('.sqlite', len(file) - 7) == -1: fn = os.path.join(base, file) zip.write(fn, fn[rootlen:]) filelist.append(fn) zip.close() return filelist except Exception: print(traceback.format_exc(5)) return False
[ "def", "do_zip_except_sqlite", "(", "target_dir", ",", "file_name", ")", ":", "try", ":", "zip", "=", "zipfile", ".", "ZipFile", "(", "file_name", ",", "'w'", ",", "zipfile", ".", "ZIP_DEFLATED", ")", "rootlen", "=", "len", "(", "target_dir", ")", "+", "1", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "target_dir", ")", ":", "filelist", "=", "[", "]", "for", "file", "in", "files", ":", "if", "file", ".", "find", "(", "'.sqlite'", ",", "len", "(", "file", ")", "-", "7", ")", "==", "-", "1", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "base", ",", "file", ")", "zip", ".", "write", "(", "fn", ",", "fn", "[", "rootlen", ":", "]", ")", "filelist", ".", "append", "(", "fn", ")", "zip", ".", "close", "(", ")", "return", "filelist", "except", "Exception", ":", "print", "(", "traceback", ".", "format_exc", "(", "5", ")", ")", "return", "False" ]
Compress all db files not ending in .sqlite and copy to the target_dir.
[ "Compress", "all", "db", "files", "not", "ending", "in", ".", "sqlite", "and", "copy", "to", "the", "target_dir", "." ]
[ "\"\"\"\n Compress all db files not ending in .sqlite and copy to the target_dir.\n\n By default the backup is saved to myapp/backup/. If successful, the\n function returns the full path of the backup file. If unsuccessful, it\n returns False.\n\n \"\"\"", "#print rootlen", "#print dir" ]
[ { "param": "target_dir", "type": null }, { "param": "file_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "target_dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import zipfile import os import traceback def do_zip_except_sqlite(target_dir, file_name): try: zip = zipfile.ZipFile(file_name, 'w', zipfile.ZIP_DEFLATED) rootlen = len(target_dir) + 1 for base, dirs, files in os.walk(target_dir): filelist = [] for file in files: if file.find('.sqlite', len(file) - 7) == -1: fn = os.path.join(base, file) zip.write(fn, fn[rootlen:]) filelist.append(fn) zip.close() return filelist except Exception: print(traceback.format_exc(5)) return False
944
256
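A self-contained sketch for do_zip_except_sqlite using throwaway directories (file names are illustrative; the function is assumed to be in scope). Note that despite the docstring, on success it returns the list of archived file paths, not the backup path:

import os
import tempfile
import zipfile

src = tempfile.mkdtemp()
for name in ("app.py", "data.sqlite", "notes.txt"):
    with open(os.path.join(src, name), "w") as f:
        f.write("x")
archive = os.path.join(tempfile.mkdtemp(), "backup.zip")
print(do_zip_except_sqlite(src, archive))  # full paths of app.py and notes.txt
with zipfile.ZipFile(archive) as z:
    print(z.namelist())  # ['app.py', 'notes.txt'] (order may vary)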
3fb043e4a9921f20f1c4ba84d7cf9451c8e846e6
snototter/ragnaroek
src/helheimr_heating/helheimr_heating/utils/translation.py
[ "MIT" ]
Python
_trans_vars
<not_specific>
def _trans_vars(_gt, msg, **kwargs): """ Extension of gettext to support named placeholders with associated format specifiers, similar to flask-babel. """ t = _gt(msg) for k, val in kwargs.items(): # Search for the variable specifications, e.g. # %(var)d, %(user).2f, %(name)s rx = rf"%[(]{k}[)]([0-9]*[\.]?[0-9]*[a-z])" found = re.search(rx, t) if found: # The second group contains only the format specifier: fspec = found.group(1) repstr = '{:' + fspec + '}' try: repstr = repstr.format(val) except TypeError: repstr = repstr.format(str(val)) t = re.sub(rx, repstr, t) return t.replace('%%', '%')
Extension of gettext to support named placeholders with associated format specifiers, similar to flask-babel.
Extension of gettext to support named placeholders with associated format specifiers, similar to flask-babel.
[ "Extension", "of", "gettext", "to", "support", "named", "placeholders", "with", "associated", "format", "specifiers", "similar", "to", "flask", "-", "babel", "." ]
def _trans_vars(_gt, msg, **kwargs): t = _gt(msg) for k, val in kwargs.items(): rx = rf"%[(]{k}[)]([0-9]*[\.]?[0-9]*[a-z])" found = re.search(rx, t) if found: fspec = found.group(1) repstr = '{:' + fspec + '}' try: repstr = repstr.format(val) except TypeError: repstr = repstr.format(str(val)) t = re.sub(rx, repstr, t) return t.replace('%%', '%')
[ "def", "_trans_vars", "(", "_gt", ",", "msg", ",", "**", "kwargs", ")", ":", "t", "=", "_gt", "(", "msg", ")", "for", "k", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "rx", "=", "rf\"%[(]{k}[)]([0-9]*[\\.]?[0-9]*[a-z])\"", "found", "=", "re", ".", "search", "(", "rx", ",", "t", ")", "if", "found", ":", "fspec", "=", "found", ".", "group", "(", "1", ")", "repstr", "=", "'{:'", "+", "fspec", "+", "'}'", "try", ":", "repstr", "=", "repstr", ".", "format", "(", "val", ")", "except", "TypeError", ":", "repstr", "=", "repstr", ".", "format", "(", "str", "(", "val", ")", ")", "t", "=", "re", ".", "sub", "(", "rx", ",", "repstr", ",", "t", ")", "return", "t", ".", "replace", "(", "'%%'", ",", "'%'", ")" ]
Extension of gettext to support named placeholders with associated format specifiers, similar to flask-babel.
[ "Extension", "of", "gettext", "to", "support", "named", "placeholders", "with", "associated", "format", "specifiers", "similar", "to", "flask", "-", "babel", "." ]
[ "\"\"\"\n Extension of gettext to support named placeholders with\n associated format specifiers, similar to flask-babel.\n \"\"\"", "# Search for the variable specifications, e.g.", "# %(var)d, %(user).2f, %(name)s", "# The second group contains only the format specifier:" ]
[ { "param": "_gt", "type": null }, { "param": "msg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_gt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "msg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def _trans_vars(_gt, msg, **kwargs): t = _gt(msg) for k, val in kwargs.items(): rx = rf"%[(]{k}[)]([0-9]*[\.]?[0-9]*[a-z])" found = re.search(rx, t) if found: fspec = found.group(1) repstr = '{:' + fspec + '}' try: repstr = repstr.format(val) except TypeError: repstr = repstr.format(str(val)) t = re.sub(rx, repstr, t) return t.replace('%%', '%')
945
557
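A sketch for _trans_vars with an identity function standing in for a real gettext lookup (the message and values are illustrative). The format specifier after each named placeholder is honored, and a literal percent sign is written as %%:

# assumes _trans_vars() from the record above is in scope
def _gt(s):
    return s  # identity stand-in for a gettext translation lookup

msg = "Hi %(user)s, you used %(quota).1f%% of your space"
print(_trans_vars(_gt, msg, user="Ann", quota=72.25))
# Hi Ann, you used 72.2% of your space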
43bfc18cb1b6e49d623c2f14e4ff9fa3c4e08b76
federatedcloud/FRB_pipeline
Pipeline/Modules/agg.py
[ "BSD-3-Clause" ]
Python
sort_results
null
def sort_results(stat_lists, stat_names, sort_stat): ''' Sorts the stat_lists according to the statistic specified by sort_stat. Returns: sorted stat_lists Parameters: stat_lists: a list of sublists, each sublist is a line of statistics from a cluster text file stat_names: a list containing the names of statistics, in order sort_stat: the name of the statistic to sort by ''' sort_index= stat_names.index(sort_stat) stat_lists.sort(reverse= True, key=lambda x:x[sort_index])
Sorts the stat_lists according to the statistic specified by sort_stat. Returns: sorted stat_lists Parameters: stat_lists: a list of sublists, each sublist is a line of statistics from a cluster text file stat_names: a list containing the names of statistics, in order sort_stat: the name of the statistic to sort by
Sorts the stat_lists according to the statistic specified by sort_stat.
[ "Sorts", "the", "stat_lists", "according", "to", "the", "statistic", "specified", "by", "sort_stat", "." ]
def sort_results(stat_lists, stat_names, sort_stat): sort_index= stat_names.index(sort_stat) stat_lists.sort(reverse= True, key=lambda x:x[sort_index])
[ "def", "sort_results", "(", "stat_lists", ",", "stat_names", ",", "sort_stat", ")", ":", "sort_index", "=", "stat_names", ".", "index", "(", "sort_stat", ")", "stat_lists", ".", "sort", "(", "reverse", "=", "True", ",", "key", "=", "lambda", "x", ":", "x", "[", "sort_index", "]", ")" ]
Sorts the stat_lists according to the statistic specified by sort_stat.
[ "Sorts", "the", "stat_lists", "according", "to", "the", "statistic", "specified", "by", "sort_stat", "." ]
[ "'''\n Sorts the stat_lists according to the statistic specified by sort_stat.\n Returns: sorted stat_lists\n Parameters:\n stat_lists: a list of sublists, each sublist is a line of statistics from\n a cluster text file\n stat_names: a list containing the names of statistics, in order\n sort_stat: the name of the statistic to sort by\n\n '''" ]
[ { "param": "stat_lists", "type": null }, { "param": "stat_names", "type": null }, { "param": "sort_stat", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "stat_lists", "type": null, "docstring": "a list of sublists, each sublist is a line of statistics from\na cluster text file", "docstring_tokens": [ "a", "list", "of", "sublists", "each", "sublist", "is", "a", "line", "of", "statistics", "from", "a", "cluster", "text", "file" ], "default": null, "is_optional": null }, { "identifier": "stat_names", "type": null, "docstring": "a list containing the names of statistics, in order", "docstring_tokens": [ "a", "list", "containing", "the", "names", "of", "statistics", "in", "order" ], "default": null, "is_optional": null }, { "identifier": "sort_stat", "type": null, "docstring": "the name of the statistic to sort by", "docstring_tokens": [ "the", "name", "of", "the", "statistic", "to", "sort", "by" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sort_results(stat_lists, stat_names, sort_stat): sort_index= stat_names.index(sort_stat) stat_lists.sort(reverse= True, key=lambda x:x[sort_index])
946
711
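A sketch for sort_results (function assumed to be in scope). Despite the "Returns" line in its docstring, it sorts stat_lists in place and returns None:

stat_names = ["cluster_id", "snr", "n_members"]
stat_lists = [[1, 7.2, 10], [2, 9.8, 4], [3, 8.1, 6]]
sort_results(stat_lists, stat_names, "snr")
print(stat_lists)  # [[2, 9.8, 4], [3, 8.1, 6], [1, 7.2, 10]]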
37968f2bec6043891a6a356dca3349a6002d617e
jungerm2/Matplotlib-SVG-Animation-Writer
HTMLDiffWriter.py
[ "MIT" ]
Python
_add_base64_prefix
<not_specific>
def _add_base64_prefix(frame_list, frame_format): """frame_list should be a list of base64-encoded files""" if frame_format == 'svg': # Fix MIME type for svg frame_format = 'svg+xml' template = "data:image/{0};base64,{1}" return [template.format(frame_format, frame_data) for frame_data in frame_list]
frame_list should be a list of base64-encoded files
frame_list should be a list of base64-encoded files
[ "frame_list", "should", "be", "a", "list", "of", "base64", "-", "encoded", "files" ]
def _add_base64_prefix(frame_list, frame_format): if frame_format == 'svg': frame_format = 'svg+xml' template = "data:image/{0};base64,{1}" return [template.format(frame_format, frame_data) for frame_data in frame_list]
[ "def", "_add_base64_prefix", "(", "frame_list", ",", "frame_format", ")", ":", "if", "frame_format", "==", "'svg'", ":", "frame_format", "=", "'svg+xml'", "template", "=", "\"data:image/{0};base64,{1}\"", "return", "[", "template", ".", "format", "(", "frame_format", ",", "frame_data", ")", "for", "frame_data", "in", "frame_list", "]" ]
frame_list should be a list of base64-encoded files
[ "frame_list", "should", "be", "a", "list", "of", "base64", "-", "encoded", "files" ]
[ "\"\"\"frame_list should be a list of base64-encoded files\"\"\"", "# Fix MIME type for svg" ]
[ { "param": "frame_list", "type": null }, { "param": "frame_format", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "frame_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "frame_format", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _add_base64_prefix(frame_list, frame_format): if frame_format == 'svg': frame_format = 'svg+xml' template = "data:image/{0};base64,{1}" return [template.format(frame_format, frame_data) for frame_data in frame_list]
947
335
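A quick sketch for _add_base64_prefix (function assumed to be in scope); the payloads are arbitrary base64 strings:

print(_add_base64_prefix(["aGVsbG8="], "png"))
# ['data:image/png;base64,aGVsbG8=']
print(_add_base64_prefix(["PHN2Zy8+"], "svg"))
# ['data:image/svg+xml;base64,PHN2Zy8+']  (svg MIME type fixed up)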
26a8ea9497fb9bdf256f97c967ba600ab882d72c
kunakl07/AL-MLresearch
src/preprocessing_script/selection_table.py
[ "MIT" ]
Python
trim
<not_specific>
def trim(table): """ Keep only the columns prescribed by the Ketos annotation format. Args: table: pandas DataFrame Annotation table. Returns: table: pandas DataFrame Annotation table, after removal of columns. """ keep_cols = ['filename', 'label', 'start', 'end', 'freq_min', 'freq_max'] drop_cols = [x for x in table.columns.values if x not in keep_cols] table = table.drop(drop_cols, axis=1) return table
Keep only the columns prescribed by the Ketos annotation format. Args: table: pandas DataFrame Annotation table. Returns: table: pandas DataFrame Annotation table, after removal of columns.
Keep only the columns prescribed by the Ketos annotation format.
[ "Keep", "only", "the", "columns", "prescribed", "by", "the", "Ketos", "annotation", "format", "." ]
def trim(table): keep_cols = ['filename', 'label', 'start', 'end', 'freq_min', 'freq_max'] drop_cols = [x for x in table.columns.values if x not in keep_cols] table = table.drop(drop_cols, axis=1) return table
[ "def", "trim", "(", "table", ")", ":", "keep_cols", "=", "[", "'filename'", ",", "'label'", ",", "'start'", ",", "'end'", ",", "'freq_min'", ",", "'freq_max'", "]", "drop_cols", "=", "[", "x", "for", "x", "in", "table", ".", "columns", ".", "values", "if", "x", "not", "in", "keep_cols", "]", "table", "=", "table", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "return", "table" ]
Keep only the columns prescribed by the Ketos annotation format.
[ "Keep", "only", "the", "columns", "prescribed", "by", "the", "Ketos", "annotation", "format", "." ]
[ "\"\"\" Keep only the columns prescribed by the Ketos annotation format.\n\n Args:\n table: pandas DataFrame\n Annotation table. \n\n Returns:\n table: pandas DataFrame\n Annotation table, after removal of columns.\n \"\"\"" ]
[ { "param": "table", "type": null } ]
{ "returns": [ { "docstring": "pandas DataFrame\nAnnotation table, after removal of columns.", "docstring_tokens": [ "pandas", "DataFrame", "Annotation", "table", "after", "removal", "of", "columns", "." ], "type": "table" } ], "raises": [], "params": [ { "identifier": "table", "type": null, "docstring": "pandas DataFrame\nAnnotation table.", "docstring_tokens": [ "pandas", "DataFrame", "Annotation", "table", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def trim(table): keep_cols = ['filename', 'label', 'start', 'end', 'freq_min', 'freq_max'] drop_cols = [x for x in table.columns.values if x not in keep_cols] table = table.drop(drop_cols, axis=1) return table
948
966
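A sketch for trim, assuming pandas as the docstring states (the column values are illustrative; the function is assumed to be in scope):

import pandas as pd

table = pd.DataFrame({
    "filename": ["a.wav"], "label": [1], "start": [0.0], "end": [2.5],
    "freq_min": [50.0], "freq_max": [800.0],
    "annotator": ["kb"],  # extra column that trim() should drop
})
print(list(trim(table).columns))
# ['filename', 'label', 'start', 'end', 'freq_min', 'freq_max']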
10e947bdae89ed7b8db18360f7d290e49b33356b
open-power-sdk/source-code-advisor
sca/core.py
[ "Apache-2.0" ]
Python
save_sca_json
null
def save_sca_json(problems_dict, file_name): '''This function saves events info in a Json file''' with open(file_name, 'w') as outfile: for key in problems_dict: outfile.write(problems_dict.get(key).to_json())
This function saves events info in a Json file
This function saves events info in a Json file
[ "This", "function", "saves", "events", "info", "in", "a", "Json", "file" ]
def save_sca_json(problems_dict, file_name): with open(file_name, 'w') as outfile: for key in problems_dict: outfile.write(problems_dict.get(key).to_json())
[ "def", "save_sca_json", "(", "problems_dict", ",", "file_name", ")", ":", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "outfile", ":", "for", "key", "in", "problems_dict", ":", "outfile", ".", "write", "(", "problems_dict", ".", "get", "(", "key", ")", ".", "to_json", "(", ")", ")" ]
This function saves events info in a Json file
[ "This", "function", "saves", "events", "info", "in", "a", "Json", "file" ]
[ "'''This function saves events info in a Json file'''" ]
[ { "param": "problems_dict", "type": null }, { "param": "file_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "problems_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def save_sca_json(problems_dict, file_name): with open(file_name, 'w') as outfile: for key in problems_dict: outfile.write(problems_dict.get(key).to_json())
949
492
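A sketch for save_sca_json; Problem below is a hypothetical stand-in for whatever event objects carry the to_json() method the function expects:

import json
import os
import tempfile

# assumes save_sca_json() from the record above is in scope
class Problem:
    def __init__(self, name):
        self.name = name
    def to_json(self):
        return json.dumps({"event": self.name}) + "\n"

path = os.path.join(tempfile.mkdtemp(), "sca.json")
save_sca_json({"p1": Problem("hot-loop")}, path)
print(open(path).read())  # {"event": "hot-loop"}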
f3a34072d2955879f1d017db29cd41e5854044b4
kimvanwyk/md_directory
directory_builder/diff_tex.py
[ "BSD-3-Clause" ]
Python
junk_line
<not_specific>
def junk_line(string): ''' Return True if the line is ignorable, False otherwise Ignores "compiled on..." lines ''' ig = 'compiled on ' return ig in string
Return True if the line is ignorable, False otherwise Ignores "compiled on..." lines
Return True if the line is ignorable, False otherwise Ignores "compiled on..." lines
[ "Return", "True", "if", "the", "line", "is", "ignorable", "False", "otherwise", "Ignores", "\"", "compiled", "on", "...", "\"", "lines" ]
def junk_line(string): ig = 'compiled on ' return ig in string
[ "def", "junk_line", "(", "string", ")", ":", "ig", "=", "'compiled on '", "return", "ig", "in", "string" ]
Return True if the line is ignorable, False otherwise Ignores "compiled on..." lines
[ "Return", "True", "if", "the", "line", "is", "ignorable", "False", "otherwise", "Ignores", "\"", "compiled", "on", "...", "\"", "lines" ]
[ "''' Return True if the line is ignorable, False otherwise\n Ignores \"compiled on...\" lines\n '''" ]
[ { "param": "string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def junk_line(string): ig = 'compiled on ' return ig in string
950
71
4469c70fee099035fc8f9e34157faf7e992169f8
rainbow-mind-machine/rainbow-mind-machine
rainbowmindmachine/queneau.py
[ "MIT" ]
Python
loadlines
<not_specific>
def loadlines(cls, f, tokens_in='tokens'): """Load from a filehandle that defines a JSON object on every line.""" corpus = cls() for i in f.readlines(): o = json.loads(i) if tokens_in in o: corpus.add(o, tokens_in) return corpus
Load from a filehandle that defines a JSON object on every line.
Load from a filehandle that defines a JSON object on every line.
[ "Load", "from", "a", "filehandle", "that", "defines", "a", "JSON", "object", "on", "every", "line", "." ]
def loadlines(cls, f, tokens_in='tokens'): corpus = cls() for i in f.readlines(): o = json.loads(i) if tokens_in in o: corpus.add(o, tokens_in) return corpus
[ "def", "loadlines", "(", "cls", ",", "f", ",", "tokens_in", "=", "'tokens'", ")", ":", "corpus", "=", "cls", "(", ")", "for", "i", "in", "f", ".", "readlines", "(", ")", ":", "o", "=", "json", ".", "loads", "(", "i", ")", "if", "tokens_in", "in", "o", ":", "corpus", ".", "add", "(", "o", ",", "tokens_in", ")", "return", "corpus" ]
Load from a filehandle that defines a JSON object on every line.
[ "Load", "from", "a", "filehandle", "that", "defines", "a", "JSON", "object", "on", "every", "line", "." ]
[ "\"\"\"Load from a filehandle that defines a JSON object on every line.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "f", "type": null }, { "param": "tokens_in", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "tokens_in", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def loadlines(cls, f, tokens_in='tokens'): corpus = cls() for i in f.readlines(): o = json.loads(i) if tokens_in in o: corpus.add(o, tokens_in) return corpus
951
877
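loadlines is written as a classmethod, so a sketch needs some class with a no-argument constructor and an add() method; Corpus below is a hypothetical stand-in, and io.StringIO plays the file handle:

import io

# assumes loadlines() from the record above is in scope
class Corpus:
    def __init__(self):
        self.docs = []
    def add(self, obj, tokens_in):
        self.docs.append(obj[tokens_in])

lines = io.StringIO('{"tokens": ["a", "b"]}\n{"untokenized": true}\n')
corpus = loadlines(Corpus, lines)  # second line is skipped: no "tokens" key
print(corpus.docs)                 # [['a', 'b']]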
b47ef5272451c406372477922c224ffd0deab83f
gc-plp/reddit-moderator-bot
modbot/utils.py
[ "Unlicense" ]
Python
parse_wiki_content
<not_specific>
def parse_wiki_content(crt_content, parser="CFG_INI"): """ Parses given content depending on the type """ if parser == "CFG_INI": parser = configparser.ConfigParser(allow_no_value=True, strict=False) try: parser.read_string(crt_content) except configparser.MissingSectionHeaderError: # All configs should contain [Setup] # If not, try prepending it if "[Setup]" not in crt_content: crt_content = "[Setup]\n" + crt_content # Try again try: parser.read_string(crt_content) except: return None return parser
Parses given content depending on the type
Parses given content depending on the type
[ "Parses", "given", "content", "depending", "on", "the", "type" ]
def parse_wiki_content(crt_content, parser="CFG_INI"): if parser == "CFG_INI": parser = configparser.ConfigParser(allow_no_value=True, strict=False) try: parser.read_string(crt_content) except configparser.MissingSectionHeaderError: if "[Setup]" not in crt_content: crt_content = "[Setup]\n" + crt_content try: parser.read_string(crt_content) except: return None return parser
[ "def", "parse_wiki_content", "(", "crt_content", ",", "parser", "=", "\"CFG_INI\"", ")", ":", "if", "parser", "==", "\"CFG_INI\"", ":", "parser", "=", "configparser", ".", "ConfigParser", "(", "allow_no_value", "=", "True", ",", "strict", "=", "False", ")", "try", ":", "parser", ".", "read_string", "(", "crt_content", ")", "except", "configparser", ".", "MissingSectionHeaderError", ":", "if", "\"[Setup]\"", "not", "in", "crt_content", ":", "crt_content", "=", "\"[Setup]\\n\"", "+", "crt_content", "try", ":", "parser", ".", "read_string", "(", "crt_content", ")", "except", ":", "return", "None", "return", "parser" ]
Parses given content depending on the type
[ "Parses", "given", "content", "depending", "on", "the", "type" ]
[ "\"\"\"\n Parses given content depending on the type\n \"\"\"", "# All configs should contain [Setup]", "# If not, try prepending it", "# Try again" ]
[ { "param": "crt_content", "type": null }, { "param": "parser", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "crt_content", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import configparser def parse_wiki_content(crt_content, parser="CFG_INI"): if parser == "CFG_INI": parser = configparser.ConfigParser(allow_no_value=True, strict=False) try: parser.read_string(crt_content) except configparser.MissingSectionHeaderError: if "[Setup]" not in crt_content: crt_content = "[Setup]\n" + crt_content try: parser.read_string(crt_content) except: return None return parser
952
860
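A sketch for parse_wiki_content (function assumed to be in scope), showing the [Setup] header being prepended when the wiki text lacks one; the keys are illustrative:

raw = "submission_checks = on\nmin_karma = 10\n"  # no [Setup] section header
parser = parse_wiki_content(raw)
print(parser["Setup"]["min_karma"])  # '10' (configparser values are strings)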
cc4d87f25b334f61de769e42299283f621ebdc4b
ccahilla/streamlit_3gfn
code/future_detector_freq_noise_budget.py
[ "MIT" ]
Python
ce_coating_brownian_length_noise
<not_specific>
def ce_coating_brownian_length_noise(ff, L_ce=40e3): """Cosmic Explorer 1um coating brownian length noise in m/rtHz """ strain_asd = 2e-25/(ff/10)**0.5 return strain_asd * L_ce
Cosmic Explorer 1um coating brownian length noise in m/rtHz
Cosmic Explorer 1um coating brownian length noise in m/rtHz
[ "Cosmic", "Explorer", "1um", "coating", "brownian", "length", "noise", "in", "m", "/", "rtHz" ]
def ce_coating_brownian_length_noise(ff, L_ce=40e3): strain_asd = 2e-25/(ff/10)**0.5 return strain_asd * L_ce
[ "def", "ce_coating_brownian_length_noise", "(", "ff", ",", "L_ce", "=", "40e3", ")", ":", "strain_asd", "=", "2e-25", "/", "(", "ff", "/", "10", ")", "**", "0.5", "return", "strain_asd", "*", "L_ce" ]
Cosmic Explorer 1um coating brownian length noise in m/rtHz
[ "Cosmic", "Explorer", "1um", "coating", "brownian", "length", "noise", "in", "m", "/", "rtHz" ]
[ "\"\"\"Cosmic Explorer 1um coating brownian length noise in m/rtHz\n \"\"\"" ]
[ { "param": "ff", "type": null }, { "param": "L_ce", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ff", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "L_ce", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ce_coating_brownian_length_noise(ff, L_ce=40e3): strain_asd = 2e-25/(ff/10)**0.5 return strain_asd * L_ce
953
373
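A quick numeric check of the formula above (plain Python; the values follow directly from the 2e-25/sqrt(f/10) strain times the 40 km arm length):

print(ce_coating_brownian_length_noise(10.0))   # ≈ 8e-21 m/rtHz at 10 Hz
print(ce_coating_brownian_length_noise(100.0))  # ≈ 2.53e-21, falling as 1/sqrt(f)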
945e683781fad7a78c61ba8843f569a64c35fd9b
wjsi/tsfresh
tsfresh/scripts/run_tsfresh.py
[ "MIT" ]
Python
_preprocess
<not_specific>
def _preprocess(df): """ given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise. """ df = df.stack() df.index.rename(["id", "time"], inplace=True) # .reset_index() df.name = "value" df = df.reset_index() return df
given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise.
given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise.
[ "given", "a", "DataFrame", "where", "records", "are", "stored", "row", "-", "wise", "rearrange", "it", "such", "that", "records", "are", "stored", "column", "-", "wise", "." ]
def _preprocess(df): df = df.stack() df.index.rename(["id", "time"], inplace=True) df.name = "value" df = df.reset_index() return df
[ "def", "_preprocess", "(", "df", ")", ":", "df", "=", "df", ".", "stack", "(", ")", "df", ".", "index", ".", "rename", "(", "[", "\"id\"", ",", "\"time\"", "]", ",", "inplace", "=", "True", ")", "df", ".", "name", "=", "\"value\"", "df", "=", "df", ".", "reset_index", "(", ")", "return", "df" ]
given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise.
[ "given", "a", "DataFrame", "where", "records", "are", "stored", "row", "-", "wise", "rearrange", "it", "such", "that", "records", "are", "stored", "column", "-", "wise", "." ]
[ "\"\"\"\n given a DataFrame where records are stored row-wise, rearrange it\n such that records are stored column-wise.\n \"\"\"", "# .reset_index()" ]
[ { "param": "df", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _preprocess(df): df = df.stack() df.index.rename(["id", "time"], inplace=True) df.name = "value" df = df.reset_index() return df
954
1,018
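A small pandas sketch of the reshaping above (assumes pandas is installed and _preprocess is in scope; the sample frame is hypothetical):

import pandas as pd

wide = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["a", "b"], columns=[0, 1])
print(_preprocess(wide))
#   id  time  value
# 0  a     0    1.0
# 1  a     1    2.0
# 2  b     0    3.0
# 3  b     1    4.0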
95e037ae14fb6f0b957424b173bfe353677cedd1
tdohany/artifactory
artifactory.py
[ "MIT" ]
Python
encode_matrix_parameters
<not_specific>
def encode_matrix_parameters(parameters): """ Performs encoding of url matrix parameters from dictionary to a string. See http://www.w3.org/DesignIssues/MatrixURIs.html for specs. """ result = [] for param in iter(sorted(parameters)): if isinstance(parameters[param], (list, tuple)): value = f";{param}=".join(parameters[param]) else: value = parameters[param] result.append("=".join((param, value))) return ";".join(result)
Performs encoding of url matrix parameters from dictionary to a string. See http://www.w3.org/DesignIssues/MatrixURIs.html for specs.
Performs encoding of url matrix parameters from dictionary to a string.
[ "Performs", "encoding", "of", "url", "matrix", "parameters", "from", "dictionary", "to", "a", "string", "." ]
def encode_matrix_parameters(parameters): result = [] for param in iter(sorted(parameters)): if isinstance(parameters[param], (list, tuple)): value = f";{param}=".join(parameters[param]) else: value = parameters[param] result.append("=".join((param, value))) return ";".join(result)
[ "def", "encode_matrix_parameters", "(", "parameters", ")", ":", "result", "=", "[", "]", "for", "param", "in", "iter", "(", "sorted", "(", "parameters", ")", ")", ":", "if", "isinstance", "(", "parameters", "[", "param", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "value", "=", "f\";{param}=\"", ".", "join", "(", "parameters", "[", "param", "]", ")", "else", ":", "value", "=", "parameters", "[", "param", "]", "result", ".", "append", "(", "\"=\"", ".", "join", "(", "(", "param", ",", "value", ")", ")", ")", "return", "\";\"", ".", "join", "(", "result", ")" ]
Performs encoding of url matrix parameters from dictionary to a string.
[ "Performs", "encoding", "of", "url", "matrix", "parameters", "from", "dictionary", "to", "a", "string", "." ]
[ "\"\"\"\n Performs encoding of url matrix parameters from dictionary to\n a string.\n See http://www.w3.org/DesignIssues/MatrixURIs.html for specs.\n \"\"\"" ]
[ { "param": "parameters", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parameters", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def encode_matrix_parameters(parameters): result = [] for param in iter(sorted(parameters)): if isinstance(parameters[param], (list, tuple)): value = f";{param}=".join(parameters[param]) else: value = parameters[param] result.append("=".join((param, value))) return ";".join(result)
955
35
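A usage sketch for the record above (requires Python 3.6+ for the f-string; the parameter values are hypothetical):

params = {"deleteAll": "1", "repos": ["libs-release", "libs-snapshot"]}
print(encode_matrix_parameters(params))
# deleteAll=1;repos=libs-release;repos=libs-snapshot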
afe419aa89baf19d7f4f97306b7c13e8a9d8ee1d
dalsontws/accessibility-axe-selenium
js-axe-selenium/ansible/a11y/lib/python3.8/site-packages/ansible/module_utils/network/common/utils.py
[ "MIT" ]
Python
param_list_to_dict
<not_specific>
def param_list_to_dict(param_list, unique_key="name", remove_key=True): """Rotates a list of dictionaries to be a dictionary of dictionaries. :param param_list: The aforementioned list of dictionaries :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value behind this key will be the key each dictionary can be found at in the new root dictionary :param remove_key: If True, remove unique_key from the individual dictionaries before returning. """ param_dict = {} for params in param_list: params = params.copy() if remove_key: name = params.pop(unique_key) else: name = params.get(unique_key) param_dict[name] = params return param_dict
Rotates a list of dictionaries to be a dictionary of dictionaries. :param param_list: The aforementioned list of dictionaries :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value behind this key will be the key each dictionary can be found at in the new root dictionary :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
Rotates a list of dictionaries to be a dictionary of dictionaries.
[ "Rotates", "a", "list", "of", "dictionaries", "to", "be", "a", "dictionary", "of", "dictionaries", "." ]
def param_list_to_dict(param_list, unique_key="name", remove_key=True): param_dict = {} for params in param_list: params = params.copy() if remove_key: name = params.pop(unique_key) else: name = params.get(unique_key) param_dict[name] = params return param_dict
[ "def", "param_list_to_dict", "(", "param_list", ",", "unique_key", "=", "\"name\"", ",", "remove_key", "=", "True", ")", ":", "param_dict", "=", "{", "}", "for", "params", "in", "param_list", ":", "params", "=", "params", ".", "copy", "(", ")", "if", "remove_key", ":", "name", "=", "params", ".", "pop", "(", "unique_key", ")", "else", ":", "name", "=", "params", ".", "get", "(", "unique_key", ")", "param_dict", "[", "name", "]", "=", "params", "return", "param_dict" ]
Rotates a list of dictionaries to be a dictionary of dictionaries.
[ "Rotates", "a", "list", "of", "dictionaries", "to", "be", "a", "dictionary", "of", "dictionaries", "." ]
[ "\"\"\"Rotates a list of dictionaries to be a dictionary of dictionaries.\n\n :param param_list: The aforementioned list of dictionaries\n :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value\n behind this key will be the key each dictionary can be found at in the new root dictionary\n :param remove_key: If True, remove unique_key from the individual dictionaries before returning.\n \"\"\"" ]
[ { "param": "param_list", "type": null }, { "param": "unique_key", "type": null }, { "param": "remove_key", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "param_list", "type": null, "docstring": "The aforementioned list of dictionaries", "docstring_tokens": [ "The", "aforementioned", "list", "of", "dictionaries" ], "default": null, "is_optional": null }, { "identifier": "unique_key", "type": null, "docstring": "The name of a key which is present and unique in all of param_list's dictionaries. The value\nbehind this key will be the key each dictionary can be found at in the new root dictionary", "docstring_tokens": [ "The", "name", "of", "a", "key", "which", "is", "present", "and", "unique", "in", "all", "of", "param_list", "'", "s", "dictionaries", ".", "The", "value", "behind", "this", "key", "will", "be", "the", "key", "each", "dictionary", "can", "be", "found", "at", "in", "the", "new", "root", "dictionary" ], "default": null, "is_optional": null }, { "identifier": "remove_key", "type": null, "docstring": "If True, remove unique_key from the individual dictionaries before returning.", "docstring_tokens": [ "If", "True", "remove", "unique_key", "from", "the", "individual", "dictionaries", "before", "returning", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def param_list_to_dict(param_list, unique_key="name", remove_key=True): param_dict = {} for params in param_list: params = params.copy() if remove_key: name = params.pop(unique_key) else: name = params.get(unique_key) param_dict[name] = params return param_dict
956
208
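A usage sketch for the record above (the sample records are hypothetical):

interfaces = [{"name": "eth0", "mtu": 1500}, {"name": "eth1", "mtu": 9000}]
print(param_list_to_dict(interfaces))
# {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}}
print(param_list_to_dict(interfaces, remove_key=False)["eth1"])
# {'name': 'eth1', 'mtu': 9000}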
2ad64a9e2f5155daa8ebfcdfb2ab46a58537dd86
qwbjtu2015/dockerizeme
hard-gists/6838974/snippet.py
[ "Apache-2.0" ]
Python
sanitize
<not_specific>
def sanitize(filename): """Turn string to valid file name. """ valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) return ''.join([c for c in filename if c in valid_chars])
Turn string to valid file name.
Turn string to valid file name.
[ "Turn", "string", "to", "valid", "file", "name", "." ]
def sanitize(filename): valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) return ''.join([c for c in filename if c in valid_chars])
[ "def", "sanitize", "(", "filename", ")", ":", "valid_chars", "=", "\"-_.() %s%s\"", "%", "(", "string", ".", "ascii_letters", ",", "string", ".", "digits", ")", "return", "''", ".", "join", "(", "[", "c", "for", "c", "in", "filename", "if", "c", "in", "valid_chars", "]", ")" ]
Turn string to valid file name.
[ "Turn", "string", "to", "valid", "file", "name", "." ]
[ "\"\"\"Turn string to valid file name.\n \"\"\"" ]
[ { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import string def sanitize(filename): valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) return ''.join([c for c in filename if c in valid_chars])
957
51
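A usage sketch for the record above (the filename is hypothetical; note that disallowed characters are dropped, not replaced):

import string  # needed by sanitize

print(sanitize("report: Q1/Q2 (draft)?.txt"))
# report Q1Q2 (draft).txt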
fc42ffb6cf58592a9c3cdfde584704535a47753d
slaveofcode/rajaongkir
rajaongkir/api.py
[ "MIT" ]
Python
__parse
<not_specific>
def __parse(response_json): """Get the actual result of json response :param response_json: :return: """ return response_json.get('results') if response_json is not None else None
Get the actual result of json response :param response_json: :return:
Get the actual result of json response
[ "Get", "the", "actual", "result", "of", "json", "response" ]
def __parse(response_json): return response_json.get('results') if response_json is not None else None
[ "def", "__parse", "(", "response_json", ")", ":", "return", "response_json", ".", "get", "(", "'results'", ")", "if", "response_json", "is", "not", "None", "else", "None" ]
Get the actual result of json response
[ "Get", "the", "actual", "result", "of", "json", "response" ]
[ "\"\"\"Get the actual result of json response\n\n :param response_json:\n :return:\n \"\"\"" ]
[ { "param": "response_json", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "response_json", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __parse(response_json): return response_json.get('results') if response_json is not None else None
959
572
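A usage sketch for the record above (assumes the function is defined at module scope as shown; the payload is hypothetical):

print(__parse({"results": [{"city": "Bandung"}]}))  # [{'city': 'Bandung'}]
print(__parse({}))                                  # None (no 'results' key)
print(__parse(None))                                # None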
bc542e3998794e7ebd4914ea57257173fbf5e133
sniffen/pymol-open-source
modules/pymol/util.py
[ "CNRI-Python" ]
Python
cbay
null
def cbay(selection="(all)",quiet=1,_self=cmd): '''Wrapper around "color atomic"''' cmd=_self s = str(selection) cmd.color("atomic","(("+s+") and not elem C)",quiet=quiet) cmd.color("yellow","(elem C and ("+s+"))",quiet=quiet)
Wrapper around "color atomic"
Wrapper around "color atomic"
[ "Wrapper", "around", "\"", "color", "atomic", "\"" ]
def cbay(selection="(all)",quiet=1,_self=cmd): cmd=_self s = str(selection) cmd.color("atomic","(("+s+") and not elem C)",quiet=quiet) cmd.color("yellow","(elem C and ("+s+"))",quiet=quiet)
[ "def", "cbay", "(", "selection", "=", "\"(all)\"", ",", "quiet", "=", "1", ",", "_self", "=", "cmd", ")", ":", "cmd", "=", "_self", "s", "=", "str", "(", "selection", ")", "cmd", ".", "color", "(", "\"atomic\"", ",", "\"((\"", "+", "s", "+", "\") and not elem C)\"", ",", "quiet", "=", "quiet", ")", "cmd", ".", "color", "(", "\"yellow\"", ",", "\"(elem C and (\"", "+", "s", "+", "\"))\"", ",", "quiet", "=", "quiet", ")" ]
Wrapper around "color atomic"
[ "Wrapper", "around", "\"", "color", "atomic", "\"" ]
[ "'''Wrapper around \"color atomic\"'''" ]
[ { "param": "selection", "type": null }, { "param": "quiet", "type": null }, { "param": "_self", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "selection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "quiet", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_self", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from pymol import cmd def cbay(selection="(all)",quiet=1,_self=cmd): cmd=_self s = str(selection) cmd.color("atomic","(("+s+") and not elem C)",quiet=quiet) cmd.color("yellow","(elem C and ("+s+"))",quiet=quiet)
960
295
e146f22225db4f879603dd8ee8ce0f1a4637fa1e
lrq3000/rfigc
pyFileFixity/tests/test_rfigc.py
[ "MIT" ]
Python
partial_eq
<not_specific>
def partial_eq(file, file_partial): """ Do a partial comparison, line by line, we compare only using "line2 in line1", where line2 is from file_partial """ flag = True with open(file, 'rb') as outf, open(file_partial, 'rb') as expectedf: out = outf.read().strip('\n') expected = expectedf.read().strip('\n').split('\n') for exp in expected: if not exp in out: flag = False break return flag
Do a partial comparison, line by line, we compare only using "line2 in line1", where line2 is from file_partial
Do a partial comparison, line by line, we compare only using "line2 in line1", where line2 is from file_partial
[ "Do", "a", "partial", "comparison", "line", "by", "line", "we", "compare", "only", "using", "\"", "line2", "in", "line1", "\"", "where", "line2", "is", "from", "file_partial" ]
def partial_eq(file, file_partial): flag = True with open(file, 'rb') as outf, open(file_partial, 'rb') as expectedf: out = outf.read().strip('\n') expected = expectedf.read().strip('\n').split('\n') for exp in expected: if not exp in out: flag = False break return flag
[ "def", "partial_eq", "(", "file", ",", "file_partial", ")", ":", "flag", "=", "True", "with", "open", "(", "file", ",", "'rb'", ")", "as", "outf", ",", "open", "(", "file_partial", ",", "'rb'", ")", "as", "expectedf", ":", "out", "=", "outf", ".", "read", "(", ")", ".", "strip", "(", "'\\n'", ")", "expected", "=", "expectedf", ".", "read", "(", ")", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "for", "exp", "in", "expected", ":", "if", "not", "exp", "in", "out", ":", "flag", "=", "False", "break", "return", "flag" ]
Do a partial comparison, line by line, we compare only using "line2 in line1", where line2 is from file_partial
[ "Do", "a", "partial", "comparison", "line", "by", "line", "we", "compare", "only", "using", "\"", "line2", "in", "line1", "\"", "where", "line2", "is", "from", "file_partial" ]
[ "\"\"\" Do a partial comparison, line by line, we compare only using \"line2 in line1\", where line2 is from file_partial \"\"\"" ]
[ { "param": "file", "type": null }, { "param": "file_partial", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file_partial", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def partial_eq(file, file_partial): flag = True with open(file, 'rb') as outf, open(file_partial, 'rb') as expectedf: out = outf.read().strip('\n') expected = expectedf.read().strip('\n').split('\n') for exp in expected: if not exp in out: flag = False break return flag
961
224
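One caveat worth noting: the function opens both files in binary mode but strips the str '\n', a Python 2 idiom; under Python 3, read() returns bytes and bytes.strip('\n') raises TypeError. A Python 3 flavoured sketch of the same subset check, on in-memory strings:

out = "alpha\nbeta\ngamma"
expected = "beta\ngamma"
print(all(exp in out for exp in expected.strip("\n").split("\n")))
# True: every expected line occurs somewhere in the full output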
2707a972cedc6edda6fd2965b5855576645702eb
WalrusCow/euler
euler.py
[ "MIT" ]
Python
undigitize
<not_specific>
def undigitize(li): ''' Create a number from a list of digits. The first item should be the 1's column ''' ans = 0 m = 1 for d in li: ans += d * m m *= 10 return ans
Create a number from a list of digits. The first item should be the 1's column
Create a number from a list of digits. The first item should be the 1's column
[ "Create", "a", "number", "from", "a", "list", "of", "digits", ".", "The", "first", "item", "should", "be", "the", "1", "'", "s", "column" ]
def undigitize(li): ans = 0 m = 1 for d in li: ans += d * m m *= 10 return ans
[ "def", "undigitize", "(", "li", ")", ":", "ans", "=", "0", "m", "=", "1", "for", "d", "in", "li", ":", "ans", "+=", "d", "*", "m", "m", "*=", "10", "return", "ans" ]
Create a number from a list of digits.
[ "Create", "a", "number", "from", "a", "list", "of", "digits", "." ]
[ "'''\n Create a number from a list of digits.\n The first item should be the 1's column\n '''" ]
[ { "param": "li", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "li", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def undigitize(li): ans = 0 m = 1 for d in li: ans += d * m m *= 10 return ans
963
580
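A usage sketch for the record above:

print(undigitize([3, 2, 1]))  # 123 (the first item is the 1's column)
print(undigitize([0, 0, 7]))  # 700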
4aadcef299f6eea0ae90010b3be4bbf33ef1d554
SwamyDev/yeoda
src/yeoda/utils.py
[ "MIT" ]
Python
to_list
<not_specific>
def to_list(value): """ Takes a value and wraps it into a list if it is not already one. The result is returned. If None is passed, None is returned. Parameters ---------- value : object value to convert Returns ------- list or None A list that wraps the value. """ ret_val = copy.deepcopy(value) whitelist = (list, tuple) if ret_val is not None: ret_val = list(ret_val) if isinstance(ret_val, whitelist) else [value] return ret_val
Takes a value and wraps it into a list if it is not already one. The result is returned. If None is passed, None is returned. Parameters ---------- value : object value to convert Returns ------- list or None A list that wraps the value.
Takes a value and wraps it into a list if it is not already one. The result is returned. If None is passed, None is returned. Parameters value : object value to convert Returns list or None A list that wraps the value.
[ "Takes", "a", "value", "and", "wraps", "it", "into", "a", "list", "if", "it", "is", "not", "already", "one", ".", "The", "result", "is", "returned", ".", "If", "None", "is", "passed", "None", "is", "returned", ".", "Parameters", "value", ":", "object", "value", "to", "convert", "Returns", "list", "or", "None", "A", "list", "that", "wraps", "the", "value", "." ]
def to_list(value): ret_val = copy.deepcopy(value) whitelist = (list, tuple) if ret_val is not None: ret_val = list(ret_val) if isinstance(ret_val, whitelist) else [value] return ret_val
[ "def", "to_list", "(", "value", ")", ":", "ret_val", "=", "copy", ".", "deepcopy", "(", "value", ")", "whitelist", "=", "(", "list", ",", "tuple", ")", "if", "ret_val", "is", "not", "None", ":", "ret_val", "=", "list", "(", "ret_val", ")", "if", "isinstance", "(", "ret_val", ",", "whitelist", ")", "else", "[", "value", "]", "return", "ret_val" ]
Takes a value and wraps it into a list if it is not already one.
[ "Takes", "a", "value", "and", "wraps", "it", "into", "a", "list", "if", "it", "is", "not", "already", "one", "." ]
[ "\"\"\"\n Takes a value and wraps it into a list if it is not already one. The result is returned.\n If None is passed, None is returned.\n\n Parameters\n ----------\n value : object\n value to convert\n\n Returns\n -------\n list or None\n A list that wraps the value.\n\n \"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import copy def to_list(value): ret_val = copy.deepcopy(value) whitelist = (list, tuple) if ret_val is not None: ret_val = list(ret_val) if isinstance(ret_val, whitelist) else [value] return ret_val
964
714
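A usage sketch for the record above (note that strings are not in the whitelist, so they are wrapped rather than split):

print(to_list("abc"))      # ['abc']
print(to_list((1, 2, 3)))  # [1, 2, 3]
print(to_list(None))       # None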
64e6cf3d350256a2355b8fe7538b1ffb2ee641ab
tuminguyen/RRT_Star_Simulation
utils.py
[ "MIT" ]
Python
triangle_area
<not_specific>
def triangle_area(p1, p2, p3): """ Calculate the triangle area given from 3 points :param p1: :param p2: :param p3: :return: """ x1, y1 = p1[0], p1[1] x2, y2 = p2[0], p2[1] x3, y3 = p3[0], p3[1] return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
Calculate the triangle area given from 3 points :param p1: :param p2: :param p3: :return:
Calculate the triangle area given from 3 points
[ "Calculate", "the", "triangle", "area", "given", "from", "3", "points" ]
def triangle_area(p1, p2, p3): x1, y1 = p1[0], p1[1] x2, y2 = p2[0], p2[1] x3, y3 = p3[0], p3[1] return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
[ "def", "triangle_area", "(", "p1", ",", "p2", ",", "p3", ")", ":", "x1", ",", "y1", "=", "p1", "[", "0", "]", ",", "p1", "[", "1", "]", "x2", ",", "y2", "=", "p2", "[", "0", "]", ",", "p2", "[", "1", "]", "x3", ",", "y3", "=", "p3", "[", "0", "]", ",", "p3", "[", "1", "]", "return", "abs", "(", "(", "x1", "*", "(", "y2", "-", "y3", ")", "+", "x2", "*", "(", "y3", "-", "y1", ")", "+", "x3", "*", "(", "y1", "-", "y2", ")", ")", "/", "2.0", ")" ]
Calculate the triangle area given from 3 points
[ "Calculate", "the", "triangle", "area", "given", "from", "3", "points" ]
[ "\"\"\"\n Calculate the triangle area given from 3 points\n :param p1:\n :param p2:\n :param p3:\n :return:\n \"\"\"" ]
[ { "param": "p1", "type": null }, { "param": "p2", "type": null }, { "param": "p3", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "p1", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "p2", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "p3", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def triangle_area(p1, p2, p3): x1, y1 = p1[0], p1[1] x2, y2 = p2[0], p2[1] x3, y3 = p3[0], p3[1] return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
965
86
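A quick numeric check of the shoelace-style formula above:

print(triangle_area((0, 0), (4, 0), (0, 3)))  # 6.0, the 3-4-5 right triangle
print(triangle_area((0, 0), (1, 1), (2, 2)))  # 0.0, collinear points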
07e1383ae313b9a7c1df84ec958a203c37e542ea
jacoblee628/systemsproject
src/create_trace.py
[ "MIT" ]
Python
_filter_status
<not_specific>
def _filter_status(tests_df): """Filters out any tests with statuses that aren't equal to "Passed" or "Failed" Args: tests (pandas.DataFrame): the test data; note that there should be a column called "status" Returns: two list or pd.DataFrame: the valid and invalid automated rest_api tests """ valid = tests_df[(tests_df['Test Status']=='Passed') | (tests_df['Test Status']=='Failed')] invalid = tests_df[(tests_df['Test Status']!='Passed') & (tests_df['Test Status']!='Failed')] return valid, invalid
Filters out any tests with statuses that aren't equal to "Passed" or "Failed" Args: tests (pandas.DataFrame): the test data; note that there should be a column called "status" Returns: two list or pd.DataFrame: the valid and invalid automated rest_api tests
Filters out any tests with statuses that aren't equal to "Passed" or "Failed"
[ "Filters", "out", "any", "tests", "with", "statuses", "that", "aren", "'", "t", "equal", "to", "\"", "Passed", "\"", "or", "\"", "Failed", "\"" ]
def _filter_status(tests_df): valid = tests_df[(tests_df['Test Status']=='Passed') | (tests_df['Test Status']=='Failed')] invalid = tests_df[(tests_df['Test Status']!='Passed') & (tests_df['Test Status']!='Failed')] return valid, invalid
[ "def", "_filter_status", "(", "tests_df", ")", ":", "valid", "=", "tests_df", "[", "(", "tests_df", "[", "'Test Status'", "]", "==", "'Passed'", ")", "|", "(", "tests_df", "[", "'Test Status'", "]", "==", "'Failed'", ")", "]", "invalid", "=", "tests_df", "[", "(", "tests_df", "[", "'Test Status'", "]", "!=", "'Passed'", ")", "&", "(", "tests_df", "[", "'Test Status'", "]", "!=", "'Failed'", ")", "]", "return", "valid", ",", "invalid" ]
Filters out any tests with statuses that aren't equal to "Passed" or "Failed"
[ "Filters", "out", "any", "tests", "with", "statuses", "that", "aren", "'", "t", "equal", "to", "\"", "Passed", "\"", "or", "\"", "Failed", "\"" ]
[ "\"\"\"Filters out any tests with statuses that aren't equal to \"Passed\" or \"Failed\"\n\n Args:\n tests (pandas.DataFrame): the test data; note that there should be a column called \"status\"\n \n Returns:\n two list or pd.DataFrame: the valid and invalid automated rest_api tests\n \"\"\"" ]
[ { "param": "tests_df", "type": null } ]
{ "returns": [ { "docstring": "two list or pd.DataFrame: the valid and invalid automated rest_api tests", "docstring_tokens": [ "two", "list", "or", "pd", ".", "DataFrame", ":", "the", "valid", "and", "invalid", "automated", "rest_api", "tests" ], "type": null } ], "raises": [], "params": [ { "identifier": "tests_df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "tests", "type": null, "docstring": "the test data; note that there should be a column called \"status\"", "docstring_tokens": [ "the", "test", "data", ";", "note", "that", "there", "should", "be", "a", "column", "called", "\"", "status", "\"" ], "default": null, "is_optional": false } ], "others": [] }
def _filter_status(tests_df): valid = tests_df[(tests_df['Test Status']=='Passed') | (tests_df['Test Status']=='Failed')] invalid = tests_df[(tests_df['Test Status']!='Passed') & (tests_df['Test Status']!='Failed')] return valid, invalid
966
852
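A small pandas sketch for the record above (assumes pandas is installed; the statuses are hypothetical):

import pandas as pd

tests = pd.DataFrame({"Test Status": ["Passed", "Failed", "Skipped", "Blocked"]})
valid, invalid = _filter_status(tests)
print(list(valid["Test Status"]))    # ['Passed', 'Failed']
print(list(invalid["Test Status"]))  # ['Skipped', 'Blocked']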
e8c6cba09eea2ff683a2cd2fa4d44fee3e66c944
metrics-ca/opentitan
util/topgen/intermodule.py
[ "Apache-2.0" ]
Python
_get_default_name
<not_specific>
def _get_default_name(sig, suffix): """Generate default for a net if one does not already exist. """ # The else case covers the scenario where neither package nor default is provided. # Specifically, the interface is 'logic' and has no default value. # In this situation, just return 0's if sig['default']: return sig['default'] elif sig['package']: return "{}::{}_DEFAULT".format(sig['package'], (sig["struct"] + suffix).upper()) else: return "'0"
Generate default for a net if one does not already exist.
Generate default for a net if one does not already exist.
[ "Generate", "default", "for", "a", "net", "if", "one", "does", "not", "already", "exist", "." ]
def _get_default_name(sig, suffix): if sig['default']: return sig['default'] elif sig['package']: return "{}::{}_DEFAULT".format(sig['package'], (sig["struct"] + suffix).upper()) else: return "'0"
[ "def", "_get_default_name", "(", "sig", ",", "suffix", ")", ":", "if", "sig", "[", "'default'", "]", ":", "return", "sig", "[", "'default'", "]", "elif", "sig", "[", "'package'", "]", ":", "return", "\"{}::{}_DEFAULT\"", ".", "format", "(", "sig", "[", "'package'", "]", ",", "(", "sig", "[", "\"struct\"", "]", "+", "suffix", ")", ".", "upper", "(", ")", ")", "else", ":", "return", "\"'0\"" ]
Generate default for a net if one does not already exist.
[ "Generate", "default", "for", "a", "net", "if", "one", "does", "not", "already", "exist", "." ]
[ "\"\"\"Generate default for a net if one does not already exist.\n \"\"\"", "# The else case covers the scenario where neither package nor default is provided.", "# Specifically, the interface is 'logic' and has no default value.", "# In this situation, just return 0's" ]
[ { "param": "sig", "type": null }, { "param": "suffix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sig", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_default_name(sig, suffix): if sig['default']: return sig['default'] elif sig['package']: return "{}::{}_DEFAULT".format(sig['package'], (sig["struct"] + suffix).upper()) else: return "'0"
967
358
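A usage sketch for the record above (the sig dicts are hypothetical stand-ins for the signal entries the function reads):

sig = {"default": "", "package": "uart_pkg", "struct": "uart"}
print(_get_default_name(sig, "_req"))  # uart_pkg::UART_REQ_DEFAULT
print(_get_default_name({"default": "1'b0", "package": "", "struct": "x"}, ""))  # 1'b0
print(_get_default_name({"default": "", "package": "", "struct": "x"}, ""))      # '0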
bfa27a3da7dbc191f261646635088b379d51e2e6
uktrade/tamato
measures/util.py
[ "MIT" ]
Python
clean_duty_sentence
str
def clean_duty_sentence(value: Union[str, int, float]) -> str: """Given a value, return a string representing a duty sentence taking into account that the value may be storing simple percentages as a number value.""" if isinstance(value, float) or isinstance(value, int): # This is a percentage value that Excel has # represented as a number. decimal.getcontext().prec = 3 decimal.getcontext().rounding = decimal.ROUND_DOWN return f"{decimal.Decimal(str(value)):.3%}" else: # All other values will appear as text. return str(value)
Given a value, return a string representing a duty sentence taking into account that the value may be storing simple percentages as a number value.
Given a value, return a string representing a duty sentence taking into account that the value may be storing simple percentages as a number value.
[ "Given", "a", "value", "return", "a", "string", "representing", "a", "duty", "sentence", "taking", "into", "account", "that", "the", "value", "may", "be", "storing", "simple", "percentages", "as", "a", "number", "value", "." ]
def clean_duty_sentence(value: Union[str, int, float]) -> str: if isinstance(value, float) or isinstance(value, int): decimal.getcontext().prec = 3 decimal.getcontext().rounding = decimal.ROUND_DOWN return f"{decimal.Decimal(str(value)):.3%}" else: return str(value)
[ "def", "clean_duty_sentence", "(", "value", ":", "Union", "[", "str", ",", "int", ",", "float", "]", ")", "->", "str", ":", "if", "isinstance", "(", "value", ",", "float", ")", "or", "isinstance", "(", "value", ",", "int", ")", ":", "decimal", ".", "getcontext", "(", ")", ".", "prec", "=", "3", "decimal", ".", "getcontext", "(", ")", ".", "rounding", "=", "decimal", ".", "ROUND_DOWN", "return", "f\"{decimal.Decimal(str(value)):.3%}\"", "else", ":", "return", "str", "(", "value", ")" ]
Given a value, return a string representing a duty sentence taking into account that the value may be storing simple percentages as a number value.
[ "Given", "a", "value", "return", "a", "string", "representing", "a", "duty", "sentence", "taking", "into", "account", "that", "the", "value", "may", "be", "storing", "simple", "percentages", "as", "a", "number", "value", "." ]
[ "\"\"\"Given a value, return a string representing a duty sentence taking into\n account that the value may be storing simple percentages as a number\n value.\"\"\"", "# This is a percentage value that Excel has", "# represented as a number.", "# All other values will appear as text." ]
[ { "param": "value", "type": "Union[str, int, float]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": "Union[str, int, float]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import decimal from typing import Union def clean_duty_sentence(value: Union[str, int, float]) -> str: if isinstance(value, float) or isinstance(value, int): decimal.getcontext().prec = 3 decimal.getcontext().rounding = decimal.ROUND_DOWN return f"{decimal.Decimal(str(value)):.3%}" else: return str(value)
969
733
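A usage sketch for the record above (decimal is imported by the function; note that numeric inputs mutate the global decimal context as a side effect):

print(clean_duty_sentence(0.05))  # 5.000%
print(clean_duty_sentence(8))     # 800.000%
print(clean_duty_sentence("6.00 % + 25.00 GBP / 100 kg"))  # returned unchanged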
3cb22aaf11d2d9852c6c7005a7b5156f95b5c518
alessandrodepalma/oval-bab
tools/bab_tools/dataset_creation.py
[ "MIT" ]
Python
binary_eps_search
<not_specific>
def binary_eps_search(eps_lower_bound, eps_upper_bound, bab_function, quantization=1e-3, mode="LB"): """ Run binary search on the epsilon values in order to create a BaB dataset. :parameter eps_lower_bound: starting lower bound on epsilon :parameter eps_upper_bound: starting upper bound on epsilon :parameter bab_function: BaB function, takes only epsilon as input :parameter quantization: how spaced apart are the epsilons we are considering (default 1e-3). The effective quantization is the largest (eps_upper_bound - eps_lower_bound)/2^k > quantization. Min quantization/2, max quantization. ### Search criterion: LB mode: the property with min-eps (within quantization) that is either SAT or has timed out. UB mode: the property with max-eps (within quantization) that is either UNSAT or has timed out. Returns result, rounded upwards using the quantization. """ assert mode in ["LB", "UB"] print(f"Starting epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") while (eps_upper_bound - eps_lower_bound) > quantization: c_epsilon = (eps_upper_bound + eps_lower_bound) / 2 # Run BaB with the current epsilon value. bab_status, bab_runtime = bab_function(c_epsilon) print(f"BaB status {bab_status}, BaB runtime {bab_runtime}") conditions = ["True"] if mode == "UB" else ["True", "timeout"] if bab_status in conditions: eps_upper_bound = c_epsilon else: eps_lower_bound = c_epsilon print(f"Current epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") return_value = math.floor(eps_lower_bound / quantization) * quantization if mode == "UB" else \ math.ceil(eps_upper_bound / quantization) * quantization return return_value
Run binary search on the epsilon values in order to create a BaB dataset. :parameter eps_lower_bound: starting lower bound on epsilon :parameter eps_upper_bound: starting upper bound on epsilon :parameter bab_function: BaB function, takes only epsilon as input :parameter quantization: how spaced apart are the epsilons we are considering (default 1e-3). The effective quantization is the largest (eps_upper_bound - eps_lower_bound)/2^k > quantization. Min quantization/2, max quantization. ### Search criterion: LB mode: the property with min-eps (within quantization) that is either SAT or has timed out. UB mode: the property with max-eps (within quantization) that is either UNSAT or has timed out. Returns result, rounded upwards using the quantization.
Run binary search on the epsilon values in order to create a BaB dataset.
[ "Run", "binary", "search", "on", "the", "epsilon", "values", "in", "order", "to", "create", "a", "BaB", "dataset", "." ]
def binary_eps_search(eps_lower_bound, eps_upper_bound, bab_function, quantization=1e-3, mode="LB"): assert mode in ["LB", "UB"] print(f"Starting epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") while (eps_upper_bound - eps_lower_bound) > quantization: c_epsilon = (eps_upper_bound + eps_lower_bound) / 2 bab_status, bab_runtime = bab_function(c_epsilon) print(f"BaB status {bab_status}, BaB runtime {bab_runtime}") conditions = ["True"] if mode == "UB" else ["True", "timeout"] if bab_status in conditions: eps_upper_bound = c_epsilon else: eps_lower_bound = c_epsilon print(f"Current epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") return_value = math.floor(eps_lower_bound / quantization) * quantization if mode == "UB" else \ math.ceil(eps_upper_bound / quantization) * quantization return return_value
[ "def", "binary_eps_search", "(", "eps_lower_bound", ",", "eps_upper_bound", ",", "bab_function", ",", "quantization", "=", "1e-3", ",", "mode", "=", "\"LB\"", ")", ":", "assert", "mode", "in", "[", "\"LB\"", ",", "\"UB\"", "]", "print", "(", "f\"Starting epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}\"", ")", "while", "(", "eps_upper_bound", "-", "eps_lower_bound", ")", ">", "quantization", ":", "c_epsilon", "=", "(", "eps_upper_bound", "+", "eps_lower_bound", ")", "/", "2", "bab_status", ",", "bab_runtime", "=", "bab_function", "(", "c_epsilon", ")", "print", "(", "f\"BaB status {bab_status}, BaB runtime {bab_runtime}\"", ")", "conditions", "=", "[", "\"True\"", "]", "if", "mode", "==", "\"UB\"", "else", "[", "\"True\"", ",", "\"timeout\"", "]", "if", "bab_status", "in", "conditions", ":", "eps_upper_bound", "=", "c_epsilon", "else", ":", "eps_lower_bound", "=", "c_epsilon", "print", "(", "f\"Current epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}\"", ")", "return_value", "=", "math", ".", "floor", "(", "eps_lower_bound", "/", "quantization", ")", "*", "quantization", "if", "mode", "==", "\"UB\"", "else", "math", ".", "ceil", "(", "eps_upper_bound", "/", "quantization", ")", "*", "quantization", "return", "return_value" ]
Run binary search on the epsilon values in order to create a BaB dataset.
[ "Run", "binary", "search", "on", "the", "epsilon", "values", "in", "order", "to", "create", "a", "BaB", "dataset", "." ]
[ "\"\"\"\n Run binary search on the epsilon values in order to create a BaB dataset.\n :parameter eps_lower_bound: starting lower bound on epsilon\n :parameter eps_upper_bound: starting upper bound on epsilon\n :parameter bab_function: BaB function, takes only epsilon as input\n :parameter quantization: how spaced apart are the epsilons we are considering (default 1e-3).\n The effective quantization is the largest (eps_upper_bound - eps_lower_bound)/2^k > quantization.\n Min quantization/2, max quantization.\n\n ### Search criterion:\n LB mode: the property with min-eps (within quantization) that is either SAT or has timed out.\n UB mode: the property with max-eps (within quantization) that is either UNSAT or has timed out.\n\n Returns result, rounded upwards using the quantization.\n \"\"\"", "# Run BaB with the current epsilon value." ]
[ { "param": "eps_lower_bound", "type": null }, { "param": "eps_upper_bound", "type": null }, { "param": "bab_function", "type": null }, { "param": "quantization", "type": null }, { "param": "mode", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "eps_lower_bound", "type": null, "docstring": "starting lower bound on epsilon", "docstring_tokens": [ "starting", "lower", "bound", "on", "epsilon" ], "default": null, "is_optional": null }, { "identifier": "eps_upper_bound", "type": null, "docstring": "starting upper bound on epsilon", "docstring_tokens": [ "starting", "upper", "bound", "on", "epsilon" ], "default": null, "is_optional": null }, { "identifier": "bab_function", "type": null, "docstring": "BaB function, takes only epsilon as input", "docstring_tokens": [ "BaB", "function", "takes", "only", "epsilon", "as", "input" ], "default": null, "is_optional": null }, { "identifier": "quantization", "type": null, "docstring": "\n\nSearch criterion:\nLB mode: the property with min-eps (within quantization) that is either SAT or has timed out.\nUB mode: the property with max-eps (within quantization) that is either UNSAT or has timed out.\n\nReturns result, rounded upwards using the quantization.", "docstring_tokens": [ "Search", "criterion", ":", "LB", "mode", ":", "the", "property", "with", "min", "-", "eps", "(", "within", "quantization", ")", "that", "is", "either", "SAT", "or", "has", "timed", "out", ".", "UB", "mode", ":", "the", "property", "with", "max", "-", "eps", "(", "within", "quantization", ")", "that", "is", "either", "UNSAT", "or", "has", "timed", "out", ".", "Returns", "result", "rounded", "upwards", "using", "the", "quantization", "." ], "default": null, "is_optional": null }, { "identifier": "mode", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def binary_eps_search(eps_lower_bound, eps_upper_bound, bab_function, quantization=1e-3, mode="LB"): assert mode in ["LB", "UB"] print(f"Starting epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") while (eps_upper_bound - eps_lower_bound) > quantization: c_epsilon = (eps_upper_bound + eps_lower_bound) / 2 bab_status, bab_runtime = bab_function(c_epsilon) print(f"BaB status {bab_status}, BaB runtime {bab_runtime}") conditions = ["True"] if mode == "UB" else ["True", "timeout"] if bab_status in conditions: eps_upper_bound = c_epsilon else: eps_lower_bound = c_epsilon print(f"Current epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}") return_value = math.floor(eps_lower_bound / quantization) * quantization if mode == "UB" else \ math.ceil(eps_upper_bound / quantization) * quantization return return_value
970
45
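A sketch exercising the search with a synthetic BaB function (hypothetical; the statuses are the strings 'True', 'False' and 'timeout', exactly as the code expects):

def fake_bab(eps):
    # property becomes SAT once eps reaches 0.3
    return ("True", 0.0) if eps >= 0.3 else ("False", 0.0)

print(binary_eps_search(0.0, 1.0, fake_bab, quantization=1e-3, mode="LB"))
# ≈ 0.301: the smallest SAT eps, rounded up to the 1e-3 grid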
8f70b3d3130cfdc468bae4436f0ac6c74876b22b
lechemrc/DS-Unit-3-Sprint-1-Software-Engineering
acme_report updated.py
[ "MIT" ]
Python
inventory_report
null
def inventory_report(products): '''Outputs the inventory report based on the general report function''' names = [] prices = [] weights = [] flames = [] for product in products: names.append(product[0]) prices.append(product[1]) weights.append(product[2]) flames.append(product[3]) # average price calculation total_p = 0 for i in prices: total_p += i avg_price = total_p / len(prices) # average weight calculation total_w = 0 for i in weights: total_p += i avg_weight = total_p / len(weights) # average flammability calculation total_f = 0 for i in flames: total_p += i avg_flammability = total_p / len(flames) # output strings print('ACME CORPORATION OFFICIAL INVENTORY REPORT') print(f'Unique product names: {len(set(names))}') print(f'Average price: {avg_price}') print(f'Average weight: {avg_weight}') print(f'Average flammability: {avg_flammability}')
Outputs the inventory report based on the general report function
Outputs the inventory report based on the general report function
[ "Outputs", "the", "inventory", "report", "based", "on", "the", "general", "report", "function" ]
def inventory_report(products): names = [] prices = [] weights = [] flames = [] for product in products: names.append(product[0]) prices.append(product[1]) weights.append(product[2]) flames.append(product[3]) total_p = 0 for i in prices: total_p += i avg_price = total_p / len(prices) total_w = 0 for i in weights: total_w += i avg_weight = total_w / len(weights) total_f = 0 for i in flames: total_f += i avg_flammability = total_f / len(flames) print('ACME CORPORATION OFFICIAL INVENTORY REPORT') print(f'Unique product names: {len(set(names))}') print(f'Average price: {avg_price}') print(f'Average weight: {avg_weight}') print(f'Average flammability: {avg_flammability}')
[ "def", "inventory_report", "(", "products", ")", ":", "names", "=", "[", "]", "prices", "=", "[", "]", "weights", "=", "[", "]", "flames", "=", "[", "]", "for", "product", "in", "products", ":", "names", ".", "append", "(", "product", "[", "0", "]", ")", "prices", ".", "append", "(", "product", "[", "1", "]", ")", "weights", ".", "append", "(", "product", "[", "2", "]", ")", "flames", ".", "append", "(", "product", "[", "3", "]", ")", "total_p", "=", "0", "for", "i", "in", "prices", ":", "total_p", "+=", "i", "avg_price", "=", "total_p", "/", "len", "(", "prices", ")", "total_w", "=", "0", "for", "i", "in", "weights", ":", "total_p", "+=", "i", "avg_weight", "=", "total_p", "/", "len", "(", "weights", ")", "total_f", "=", "0", "for", "i", "in", "flames", ":", "total_p", "+=", "i", "avg_flammability", "=", "total_p", "/", "len", "(", "flames", ")", "print", "(", "'ACME CORPORATION OFFICIAL INVENTORY REPORT'", ")", "print", "(", "f'Unique product names: {len(set(names))}'", ")", "print", "(", "f'Average price: {avg_price}'", ")", "print", "(", "f'Average weight: {avg_weight}'", ")", "print", "(", "f'Average flammability: {avg_flammability}'", ")" ]
Outputs the inventory report based on the general report function
[ "Outputs", "the", "inventory", "report", "based", "on", "the", "general", "report", "function" ]
[ "'''Outputs the inventory report based on the general report function'''", "# average price calculation\r", "# average weight calculation\r", "# average flammability calculation\r", "# output strings\r" ]
[ { "param": "products", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "products", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def inventory_report(products): names = [] prices = [] weights = [] flames = [] for product in products: names.append(product[0]) prices.append(product[1]) weights.append(product[2]) flames.append(product[3]) total_p = 0 for i in prices: total_p += i avg_price = total_p / len(prices) total_w = 0 for i in weights: total_w += i avg_weight = total_w / len(weights) total_f = 0 for i in flames: total_f += i avg_flammability = total_f / len(flames) print('ACME CORPORATION OFFICIAL INVENTORY REPORT') print(f'Unique product names: {len(set(names))}') print(f'Average price: {avg_price}') print(f'Average weight: {avg_weight}') print(f'Average flammability: {avg_flammability}')
972
677
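A usage sketch for the record above (the products are hypothetical (name, price, weight, flammability) tuples):

products = [("sled", 10, 7, 0.25), ("rope", 3, 1, 0.75)]
inventory_report(products)
# ACME CORPORATION OFFICIAL INVENTORY REPORT
# Unique product names: 2
# Average price: 6.5
# Average weight: 4.0
# Average flammability: 0.5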
b205786b7c0874443714e01b95ccd65626a1879d
QuantTraderEd/vnpy_crypto
venv/lib/python3.6/site-packages/statsmodels/tsa/holtwinters.py
[ "MIT" ]
Python
_holt_win_init
<not_specific>
def _holt_win_init(x, xi, p, y, l, b, s, m): """Initialization for the Holt Winters Seasonal Models""" p[xi] = x alpha, beta, gamma, l0, b0, phi = p[:6] s0 = p[6:] alphac = 1 - alpha betac = 1 - beta gammac = 1 - gamma y_alpha = alpha * y y_gamma = gamma * y l[:] = 0 b[:] = 0 s[:] = 0 l[0] = l0 b[0] = b0 s[:m] = s0 return alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma
Initialization for the Holt Winters Seasonal Models
Initialization for the Holt Winters Seasonal Models
[ "Initialization", "for", "the", "Holt", "Winters", "Seasonal", "Models" ]
def _holt_win_init(x, xi, p, y, l, b, s, m): p[xi] = x alpha, beta, gamma, l0, b0, phi = p[:6] s0 = p[6:] alphac = 1 - alpha betac = 1 - beta gammac = 1 - gamma y_alpha = alpha * y y_gamma = gamma * y l[:] = 0 b[:] = 0 s[:] = 0 l[0] = l0 b[0] = b0 s[:m] = s0 return alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma
[ "def", "_holt_win_init", "(", "x", ",", "xi", ",", "p", ",", "y", ",", "l", ",", "b", ",", "s", ",", "m", ")", ":", "p", "[", "xi", "]", "=", "x", "alpha", ",", "beta", ",", "gamma", ",", "l0", ",", "b0", ",", "phi", "=", "p", "[", ":", "6", "]", "s0", "=", "p", "[", "6", ":", "]", "alphac", "=", "1", "-", "alpha", "betac", "=", "1", "-", "beta", "gammac", "=", "1", "-", "gamma", "y_alpha", "=", "alpha", "*", "y", "y_gamma", "=", "gamma", "*", "y", "l", "[", ":", "]", "=", "0", "b", "[", ":", "]", "=", "0", "s", "[", ":", "]", "=", "0", "l", "[", "0", "]", "=", "l0", "b", "[", "0", "]", "=", "b0", "s", "[", ":", "m", "]", "=", "s0", "return", "alpha", ",", "beta", ",", "gamma", ",", "phi", ",", "alphac", ",", "betac", ",", "gammac", ",", "y_alpha", ",", "y_gamma" ]
Initialization for the Holt Winters Seasonal Models
[ "Initialization", "for", "the", "Holt", "Winters", "Seasonal", "Models" ]
[ "\"\"\"Initialization for the Holt Winters Seasonal Models\"\"\"" ]
[ { "param": "x", "type": null }, { "param": "xi", "type": null }, { "param": "p", "type": null }, { "param": "y", "type": null }, { "param": "l", "type": null }, { "param": "b", "type": null }, { "param": "s", "type": null }, { "param": "m", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "xi", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "p", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "l", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "b", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "m", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _holt_win_init(x, xi, p, y, l, b, s, m): p[xi] = x alpha, beta, gamma, l0, b0, phi = p[:6] s0 = p[6:] alphac = 1 - alpha betac = 1 - beta gammac = 1 - gamma y_alpha = alpha * y y_gamma = gamma * y l[:] = 0 b[:] = 0 s[:] = 0 l[0] = l0 b[0] = b0 s[:m] = s0 return alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma
973
935
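A numpy sketch for the record above (assumes numpy is installed; the parameter values are hypothetical, with p laid out as [alpha, beta, gamma, l0, b0, phi, s0...]):

import numpy as np

m, n = 4, 8
x = np.array([0.5, 0.1, 0.2, 10.0, 1.0, 0.9, 1.1, 0.9, 1.0, 1.0])
xi = np.arange(6 + m)  # update every parameter slot
p = np.zeros(6 + m)
y = np.ones(n)
l, b, s = np.zeros(n), np.zeros(n), np.zeros(n + m)
alpha, beta, gamma, phi, *rest = _holt_win_init(x, xi, p, y, l, b, s, m)
print(alpha, phi, l[0], s[:m])  # alpha=0.5, phi=0.9, l[0]=10.0, s[:m] holds s0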
07d8251e3f20fda0234aac458b66ac902ed0e073
jdswalker/Advent-of-Code-2015
advent_of_code/solvers/day_21.py
[ "MIT" ]
Python
_get_player
<not_specific>
def _get_player(weapon, armor, ring1, ring2): """Calculates the damage and armor for a player with their equipment Args: weapon (Item): Stores a weapon's cost and damage attributes armor (Item): Stores an armor's cost and armor attributes ring1 (Item): Stores a ring's cost and damage or armor attributes ring2 (Item): Stores a ring's cost and damage or armor attributes Returns: dict: Stores damage and armor attributes for a player """ return { 'Damage': weapon.damage + ring1.damage + ring2.damage, 'Armor': armor.armor + ring1.armor + ring2.armor, }
Calculates the damage and armor for a player with their equipment Args: weapon (Item): Stores a weapon's cost and damage attributes armor (Item): Stores an armor's cost and armor attributes ring1 (Item): Stores a ring's cost and damage or armor attributes ring2 (Item): Stores a ring's cost and damage or armor attributes Returns: dict: Stores damage and armor attributes for a player
Calculates the damage and armor for a player with their equipment
[ "Calculates", "the", "damage", "and", "armor", "for", "a", "player", "with", "their", "equipment" ]
def _get_player(weapon, armor, ring1, ring2): return { 'Damage': weapon.damage + ring1.damage + ring2.damage, 'Armor': armor.armor + ring1.armor + ring2.armor, }
[ "def", "_get_player", "(", "weapon", ",", "armor", ",", "ring1", ",", "ring2", ")", ":", "return", "{", "'Damage'", ":", "weapon", ".", "damage", "+", "ring1", ".", "damage", "+", "ring2", ".", "damage", ",", "'Armor'", ":", "armor", ".", "armor", "+", "ring1", ".", "armor", "+", "ring2", ".", "armor", ",", "}" ]
Calculates the damage and armor for a player with their equipment
[ "Calculates", "the", "damage", "and", "armor", "for", "a", "player", "with", "their", "equipment" ]
[ "\"\"\"Calculates the damage and armor for a player with their equipment\n\n Args:\n weapon (Item): Stores a weapon's cost and damage attributes\n armor (Item): Stores an armor's cost and armor attributes\n ring1 (Item): Stores a ring's cost and damage or armor attributes\n ring2 (Item): Stores a ring's cost and damage or armor attributes\n Returns:\n dict: Stores damage and armor attributes for a player\n \"\"\"" ]
[ { "param": "weapon", "type": null }, { "param": "armor", "type": null }, { "param": "ring1", "type": null }, { "param": "ring2", "type": null } ]
{ "returns": [ { "docstring": "Stores damage and armor attributes for a player", "docstring_tokens": [ "Stores", "damage", "and", "armor", "attributes", "for", "a", "player" ], "type": "dict" } ], "raises": [], "params": [ { "identifier": "weapon", "type": null, "docstring": "Stores a weapon's cost and damage attributes", "docstring_tokens": [ "Stores", "a", "weapon", "'", "s", "cost", "and", "damage", "attributes" ], "default": null, "is_optional": false }, { "identifier": "armor", "type": null, "docstring": "Stores an armor's cost and armor attributes", "docstring_tokens": [ "Stores", "an", "armor", "'", "s", "cost", "and", "armor", "attributes" ], "default": null, "is_optional": false }, { "identifier": "ring1", "type": null, "docstring": "Stores a ring's cost and damage or armor attributes", "docstring_tokens": [ "Stores", "a", "ring", "'", "s", "cost", "and", "damage", "or", "armor", "attributes" ], "default": null, "is_optional": false }, { "identifier": "ring2", "type": null, "docstring": "Stores a ring's cost and damage or armor attributes", "docstring_tokens": [ "Stores", "a", "ring", "'", "s", "cost", "and", "damage", "or", "armor", "attributes" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _get_player(weapon, armor, ring1, ring2): return { 'Damage': weapon.damage + ring1.damage + ring2.damage, 'Armor': armor.armor + ring1.armor + ring2.armor, }
975
663
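A usage sketch for the record above (Item is a hypothetical stand-in carrying the cost/damage/armor attributes the function reads):

from collections import namedtuple

Item = namedtuple("Item", ["cost", "damage", "armor"])
dagger = Item(cost=8, damage=4, armor=0)
leather = Item(cost=13, damage=0, armor=1)
no_ring = Item(cost=0, damage=0, armor=0)
print(_get_player(dagger, leather, no_ring, no_ring))
# {'Damage': 4, 'Armor': 1}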
f68d3ea540135fbe5563a6ca72721fc59b8a4cb8
jvarho/hug
hug/authentication.py
[ "MIT" ]
Python
api_key
<not_specific>
def api_key(request, response, verify_user, context=None, **kwargs): """API Key Header Authentication The verify_user function passed in to this authenticator shall receive an API key as input, and return a user object to store in the request context if the request was successful. """ api_key = request.get_header("X-Api-Key") if api_key: try: user = verify_user(api_key) except TypeError: user = verify_user(api_key, context) if user: return user else: return False else: return None
API Key Header Authentication The verify_user function passed in to this authenticator shall receive an API key as input, and return a user object to store in the request context if the request was successful.
API Key Header Authentication The verify_user function passed in to this authenticator shall receive an API key as input, and return a user object to store in the request context if the request was successful.
[ "API", "Key", "Header", "Authentication", "The", "verify_user", "function", "passed", "in", "to", "this", "authenticator", "shall", "receive", "an", "API", "key", "as", "input", "and", "return", "a", "user", "object", "to", "store", "in", "the", "request", "context", "if", "the", "request", "was", "successful", "." ]
def api_key(request, response, verify_user, context=None, **kwargs): api_key = request.get_header("X-Api-Key") if api_key: try: user = verify_user(api_key) except TypeError: user = verify_user(api_key, context) if user: return user else: return False else: return None
[ "def", "api_key", "(", "request", ",", "response", ",", "verify_user", ",", "context", "=", "None", ",", "**", "kwargs", ")", ":", "api_key", "=", "request", ".", "get_header", "(", "\"X-Api-Key\"", ")", "if", "api_key", ":", "try", ":", "user", "=", "verify_user", "(", "api_key", ")", "except", "TypeError", ":", "user", "=", "verify_user", "(", "api_key", ",", "context", ")", "if", "user", ":", "return", "user", "else", ":", "return", "False", "else", ":", "return", "None" ]
API Key Header Authentication The verify_user function passed in to this authenticator shall receive an API key as input, and return a user object to store in the request context if the request was successful.
[ "API", "Key", "Header", "Authentication", "The", "verify_user", "function", "passed", "in", "to", "this", "authenticator", "shall", "receive", "an", "API", "key", "as", "input", "and", "return", "a", "user", "object", "to", "store", "in", "the", "request", "context", "if", "the", "request", "was", "successful", "." ]
[ "\"\"\"API Key Header Authentication\n\n    The verify_user function passed in to this authenticator shall receive an\n    API key as input, and return a user object to store in the request context\n    if the request was successful.\n    \"\"\"" ]
[ { "param": "request", "type": null }, { "param": "response", "type": null }, { "param": "verify_user", "type": null }, { "param": "context", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "response", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verify_user", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "context", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def api_key(request, response, verify_user, context=None, **kwargs): api_key = request.get_header("X-Api-Key") if api_key: try: user = verify_user(api_key) except TypeError: user = verify_user(api_key, context) if user: return user else: return False else: return None
976
779
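A minimal, self-contained sketch of how the api_key authenticator above behaves. The FakeRequest class and the key table are illustrative stand-ins, not part of hug, which would normally supply the request object itself:

KEYS = {"secret-123": {"user": "alice"}}

def verify_user(key):
    # Look up the API key; a falsy result means authentication failed
    return KEYS.get(key)

class FakeRequest:
    # Stand-in exposing only the get_header method the authenticator uses
    def __init__(self, headers):
        self._headers = headers
    def get_header(self, name):
        return self._headers.get(name)

print(api_key(FakeRequest({"X-Api-Key": "secret-123"}), None, verify_user))  # {'user': 'alice'}
print(api_key(FakeRequest({"X-Api-Key": "bogus"}), None, verify_user))       # False
print(api_key(FakeRequest({}), None, verify_user))                           # None (no header at all)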
7e4add5da2807ff0fa7ffa1b9a8219a1a77995b7
jtpio/algo-toolbox
python/mathematics/decimal_to_binary.py
[ "MIT" ]
Python
dec_to_bin
<not_specific>
def dec_to_bin(n): """ Return the binary representation of the number n expressed in base 10 Parameters ---------- n: int Number in base 10 """ return bin(n)[2:]
Return the binary representation of the number n expressed in base 10 Parameters ---------- n: int Number in base 10
Return the binary representation of the number n expressed in base 10 Parameters int Number in base 10
[ "Return", "the", "binary", "representation", "of", "the", "number", "n", "expressed", "in", "base", "10", "Parameters", "int", "Number", "in", "base", "10" ]
def dec_to_bin(n): return bin(n)[2:]
[ "def", "dec_to_bin", "(", "n", ")", ":", "return", "bin", "(", "n", ")", "[", "2", ":", "]" ]
Return the binary representation of the number n expressed in base 10 Parameters
[ "Return", "the", "binary", "representation", "of", "the", "number", "n", "expressed", "in", "base", "10", "Parameters" ]
[ "\"\"\" Return the binary representation of the number n expressed in base 10\n Parameters\n ----------\n n: int\n Number in base 10\n \"\"\"" ]
[ { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def dec_to_bin(n): return bin(n)[2:]
977
431
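A quick check of dec_to_bin, assuming only the function above; bin()'s "0b" prefix is sliced off, so even 0 stays a bare digit:

assert dec_to_bin(10) == '1010'
assert dec_to_bin(0) == '0'
print(dec_to_bin(255))  # 11111111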
381bc040302704f0c37855e064ff248bd5cc96e9
CloudChef/CloudEntries
cloudentries/ucloud/query/response.py
[ "Apache-2.0" ]
Python
filter_region
<not_specific>
def filter_region(cls, params, regions): """ filter one or multiple region(s) according to region """ if not params.get('region'): return regions aim = [] for dct in regions: if dct['Region'] == params.get('region'): aim.append(dct) return aim
filter one or multiple region(s) according to region
filter one or multiple region(s) according to region
[ "filter", "one", "or", "multiple", "region", "(", "s", ")", "according", "to", "region" ]
def filter_region(cls, params, regions): if not params.get('region'): return regions aim = [] for dct in regions: if dct['Region'] == params.get('region'): aim.append(dct) return aim
[ "def", "filter_region", "(", "cls", ",", "params", ",", "regions", ")", ":", "if", "not", "params", ".", "get", "(", "'region'", ")", ":", "return", "regions", "aim", "=", "[", "]", "for", "dct", "in", "regions", ":", "if", "dct", "[", "'Region'", "]", "==", "params", ".", "get", "(", "'region'", ")", ":", "aim", ".", "append", "(", "dct", ")", "return", "aim" ]
filter one or multiple region(s) according to region
[ "filter", "one", "or", "multiple", "region", "(", "s", ")", "according", "to", "region" ]
[ "\"\"\"\n filter one or multiple region(s) according to region\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "params", "type": null }, { "param": "regions", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "regions", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filter_region(cls, params, regions): if not params.get('region'): return regions aim = [] for dct in regions: if dct['Region'] == params.get('region'): aim.append(dct) return aim
978
877
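Since filter_region never touches its cls argument, the underlying function can be exercised directly with a placeholder; the region dicts below are illustrative:

regions = [{"Region": "cn-bj2"}, {"Region": "hk"}]
print(filter_region(None, {"region": "hk"}, regions))  # [{'Region': 'hk'}]
print(filter_region(None, {}, regions))                # no filter given: all regions pass through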
f8ca7d74fd48c821545915fcd6b896f8986c2991
kurtbrose/relativity
relativity/relativity.py
[ "MIT" ]
Python
from_rel_data_map
null
def from_rel_data_map(cls, rel_data_map): """ convert a map of column label relationships to M2Ms into a M2MGraph rel_data_map -- { (lhs_col, rhs_col): {lhs_val: rhs_val} } """ # TODO: better checking cls(rel_data_map.keys(), rel_data_map)
convert a map of column label relationships to M2Ms into a M2MGraph rel_data_map -- { (lhs_col, rhs_col): {lhs_val: rhs_val} }
convert a map of column label relationships to M2Ms into a M2MGraph
[ "convert", "a", "map", "of", "column", "label", "relationships", "to", "M2Ms", "into", "a", "M2MGraph" ]
def from_rel_data_map(cls, rel_data_map): return cls(rel_data_map.keys(), rel_data_map)
[ "def", "from_rel_data_map", "(", "cls", ",", "rel_data_map", ")", ":", "return", "cls", "(", "rel_data_map", ".", "keys", "(", ")", ",", "rel_data_map", ")" ]
convert a map of column label relationships to M2Ms into a M2MGraph
[ "convert", "a", "map", "of", "column", "label", "relationships", "to", "M2Ms", "into", "a", "M2MGraph" ]
[ "\"\"\"\n convert a map of column label relationships to M2Ms\n into a M2MGraph\n\n rel_data_map -- { (lhs_col, rhs_col): {lhs_val: rhs_val} }\n \"\"\"", "# TODO: better checking" ]
[ { "param": "cls", "type": null }, { "param": "rel_data_map", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "rel_data_map", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_rel_data_map(cls, rel_data_map): return cls(rel_data_map.keys(), rel_data_map)
979
657
6c42c3fb713a9d169beaa2aad6c1a66572aeb1da
LudditeLabs/autodoc-tool
src/autodoc/utils.py
[ "Apache-2.0" ]
Python
trim_docstring
<not_specific>
def trim_docstring(text, strip_leading=False, strip_trailing=False, as_string=False): """Extended version of the :func:`trim` from the https://www.python.org/dev/peps/pep-0257/. Strip a uniform amount of indentation from the second and further lines of the docstring, equal to the minimum indentation of all non-blank lines after the first line. Args: text: Docstring. strip_leading: Strip off leading blank lines. strip_trailing: Strip off trailing blank lines. as_string: Return result as string, otherwise list of lines. Returns: List of lines or string depending on ``as_string`` parameter. """ if not text: return u'' if as_string else [u''] # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: if isinstance(text, list): lines = text else: lines = text.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) if indent == sys.maxsize: indent = 0 # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: if strip_trailing: while trimmed and not trimmed[-1]: trimmed.pop() if strip_leading: while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: if as_string: return u'\n'.join(trimmed) else: return trimmed
Extended version of the :func:`trim` from the https://www.python.org/dev/peps/pep-0257/. Strip a uniform amount of indentation from the second and further lines of the docstring, equal to the minimum indentation of all non-blank lines after the first line. Args: text: Docstring. strip_leading: Strip off leading blank lines. strip_trailing: Strip off trailing blank lines. as_string: Return result as string, otherwise list of lines. Returns: List of lines or string depending on ``as_string`` parameter.
Strip a uniform amount of indentation from the second and further lines of the docstring, equal to the minimum indentation of all non-blank lines after the first line.
[ "Strip", "a", "uniform", "amount", "of", "indentation", "from", "the", "second", "and", "further", "lines", "of", "the", "docstring", "equal", "to", "the", "minimum", "indentation", "of", "all", "non", "-", "blank", "lines", "after", "the", "first", "line", "." ]
def trim_docstring(text, strip_leading=False, strip_trailing=False, as_string=False): if not text: return u'' if as_string else [u''] if isinstance(text, list): lines = text else: lines = text.expandtabs().splitlines() indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) if indent == sys.maxsize: indent = 0 trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) if strip_trailing: while trimmed and not trimmed[-1]: trimmed.pop() if strip_leading: while trimmed and not trimmed[0]: trimmed.pop(0) if as_string: return u'\n'.join(trimmed) else: return trimmed
[ "def", "trim_docstring", "(", "text", ",", "strip_leading", "=", "False", ",", "strip_trailing", "=", "False", ",", "as_string", "=", "False", ")", ":", "if", "not", "text", ":", "return", "u''", "if", "as_string", "else", "[", "u''", "]", "if", "isinstance", "(", "text", ",", "list", ")", ":", "lines", "=", "text", "else", ":", "lines", "=", "text", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "indent", "=", "sys", ".", "maxsize", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "stripped", "=", "line", ".", "lstrip", "(", ")", "if", "stripped", ":", "indent", "=", "min", "(", "indent", ",", "len", "(", "line", ")", "-", "len", "(", "stripped", ")", ")", "if", "indent", "==", "sys", ".", "maxsize", ":", "indent", "=", "0", "trimmed", "=", "[", "lines", "[", "0", "]", ".", "strip", "(", ")", "]", "if", "indent", "<", "sys", ".", "maxsize", ":", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "trimmed", ".", "append", "(", "line", "[", "indent", ":", "]", ".", "rstrip", "(", ")", ")", "if", "strip_trailing", ":", "while", "trimmed", "and", "not", "trimmed", "[", "-", "1", "]", ":", "trimmed", ".", "pop", "(", ")", "if", "strip_leading", ":", "while", "trimmed", "and", "not", "trimmed", "[", "0", "]", ":", "trimmed", ".", "pop", "(", "0", ")", "if", "as_string", ":", "return", "u'\\n'", ".", "join", "(", "trimmed", ")", "else", ":", "return", "trimmed" ]
Extended version of the :func:`trim` from the https://www.python.org/dev/peps/pep-0257/.
[ "Extended", "version", "of", "the", ":", "func", ":", "`", "trim", "`", "from", "the", "https", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0257", "/", "." ]
[ "\"\"\"Extended version of the :func:`trim` from the\n https://www.python.org/dev/peps/pep-0257/.\n\n Strip a uniform amount of indentation from the second and further lines\n of the docstring, equal to the minimum indentation of all non-blank lines\n after the first line.\n\n Args:\n text: Docstring.\n strip_leading: Strip off leading blank lines.\n strip_trailing: Strip off trailing blank lines.\n as_string: Return result as string, otherwise list of lines.\n\n Returns:\n List of lines or string depending on ``as_string`` parameter.\n \"\"\"", "# Convert tabs to spaces (following the normal Python rules)", "# and split into a list of lines:", "# Determine minimum indentation (first line doesn't count):", "# Remove indentation (first line is special):", "# Strip off trailing and leading blank lines:", "# Return a single string:" ]
[ { "param": "text", "type": null }, { "param": "strip_leading", "type": null }, { "param": "strip_trailing", "type": null }, { "param": "as_string", "type": null } ]
{ "returns": [ { "docstring": "List of lines or string depending on ``as_string`` parameter.", "docstring_tokens": [ "List", "of", "lines", "or", "string", "depending", "on", "`", "`", "as_string", "`", "`", "parameter", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "strip_leading", "type": null, "docstring": "Strip off leading blank lines.", "docstring_tokens": [ "Strip", "off", "leading", "blank", "lines", "." ], "default": null, "is_optional": null }, { "identifier": "strip_trailing", "type": null, "docstring": "Strip off trailing blank lines.", "docstring_tokens": [ "Strip", "off", "trailing", "blank", "lines", "." ], "default": null, "is_optional": null }, { "identifier": "as_string", "type": null, "docstring": "Return result as string, otherwise list of lines.", "docstring_tokens": [ "Return", "result", "as", "string", "otherwise", "list", "of", "lines", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys def trim_docstring(text, strip_leading=False, strip_trailing=False, as_string=False): if not text: return u'' if as_string else [u''] if isinstance(text, list): lines = text else: lines = text.expandtabs().splitlines() indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) if indent == sys.maxsize: indent = 0 trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) if strip_trailing: while trimmed and not trimmed[-1]: trimmed.pop() if strip_leading: while trimmed and not trimmed[0]: trimmed.pop(0) if as_string: return u'\n'.join(trimmed) else: return trimmed
980
489
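A small demonstration of trim_docstring (it needs the sys import shown in code_with_imports); the common four-space indent of the body lines is stripped while the deeper indent is preserved:

doc = "First line.\n    indented body line\n        deeper line"
print(trim_docstring(doc, as_string=True))
# First line.
# indented body line
#     deeper line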
936b395067571fcb90e21878805ca7e093beb03f
gamcil/frippa
frippa/results.py
[ "MIT" ]
Python
repeats_are_similar
<not_specific>
def repeats_are_similar(repeats, threshold=0.5): """Compute similarity between a collection of repeats. Computes hamming distance between each pair of repeat sequences, then divides by the number of repeats supplied (-1). Returns True if this score is over the specified threshold. """ score, total, length = 0, len(repeats), len(repeats[0].sequence) for index in range(1, total): one, two = repeats[index - 1 : index + 1] score += sum(a == b for a, b in zip(one.sequence, two.sequence)) / length return score / (total - 1) >= threshold
Compute similarity between a collection of repeats. Computes hamming distance between each pair of repeat sequences, then divides by the number of repeats supplied (-1). Returns True if this score is over the specified threshold.
Compute similarity between a collection of repeats. Computes hamming distance between each pair of repeat sequences, then divides by the number of repeats supplied (-1). Returns True if this score is over the specified threshold.
[ "Compute", "similarity", "between", "a", "collection", "of", "repeats", ".", "Computes", "hamming", "distance", "between", "each", "pair", "of", "repeat", "sequences", "then", "divides", "by", "the", "number", "of", "repeats", "supplied", "(", "-", "1", ")", ".", "Returns", "True", "if", "this", "score", "is", "over", "the", "specified", "threshold", "." ]
def repeats_are_similar(repeats, threshold=0.5): score, total, length = 0, len(repeats), len(repeats[0].sequence) for index in range(1, total): one, two = repeats[index - 1 : index + 1] score += sum(a == b for a, b in zip(one.sequence, two.sequence)) / length return score / (total - 1) >= threshold
[ "def", "repeats_are_similar", "(", "repeats", ",", "threshold", "=", "0.5", ")", ":", "score", ",", "total", ",", "length", "=", "0", ",", "len", "(", "repeats", ")", ",", "len", "(", "repeats", "[", "0", "]", ".", "sequence", ")", "for", "index", "in", "range", "(", "1", ",", "total", ")", ":", "one", ",", "two", "=", "repeats", "[", "index", "-", "1", ":", "index", "+", "1", "]", "score", "+=", "sum", "(", "a", "==", "b", "for", "a", ",", "b", "in", "zip", "(", "one", ".", "sequence", ",", "two", ".", "sequence", ")", ")", "/", "length", "return", "score", "/", "(", "total", "-", "1", ")", ">=", "threshold" ]
Compute similarity between a collection of repeats.
[ "Compute", "similarity", "between", "a", "collection", "of", "repeats", "." ]
[ "\"\"\"Compute similarity between a collection of repeats.\n\n Computes hamming distance between each pair of repeat sequences, then divides by the\n number of repeats supplied (-1). Returns True if this score is over the specified\n threshold.\n \"\"\"" ]
[ { "param": "repeats", "type": null }, { "param": "threshold", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "repeats", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "threshold", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def repeats_are_similar(repeats, threshold=0.5): score, total, length = 0, len(repeats), len(repeats[0].sequence) for index in range(1, total): one, two = repeats[index - 1 : index + 1] score += sum(a == b for a, b in zip(one.sequence, two.sequence)) / length return score / (total - 1) >= threshold
981
1,000
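repeats_are_similar only needs objects with a sequence attribute, so a namedtuple is enough for a sketch. In effect it averages the fraction of matching positions over adjacent pairs; each pair below matches in 3 of 4 positions, giving a score of 0.75:

from collections import namedtuple

Repeat = namedtuple("Repeat", "sequence")
reps = [Repeat("ACGT"), Repeat("ACGA"), Repeat("ACGT")]
print(repeats_are_similar(reps))                 # True (0.75 >= 0.5)
print(repeats_are_similar(reps, threshold=0.9))  # False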
e6201bbc29b418948f965b2aa29b14e668ccece0
CompCogNeuro/sims
ch2/detector/detector.py
[ "BSD-3-Clause" ]
Python
Init
null
def Init(ss): """ Init restarts the run, and initializes everything, including network weights and resets the epoch log table """ ss.ConfigEnv() ss.Time.Reset() ss.Time.CycPerQtr = 5 # don't need much time ss.InitWts(ss.Net) ss.StopNow = False ss.SetParams("", False) # all sheets ss.UpdateView()
Init restarts the run, and initializes everything, including network weights and resets the epoch log table
Init restarts the run, and initializes everything, including network weights and resets the epoch log table
[ "Init", "restarts", "the", "run", "and", "initializes", "everything", "including", "network", "weights", "and", "resets", "the", "epoch", "log", "table" ]
def Init(ss): ss.ConfigEnv() ss.Time.Reset() ss.Time.CycPerQtr = 5 ss.InitWts(ss.Net) ss.StopNow = False ss.SetParams("", False) ss.UpdateView()
[ "def", "Init", "(", "ss", ")", ":", "ss", ".", "ConfigEnv", "(", ")", "ss", ".", "Time", ".", "Reset", "(", ")", "ss", ".", "Time", ".", "CycPerQtr", "=", "5", "ss", ".", "InitWts", "(", "ss", ".", "Net", ")", "ss", ".", "StopNow", "=", "False", "ss", ".", "SetParams", "(", "\"\"", ",", "False", ")", "ss", ".", "UpdateView", "(", ")" ]
Init restarts the run, and initializes everything, including network weights and resets the epoch log table
[ "Init", "restarts", "the", "run", "and", "initializes", "everything", "including", "network", "weights", "and", "resets", "the", "epoch", "log", "table" ]
[ "\"\"\"\n Init restarts the run, and initializes everything, including network weights\n and resets the epoch log table\n \"\"\"", "# don't need much time", "# all sheets" ]
[ { "param": "ss", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ss", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def Init(ss): ss.ConfigEnv() ss.Time.Reset() ss.Time.CycPerQtr = 5 ss.InitWts(ss.Net) ss.StopNow = False ss.SetParams("", False) ss.UpdateView()
983
292
89dbc0750820c0a3a04279355d936b9f234bc6a0
voanna/Deep-Features-or-Not
src/time_to_label.py
[ "MIT" ]
Python
week
<not_specific>
def week(timetuple): ''' Extracts the week from dateutil timetuple object, and outputs labels in 0 .. 52 NB weeks start on the day the year started, not calendar weeks >>> import dateutil >>> timetuple = dateutil.parser.parse('2013-04-07_17-02-21', fuzzy=True).timetuple() >>> week(timetuple) 13 >>> timetuple = dateutil.parser.parse('2013-01-07_17-12-21', fuzzy=True).timetuple() >>> week(timetuple) 0 ''' day_of_year = timetuple.tm_yday return (day_of_year - 1) // 7
Extracts the week from dateutil timetuple object, and outputs labels in 0 .. 52 NB weeks start on the day the year started, not calendar weeks >>> import dateutil >>> timetuple = dateutil.parser.parse('2013-04-07_17-02-21', fuzzy=True).timetuple() >>> week(timetuple) 13 >>> timetuple = dateutil.parser.parse('2013-01-07_17-12-21', fuzzy=True).timetuple() >>> week(timetuple) 0
Extracts the week from dateutil timetuple object, and outputs labels in 0 .. 52 NB weeks start on the day the year started, not calendar weeks
[ "Extracts", "the", "week", "from", "dateutil", "timetuple", "object", "and", "outputs", "labels", "in", "0", "..", "52", "NB", "weeks", "start", "on", "the", "day", "the", "year", "started", "not", "calendar", "weeks" ]
def week(timetuple): day_of_year = timetuple.tm_yday return (day_of_year - 1) // 7
[ "def", "week", "(", "timetuple", ")", ":", "day_of_year", "=", "timetuple", ".", "tm_yday", "return", "(", "day_of_year", "-", "1", ")", "//", "7" ]
Extracts the week from dateutil timetuple object, and outputs labels in 0 .. 52 NB weeks start on the day the year started, not calendar weeks
[ "Extracts", "the", "week", "from", "dateutil", "timetuple", "object", "and", "outputs", "labels", "in", "0", "..", "52", "NB", "weeks", "start", "on", "the", "day", "the", "year", "started", "not", "calendar", "weeks" ]
[ "'''\n Extracts the week from dateutil timetuple object, and outputs labels in 0 .. 52\n NB weeks start on the day the year started, not calendar weeks\n\n >>> import dateutil\n >>> timetuple = dateutil.parser.parse('2013-04-07_17-02-21', fuzzy=True).timetuple()\n >>> week(timetuple)\n 13\n\n >>> timetuple = dateutil.parser.parse('2013-01-07_17-12-21', fuzzy=True).timetuple()\n >>> week(timetuple)\n 0\n '''" ]
[ { "param": "timetuple", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "timetuple", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def week(timetuple): day_of_year = timetuple.tm_yday return (day_of_year - 1) // 7
984
319
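week accepts any timetuple-like object with a tm_yday field, so time.strptime works as well as the dateutil calls in the doctest:

import time

print(week(time.strptime("2013-01-07", "%Y-%m-%d")))  # 0 (day 7 is still the first week)
print(week(time.strptime("2013-04-07", "%Y-%m-%d")))  # 13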
8ef3006bb2b30919eb85e8efa2e6aeed1f0bb386
oscarkey/multitask-learning
multitask-learning/cityscapestask/main.py
[ "MIT" ]
Python
config
null
def config(): """Contains the default config values.""" batch_size = 3 max_iter = 1000 root_dir_train = 'example-tiny-cityscapes' root_dir_validation = 'example-tiny-cityscapes' root_dir_test = 'example-tiny-cityscapes' num_classes = 20 initial_learning_rate = 2.5e-4 loss_type = 'learned' # One of 'fixed' or 'learned'. loss_uncertainties = (1.0, 1.0, 1.0) # equal to weights when loss_type = 'fixed' enabled_tasks = (True, True, True) gpu = True save_to_db = True validate_epochs = 1 # How frequently to run validation. Set to 0 to disable validation. # When True, we will run one validation pass and then exit. We will not train. This is useful to validate a previous # experiment using restore_from_sacred_run below. validate_only = False model_save_epochs = 0 # How frequently to checkpoint the model to Sacred. Set to 0 to disable saving the model. # Id of the sacred run to continue training on, or -1 to disable restoring. restore_sacred_run = -1 use_adam = True # The learning rate used by Adam. Not used by SGD. learning_rate = 1e-3 # Weight decay to set on the optimizer. Value from paper is 10^4 = 1e4 weight_decay = 0 # When True, drops learning rate when training loss plateaus. reduce_lr_on_plateau = False dataloader_workers = 0 # If num workers > 0 then dataloader caching won't work. # When True the dataloader will cache all data in memory after the first read. dataloader_cache = True # When True the data loader will load precomputed instance vectors from the .npy files. use_precomputed_instances = False # Whether to augment the training data with random cropping. crop = False crop_size = (64, 64) # Whether to augment the training data with random flipping. flip = False pre_train_encoder = True # When true, will download weights for resnet pre-trained on imagenet. # If total available memory is lower than this threshold, we crash rather than loading more data. # This avoids using all the memory on the server and getting it stuck. # Set to 0 to disable the check. min_available_memory_gb = 0 # Size of the dilations in the atrous convolutions in ASPP module of the encoder. Paper default is (12, 24, 36). aspp_dilations = (12, 24, 36) # When True, use minute Cityscapes. This is downsampled to 64x128, then cropped in half to 64x64. minute = False resnet_type = 'resnet101' # when None, no dropout is applied, other options are 'after_layer_4' and 'after_aspp' dropout = 'none'
Contains the default config values.
Contains the default config values.
[ "Contains", "the", "default", "config", "values", "." ]
def config(): batch_size = 3 max_iter = 1000 root_dir_train = 'example-tiny-cityscapes' root_dir_validation = 'example-tiny-cityscapes' root_dir_test = 'example-tiny-cityscapes' num_classes = 20 initial_learning_rate = 2.5e-4 loss_type = 'learned' loss_uncertainties = (1.0, 1.0, 1.0) enabled_tasks = (True, True, True) gpu = True save_to_db = True validate_epochs = 1 validate_only = False model_save_epochs = 0 restore_sacred_run = -1 use_adam = True learning_rate = 1e-3 weight_decay = 0 reduce_lr_on_plateau = False dataloader_workers = 0 dataloader_cache = True use_precomputed_instances = False crop = False crop_size = (64, 64) flip = False pre_train_encoder = True min_available_memory_gb = 0 aspp_dilations = (12, 24, 36) minute = False resnet_type = 'resnet101' dropout = 'none'
[ "def", "config", "(", ")", ":", "batch_size", "=", "3", "max_iter", "=", "1000", "root_dir_train", "=", "'example-tiny-cityscapes'", "root_dir_validation", "=", "'example-tiny-cityscapes'", "root_dir_test", "=", "'example-tiny-cityscapes'", "num_classes", "=", "20", "initial_learning_rate", "=", "2.5e-4", "loss_type", "=", "'learned'", "loss_uncertainties", "=", "(", "1.0", ",", "1.0", ",", "1.0", ")", "enabled_tasks", "=", "(", "True", ",", "True", ",", "True", ")", "gpu", "=", "True", "save_to_db", "=", "True", "validate_epochs", "=", "1", "validate_only", "=", "False", "model_save_epochs", "=", "0", "restore_sacred_run", "=", "-", "1", "use_adam", "=", "True", "learning_rate", "=", "1e-3", "weight_decay", "=", "0", "reduce_lr_on_plateau", "=", "False", "dataloader_workers", "=", "0", "dataloader_cache", "=", "True", "use_precomputed_instances", "=", "False", "crop", "=", "False", "crop_size", "=", "(", "64", ",", "64", ")", "flip", "=", "False", "pre_train_encoder", "=", "True", "min_available_memory_gb", "=", "0", "aspp_dilations", "=", "(", "12", ",", "24", ",", "36", ")", "minute", "=", "False", "resnet_type", "=", "'resnet101'", "dropout", "=", "'none'" ]
Contains the default config values.
[ "Contains", "the", "default", "config", "values", "." ]
[ "\"\"\"Contains the default config values.\"\"\"", "# One of 'fixed' or 'learned'.", "# equal to weights when loss_type = 'fixed'", "# How frequently to run validation. Set to 0 to disable validation.", "# When True, we will run one validation pass and then exit. We will not train. This is useful to validate a previous", "# experiment using restore_from_sacred_run below.", "# How frequently to checkpoint the model to Sacred. Set to 0 to disable saving the model.", "# Id of the sacred run to continue training on, or -1 to disable restoring.", "# The learning rate used by Adam. Not used by SGD.", "# Weight decay to set on the optimizer. Value from paper is 10^4 = 1e4", "# When True, drops learning rate when training loss plateaus.", "# If num workers > 0 then dataloader caching won't work.", "# When True the dataloader will cache all data in memory after the first read.", "# When True the data loader will load precomputed instance vectors from the .npy files.", "# Whether to augment the training data with random cropping.", "# Whether to augment the training data with random flipping.", "# When true, will download weights for resnet pre-trained on imagenet.", "# If total available memory is lower than this threshold, we crash rather than loading more data.", "# This avoids using all the memory on the server and getting it stuck.", "# Set to 0 to disable the check.", "# Size of the dilations in the atrous convolutions in ASPP module of the encoder. Paper default is (12, 24, 36).", "# When True, use minute Cityscapes. This is downsampled to 64x128, then cropped in half to 64x64.", "# when None, no dropout is applied, other options are 'after_layer_4' and 'after_aspp'" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def config(): batch_size = 3 max_iter = 1000 root_dir_train = 'example-tiny-cityscapes' root_dir_validation = 'example-tiny-cityscapes' root_dir_test = 'example-tiny-cityscapes' num_classes = 20 initial_learning_rate = 2.5e-4 loss_type = 'learned' loss_uncertainties = (1.0, 1.0, 1.0) enabled_tasks = (True, True, True) gpu = True save_to_db = True validate_epochs = 1 validate_only = False model_save_epochs = 0 restore_sacred_run = -1 use_adam = True learning_rate = 1e-3 weight_decay = 0 reduce_lr_on_plateau = False dataloader_workers = 0 dataloader_cache = True use_precomputed_instances = False crop = False crop_size = (64, 64) flip = False pre_train_encoder = True min_available_memory_gb = 0 aspp_dilations = (12, 24, 36) minute = False resnet_type = 'resnet101' dropout = 'none'
985
325
2a9bca75834ca6e01d57c317a1ccd6345c784934
Tskken/classification
graphics_display.py
[ "MIT" ]
Python
is_wall
<not_specific>
def is_wall(x, y, walls): """Determine if wall at given coordinate.""" if x < 0 or y < 0: return False if x >= walls.width or y >= walls.height: return False return walls[x][y]
Determine if wall at given coordinate.
Determine if wall at given coordinate.
[ "Determine", "if", "wall", "at", "given", "coordinate", "." ]
def is_wall(x, y, walls): if x < 0 or y < 0: return False if x >= walls.width or y >= walls.height: return False return walls[x][y]
[ "def", "is_wall", "(", "x", ",", "y", ",", "walls", ")", ":", "if", "x", "<", "0", "or", "y", "<", "0", ":", "return", "False", "if", "x", ">=", "walls", ".", "width", "or", "y", ">=", "walls", ".", "height", ":", "return", "False", "return", "walls", "[", "x", "]", "[", "y", "]" ]
Determine if wall at given coordinate.
[ "Determine", "if", "wall", "at", "given", "coordinate", "." ]
[ "\"\"\"Determine if wall at given coordinate.\"\"\"" ]
[ { "param": "x", "type": null }, { "param": "y", "type": null }, { "param": "walls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "walls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_wall(x, y, walls): if x < 0 or y < 0: return False if x >= walls.width or y >= walls.height: return False return walls[x][y]
988
455
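is_wall expects a grid with width, height and [x][y] indexing; the Grid class below is a minimal illustrative stand-in, not the original game's wall grid:

class Grid:
    def __init__(self, data):
        # data is a list of columns, indexed as data[x][y]
        self.data = data
        self.width, self.height = len(data), len(data[0])
    def __getitem__(self, x):
        return self.data[x]

walls = Grid([[True, False], [False, True]])
print(is_wall(0, 0, walls))   # True
print(is_wall(1, 0, walls))   # False
print(is_wall(5, 0, walls))   # False (out of bounds)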
9436eaf7fae0d1e17d4227f13ec9a59ea19f5914
ISISNeutronMuon/SScanSS-2
sscanss/core/math/misc.py
[ "BSD-3-Clause" ]
Python
trunc
<not_specific>
def trunc(value, decimals=0): """Truncates values after a number of decimal points :param value: number to truncate :type value: float :param decimals: number of decimal points to keep :type decimals: int :return: truncated float :rtype: float """ step = 10 ** decimals return math.trunc(value * step)/step
Truncates values after a number of decimal points :param value: number to truncate :type value: float :param decimals: number of decimal points to keep :type decimals: int :return: truncated float :rtype: float
Truncates values after a number of decimal points
[ "Truncates", "values", "after", "a", "number", "of", "decimal", "points" ]
def trunc(value, decimals=0): step = 10 ** decimals return math.trunc(value * step)/step
[ "def", "trunc", "(", "value", ",", "decimals", "=", "0", ")", ":", "step", "=", "10", "**", "decimals", "return", "math", ".", "trunc", "(", "value", "*", "step", ")", "/", "step" ]
Truncates values after a number of decimal points
[ "Truncates", "values", "after", "a", "number", "of", "decimal", "points" ]
[ "\"\"\"Truncates values after a number of decimal points\n\n :param value: number to truncate\n :type value: float\n :param decimals: number of decimals points to keep\n :type decimals: int\n :return: truncated float\n :rtype: float\n \"\"\"" ]
[ { "param": "value", "type": null }, { "param": "decimals", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "float" } ], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": "number to truncate", "docstring_tokens": [ "number", "to", "truncate" ], "default": null, "is_optional": null }, { "identifier": "decimals", "type": null, "docstring": "number of decimals points to keep", "docstring_tokens": [ "number", "of", "decimals", "points", "to", "keep" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def trunc(value, decimals=0): step = 10 ** decimals return math.trunc(value * step)/step
989
473
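trunc cuts off (rather than rounds) the digits past the requested precision, which matters for negative numbers:

print(trunc(3.14159, 2))  # 3.14
print(trunc(-2.78, 1))    # -2.7 (round would give -2.8)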
49ad0cc53821dae26992536df87c0f30766db722
benjamincarlin/cromwell-tools
cromwell_tools/cromwell_tools.py
[ "BSD-3-Clause" ]
Python
_length_checker
<not_specific>
def _length_checker(length, content): """Helper function to check if a string is shorter than expected length or not. :param int length: Maximum length of an expected string. :param str content: A string to be validated. :return str: A string of error message if validation fails, or an empty string if validation succeeds. """ if len(content) > length: return 'Invalid label: {0} has {1} characters. The maximum is {2}.\n'.format(content, len(content), length) else: return ''
Helper function to check if a string is shorter than expected length or not. :param int length: Maximum length of an expected string. :param str content: A string to be validated. :return str: A string of error message if validation fails, or an empty string if validation succeeds.
Helper function to check if a string is shorter than expected length or not.
[ "Helper", "function", "to", "check", "if", "a", "string", "is", "shorter", "than", "expected", "length", "or", "not", "." ]
def _length_checker(length, content): if len(content) > length: return 'Invalid label: {0} has {1} characters. The maximum is {2}.\n'.format(content, len(content), length) else: return ''
[ "def", "_length_checker", "(", "length", ",", "content", ")", ":", "if", "len", "(", "content", ")", ">", "length", ":", "return", "'Invalid label: {0} has {1} characters. The maximum is {2}.\\n'", ".", "format", "(", "content", ",", "len", "(", "content", ")", ",", "length", ")", "else", ":", "return", "''" ]
Helper function to check if a string is shorter than expected length or not.
[ "Helper", "function", "to", "check", "if", "a", "string", "is", "shorter", "than", "expected", "length", "or", "not", "." ]
[ "\"\"\"Helper function to check if a string is shorter than expected length or not.\n\n :param int length: Maximum length of an expected string.\n :param str content: A string to be validated.\n\n :return str: A string of error message if validation fails, or an empty string if validation succeeds.\n \"\"\"" ]
[ { "param": "length", "type": null }, { "param": "content", "type": null } ]
{ "returns": [ { "docstring": "A string of error message if validation fails, or an empty string if validation succeeds.", "docstring_tokens": [ "A", "string", "of", "error", "message", "if", "validation", "fails", "or", "an", "empty", "string", "if", "validation", "succeeds", "." ], "type": "str" } ], "raises": [], "params": [ { "identifier": "length", "type": null, "docstring": "Maximum length of an expected string.", "docstring_tokens": [ "Maximum", "length", "of", "an", "expected", "string", "." ], "default": null, "is_optional": false }, { "identifier": "content", "type": null, "docstring": "A string to be validated.", "docstring_tokens": [ "A", "string", "to", "be", "validated", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _length_checker(length, content): if len(content) > length: return 'Invalid label: {0} has {1} characters. The maximum is {2}.\n'.format(content, len(content), length) else: return ''
990
856
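_length_checker returns an empty string on success and a human-readable message (with a trailing newline) on failure, so callers can simply concatenate the results to accumulate errors:

print(repr(_length_checker(5, "abc")))  # ''
print(_length_checker(5, "abcdefg"))    # Invalid label: abcdefg has 7 characters. The maximum is 5.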
2d09e6d13164c23d2869b7c2476f712393d5de48
mauriballes/django-up
djangoup/management/commands/deploy.py
[ "MIT" ]
Python
run_migrations
<not_specific>
def run_migrations(cls, server_connection, deploy_settings): """Running migrations on server database.""" venv_folder_path = deploy_settings.get('server_venv_path') project_folder_path = deploy_settings.get('server_project_path') project_name = deploy_settings.get('project_name') successful_exit_code = 0 run_migration_command = '{}/bin/python {}/manage.py migrate --settings={}.settings'.format(venv_folder_path, project_folder_path, project_name) has_run_migrations_successfully = server_connection.run(run_migration_command) if has_run_migrations_successfully.exited != successful_exit_code: return False return True
Running migrations on server database.
Running migrations on server database.
[ "Running", "migrations", "on", "server", "database", "." ]
def run_migrations(cls, server_connection, deploy_settings): venv_folder_path = deploy_settings.get('server_venv_path') project_folder_path = deploy_settings.get('server_project_path') project_name = deploy_settings.get('project_name') successful_exit_code = 0 run_migration_command = '{}/bin/python {}/manage.py migrate --settings={}.settings'.format(venv_folder_path, project_folder_path, project_name) has_run_migrations_successfully = server_connection.run(run_migration_command) if has_run_migrations_successfully.exited != successful_exit_code: return False return True
[ "def", "run_migrations", "(", "cls", ",", "server_connection", ",", "deploy_settings", ")", ":", "venv_folder_path", "=", "deploy_settings", ".", "get", "(", "'server_venv_path'", ")", "project_folder_path", "=", "deploy_settings", ".", "get", "(", "'server_project_path'", ")", "project_name", "=", "deploy_settings", ".", "get", "(", "'project_name'", ")", "successful_exit_code", "=", "0", "run_migration_command", "=", "'{}/bin/python {}/manage.py migrate --settings={}.settings'", ".", "format", "(", "venv_folder_path", ",", "project_folder_path", ",", "project_name", ")", "has_run_migrations_successfully", "=", "server_connection", ".", "run", "(", "run_migration_command", ")", "if", "has_run_migrations_successfully", ".", "exited", "!=", "successful_exit_code", ":", "return", "False", "return", "True" ]
Running migrations on server database.
[ "Running", "migrations", "on", "server", "database", "." ]
[ "\"\"\"Running migrations on server database.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "server_connection", "type": null }, { "param": "deploy_settings", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "server_connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "deploy_settings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def run_migrations(cls, server_connection, deploy_settings): venv_folder_path = deploy_settings.get('server_venv_path') project_folder_path = deploy_settings.get('server_project_path') project_name = deploy_settings.get('project_name') successful_exit_code = 0 run_migration_command = '{}/bin/python {}/manage.py migrate --settings={}.settings'.format(venv_folder_path, project_folder_path, project_name) has_run_migrations_successfully = server_connection.run(run_migration_command) if has_run_migrations_successfully.exited != successful_exit_code: return False return True
991
110
ec96778d0258023f1763dfde6686c979192b7605
Tyranicangel/dtrans
venv/Lib/site-packages/IPython/html/services/sessions/sessionmanager.py
[ "BSD-3-Clause" ]
Python
row_factory
<not_specific>
def row_factory(cursor, row): """Takes sqlite database session row and turns it into a dictionary""" row = sqlite3.Row(cursor, row) model = { 'id': row['session_id'], 'notebook': { 'name': row['name'], 'path': row['path'] }, 'kernel': { 'id': row['kernel_id'], } } return model
Takes sqlite database session row and turns it into a dictionary
Takes sqlite database session row and turns it into a dictionary
[ "Takes", "sqlite", "database", "session", "row", "and", "turns", "it", "into", "a", "dictionary" ]
def row_factory(cursor, row): row = sqlite3.Row(cursor, row) model = { 'id': row['session_id'], 'notebook': { 'name': row['name'], 'path': row['path'] }, 'kernel': { 'id': row['kernel_id'], } } return model
[ "def", "row_factory", "(", "cursor", ",", "row", ")", ":", "row", "=", "sqlite3", ".", "Row", "(", "cursor", ",", "row", ")", "model", "=", "{", "'id'", ":", "row", "[", "'session_id'", "]", ",", "'notebook'", ":", "{", "'name'", ":", "row", "[", "'name'", "]", ",", "'path'", ":", "row", "[", "'path'", "]", "}", ",", "'kernel'", ":", "{", "'id'", ":", "row", "[", "'kernel_id'", "]", ",", "}", "}", "return", "model" ]
Takes sqlite database session row and turns it into a dictionary
[ "Takes", "sqlite", "database", "session", "row", "and", "turns", "it", "into", "a", "dictionary" ]
[ "\"\"\"Takes sqlite database session row and turns it into a dictionary\"\"\"" ]
[ { "param": "cursor", "type": null }, { "param": "row", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cursor", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "row", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sqlite3 def row_factory(cursor, row): row = sqlite3.Row(cursor, row) model = { 'id': row['session_id'], 'notebook': { 'name': row['name'], 'path': row['path'] }, 'kernel': { 'id': row['kernel_id'], } } return model
993
333
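Because row_factory matches sqlite3's (cursor, row) callback signature, it can be installed on a real in-memory connection; the table below just mirrors the columns the function reads and is illustrative:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sessions (session_id, name, path, kernel_id)")
conn.execute("INSERT INTO sessions VALUES ('s1', 'nb.ipynb', '/tmp', 'k1')")
conn.row_factory = row_factory  # every fetched row now comes back as the model dict
model = conn.execute("SELECT * FROM sessions").fetchone()
print(model["id"], model["notebook"]["name"], model["kernel"]["id"])  # s1 nb.ipynb k1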
033120a23235513eede57b2d1e2cd1dfce98ed89
weld-project/baloo
baloo/config.py
[ "BSD-3-Clause" ]
Python
library_ext
<not_specific>
def library_ext(): """Returns the platform-dependent extension for dynamic libraries. """ system = platform.system() if system == 'Linux': ext = "so" elif system == 'Darwin': ext = "dylib" else: raise OSError("Unsupported platform {}", system) return ext
Returns the platform-dependent extension for dynamic libraries.
Returns the platform-dependent extension for dynamic libraries.
[ "Returns", "the", "platform", "-", "dependent", "extension", "for", "dynamic", "libraries", "." ]
def library_ext(): system = platform.system() if system == 'Linux': ext = "so" elif system == 'Darwin': ext = "dylib" else: raise OSError("Unsupported platform {}", system) return ext
[ "def", "library_ext", "(", ")", ":", "system", "=", "platform", ".", "system", "(", ")", "if", "system", "==", "'Linux'", ":", "ext", "=", "\"so\"", "elif", "system", "==", "'Darwin'", ":", "ext", "=", "\"dylib\"", "else", ":", "raise", "OSError", "(", "\"Unsupported platform {}\"", ",", "system", ")", "return", "ext" ]
Returns the platform-dependent extension for dynamic libraries.
[ "Returns", "the", "platform", "-", "dependent", "extension", "for", "dynamic", "libraries", "." ]
[ "\"\"\"Returns the platform-dependent extension for dynamic libraries. \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import platform def library_ext(): system = platform.system() if system == 'Linux': ext = "so" elif system == 'Darwin': ext = "dylib" else: raise OSError("Unsupported platform {}", system) return ext
994
598
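library_ext is typically combined with a base name to build a platform-specific file name; on an unsupported platform such as Windows it raises OSError (the "libweld" base name below is illustrative):

ext = library_ext()
print("libweld." + ext)  # libweld.so on Linux, libweld.dylib on macOS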
f2fb1383ba7b82e53b0346f0042a741a0588a001
Tiggerlaboratoriet/pigweed
pw_env_setup/py/pw_env_setup/python_packages.py
[ "Apache-2.0" ]
Python
_installed_packages
null
def _installed_packages(): """Run pip python_packages and write to out.""" cmd = [ 'python', '-m', 'pip', 'freeze', '--exclude-editable', '--local', ] proc = subprocess.run(cmd, capture_output=True) for line in proc.stdout.decode().splitlines(): if ' @ ' not in line: yield line
Run pip python_packages and write to out.
Run pip python_packages and write to out.
[ "Run", "pip", "python_packages", "and", "write", "to", "out", "." ]
def _installed_packages(): cmd = [ 'python', '-m', 'pip', 'freeze', '--exclude-editable', '--local', ] proc = subprocess.run(cmd, capture_output=True) for line in proc.stdout.decode().splitlines(): if ' @ ' not in line: yield line
[ "def", "_installed_packages", "(", ")", ":", "cmd", "=", "[", "'python'", ",", "'-m'", ",", "'pip'", ",", "'freeze'", ",", "'--exclude-editable'", ",", "'--local'", ",", "]", "proc", "=", "subprocess", ".", "run", "(", "cmd", ",", "capture_output", "=", "True", ")", "for", "line", "in", "proc", ".", "stdout", ".", "decode", "(", ")", ".", "splitlines", "(", ")", ":", "if", "' @ '", "not", "in", "line", ":", "yield", "line" ]
Run pip freeze and yield the locally installed packages.
[ "Run", "pip", "freeze", "and", "yield", "the", "locally", "installed", "packages", "." ]
[ "\"\"\"Run pip freeze and yield the locally installed packages.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import subprocess def _installed_packages(): cmd = [ 'python', '-m', 'pip', 'freeze', '--exclude-editable', '--local', ] proc = subprocess.run(cmd, capture_output=True) for line in proc.stdout.decode().splitlines(): if ' @ ' not in line: yield line
995
1,012
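_installed_packages is a generator, so the pip subprocess only runs when iterated; the exact output depends on the active environment (and capture_output needs Python 3.7+):

for requirement in _installed_packages():
    print(requirement)  # e.g. pip==23.0, one frozen requirement per line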
961a38c232ba39bbe9710a22f21b3db4e7c6fa69
n-nez/blueoil
lmnet/lmnet/datasets/lm_flower.py
[ "Apache-2.0" ]
Python
count_max_boxes
<not_specific>
def count_max_boxes(cls): """Count max boxes size over all subsets.""" num_max_boxes = 0 for subset in cls.available_subsets: obj = cls(subset=subset) gt_boxes_list = obj.annotations subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list]) if subset_max >= num_max_boxes: num_max_boxes = subset_max return num_max_boxes
Count max boxes size over all subsets.
Count max boxes size over all subsets.
[ "Count", "max", "boxes", "size", "over", "all", "subsets", "." ]
def count_max_boxes(cls): num_max_boxes = 0 for subset in cls.available_subsets: obj = cls(subset=subset) gt_boxes_list = obj.annotations subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list]) if subset_max >= num_max_boxes: num_max_boxes = subset_max return num_max_boxes
[ "def", "count_max_boxes", "(", "cls", ")", ":", "num_max_boxes", "=", "0", "for", "subset", "in", "cls", ".", "available_subsets", ":", "obj", "=", "cls", "(", "subset", "=", "subset", ")", "gt_boxes_list", "=", "obj", ".", "annotations", "subset_max", "=", "max", "(", "[", "len", "(", "gt_boxes", ")", "for", "gt_boxes", "in", "gt_boxes_list", "]", ")", "if", "subset_max", ">=", "num_max_boxes", ":", "num_max_boxes", "=", "subset_max", "return", "num_max_boxes" ]
Count max boxes size over all subsets.
[ "Count", "max", "boxes", "size", "over", "all", "subsets", "." ]
[ "\"\"\"Count max boxes size over all subsets.\"\"\"" ]
[ { "param": "cls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def count_max_boxes(cls): num_max_boxes = 0 for subset in cls.available_subsets: obj = cls(subset=subset) gt_boxes_list = obj.annotations subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list]) if subset_max >= num_max_boxes: num_max_boxes = subset_max return num_max_boxes
996
94
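count_max_boxes takes the dataset class itself, not an instance; TinyDataset below is an illustrative stand-in exposing just the available_subsets and annotations attributes it uses:

class TinyDataset:
    available_subsets = ["train", "validation"]
    def __init__(self, subset):
        # annotations: one list of ground-truth boxes per image
        self.annotations = {"train": [[1, 2], [1]],
                            "validation": [[1, 2, 3]]}[subset]

print(count_max_boxes(TinyDataset))  # 3 (the validation image with three boxes)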
0c5f26ca1f3dc19fd75dea796d395693e60f3215
liuruoze/Raw-vs-Human-in-AlphaStar
alphastarmini/core/rl/rl_loss.py
[ "Apache-2.0" ]
Python
compute_over_actions
<not_specific>
def compute_over_actions(f, *args): """Runs f over all elements in the lists composing *args. Autoregressive actions are composed of many logits. We run losses functions over all sets of logits. """ ''' # show the middle results for a in zip(*args): print("a:", a) r = f(*a) print("r:", r) ''' return sum(f(*a) for a in zip(*args))
Runs f over all elements in the lists composing *args. Autoregressive actions are composed of many logits. We run losses functions over all sets of logits.
Runs f over all elements in the lists composing *args. Autoregressive actions are composed of many logits. We run losses functions over all sets of logits.
[ "Runs", "f", "over", "all", "elements", "in", "the", "lists", "composing", "*", "args", ".", "Autoregressive", "actions", "are", "composed", "of", "many", "logits", ".", "We", "run", "losses", "functions", "over", "all", "sets", "of", "logits", "." ]
def compute_over_actions(f, *args): return sum(f(*a) for a in zip(*args))
[ "def", "compute_over_actions", "(", "f", ",", "*", "args", ")", ":", "'''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''", "return", "sum", "(", "f", "(", "*", "a", ")", "for", "a", "in", "zip", "(", "*", "args", ")", ")" ]
Runs f over all elements in the lists composing *args.
[ "Runs", "f", "over", "all", "elements", "in", "the", "lists", "composing", "*", "args", "." ]
[ "\"\"\"Runs f over all elements in the lists composing *args.\n\n Autoregressive actions are composed of many logits. We run losses functions\n over all sets of logits.\n \"\"\"", "'''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''" ]
[ { "param": "f", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_over_actions(f, *args): return sum(f(*a) for a in zip(*args))
997
562
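compute_over_actions just sums f over the zipped argument lists, so any per-element function works; a toy hamming distance makes a compact check:

def hamming(a, b):
    # number of positions where the two strings differ
    return sum(x != y for x, y in zip(a, b))

print(compute_over_actions(hamming, ["ab", "cd"], ["ad", "cd"]))  # 1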
6f66871f671c18d4e45d020386c6dd15b78e162e
mengkai94/training_results_v0.6
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/autotvm/tuner/callback.py
[ "Apache-2.0" ]
Python
log_to_database
<not_specific>
def log_to_database(db): """Save the tuning records to a database object. Parameters ---------- db: Database The database """ def _callback(_, inputs, results): """Callback implementation""" for inp, result in zip(inputs, results): db.save(inp, result) return _callback
Save the tuning records to a database object. Parameters ---------- db: Database The database
Save the tuning records to a database object. Parameters Database The database
[ "Save", "the", "tuning", "records", "to", "a", "database", "object", ".", "Parameters", "Database", "The", "database" ]
def log_to_database(db): def _callback(_, inputs, results): for inp, result in zip(inputs, results): db.save(inp, result) return _callback
[ "def", "log_to_database", "(", "db", ")", ":", "def", "_callback", "(", "_", ",", "inputs", ",", "results", ")", ":", "\"\"\"Callback implementation\"\"\"", "for", "inp", ",", "result", "in", "zip", "(", "inputs", ",", "results", ")", ":", "db", ".", "save", "(", "inp", ",", "result", ")", "return", "_callback" ]
Save the tuning records to a database object.
[ "Save", "the", "tuning", "records", "to", "a", "database", "object", "." ]
[ "\"\"\"Save the tuning records to a database object.\n\n Parameters\n ----------\n db: Database\n The database\n \"\"\"", "\"\"\"Callback implementation\"\"\"" ]
[ { "param": "db", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "db", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def log_to_database(db): def _callback(_, inputs, results): for inp, result in zip(inputs, results): db.save(inp, result) return _callback
999
211
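log_to_database only ever calls db.save, so a list-backed stand-in (not the real autotvm Database) is enough to see the returned callback in action:

class ListDB:
    def __init__(self):
        self.records = []
    def save(self, inp, result):
        self.records.append((inp, result))

db = ListDB()
callback = log_to_database(db)
callback(None, ["cfg-a", "cfg-b"], [0.91, 0.88])
print(db.records)  # [('cfg-a', 0.91), ('cfg-b', 0.88)]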
530acf89712186189dc316e08cee461e0e5a5f9e
jahr-USGS/TAMA_hydrofunctions
hydrofunctions/typing.py
[ "MIT" ]
Python
check_NWIS_site
<not_specific>
def check_NWIS_site(input): """Checks that the USGS station site id is valid. """ msg = "NWIS station id(s) should be a string or list of strings, \ often in the form of an eight digit number enclosed in quotes. \ Actual value: {}".format(input) output = "" if input is None: return None # assume that if it is a string it will be fine as is. # don't accept a series of sites in a single string. # Test for and reject empty strings: empty strings are falsy. if isinstance(input, str) and input: return input # input = input.split(',') # test for input is a list and it is not empty elif isinstance(input, list) and input: for s in input: if isinstance(s, str) and s: output = output + s + ',' else: raise TypeError(msg + " bad element: {}".format(s)) # format: ['0123', '567'] ==> "0123,567" # remove the last comma return output[:-1] else: raise TypeError(msg) # No longer accept strings with commas. # format site(s) # sites = '{}'.format(input[0]) # if len(input) > 1: # for s in input[1:]: # sites += ',{}'.format(s) # return sites
Checks that the USGS station site id is valid.
Checks that the USGS station site id is valid.
[ "Checks", "that", "the", "USGS", "station", "site", "id", "is", "valid", "." ]
def check_NWIS_site(input): msg = "NWIS station id(s) should be a string or list of strings, \ often in the form of an eight digit number enclosed in quotes. \ Actual value: {}".format(input) output = "" if input is None: return None if isinstance(input, str) and input: return input elif isinstance(input, list) and input: for s in input: if isinstance(s, str) and s: output = output + s + ',' else: raise TypeError(msg + " bad element: {}".format(s)) return output[:-1] else: raise TypeError(msg)
[ "def", "check_NWIS_site", "(", "input", ")", ":", "msg", "=", "\"NWIS station id(s) should be a string or list of strings, \\\n often in the form of an eight digit number enclosed in quotes. \\\n Actual value: {}\"", ".", "format", "(", "input", ")", "output", "=", "\"\"", "if", "input", "is", "None", ":", "return", "None", "if", "isinstance", "(", "input", ",", "str", ")", "and", "input", ":", "return", "input", "elif", "isinstance", "(", "input", ",", "list", ")", "and", "input", ":", "for", "s", "in", "input", ":", "if", "isinstance", "(", "s", ",", "str", ")", "and", "s", ":", "output", "=", "output", "+", "s", "+", "','", "else", ":", "raise", "TypeError", "(", "msg", "+", "\" bad element: {}\"", ".", "format", "(", "s", ")", ")", "return", "output", "[", ":", "-", "1", "]", "else", ":", "raise", "TypeError", "(", "msg", ")" ]
Checks that the USGS station site id is valid.
[ "Checks", "that", "the", "USGS", "station", "site", "id", "is", "valid", "." ]
[ "\"\"\"Checks that the USGS station site id is valid.\n \"\"\"", "# assume that if it is a string it will be fine as is.", "# don't accept a series of sites in a single string.", "# Test for and reject empty strings: empty strings are falsy.", "# input = input.split(',')", "# test for input is a list and it is not empty", "# format: ['0123', '567'] ==> \"0123,567\"", "# remove the last comma", "# No longer accept strings with commas.", "# format site(s)", "# sites = '{}'.format(input[0])", "# if len(input) > 1:", "# for s in input[1:]:", "# sites += ',{}'.format(s)", "# return sites" ]
[ { "param": "input", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_NWIS_site(input): msg = "NWIS station id(s) should be a string or list of strings, \ often in the form of an eight digit number enclosed in quotes. \ Actual value: {}".format(input) output = "" if input is None: return None if isinstance(input, str) and input: return input elif isinstance(input, list) and input: for s in input: if isinstance(s, str) and s: output = output + s + ',' else: raise TypeError(msg + " bad element: {}".format(s)) return output[:-1] else: raise TypeError(msg)
1,001
943
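check_NWIS_site normalizes a non-empty string or list of strings into the comma-separated form NWIS expects, passes None through, and raises TypeError on anything else:

print(check_NWIS_site("01585200"))            # 01585200
print(check_NWIS_site(["01585200", "0158"]))  # 01585200,0158
print(check_NWIS_site(None))                  # None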
fbbcf9eee60421a1f07a301e092bd7bf9b0e4850
melkisedeath/Harmonic_Analysis_and_Trajectory
NetworkX_GraphTranslation.py
[ "MIT" ]
Python
EdgesSetCreate
<not_specific>
def EdgesSetCreate(TrajectoryEdges): """Take the set of all duplicate points in trajectory object.""" listOfEdges = [] for edgesList in TrajectoryEdges: for edge in edgesList: listOfEdges.append(edge) setOfEdges = list(set(listOfEdges)) return setOfEdges, listOfEdges
Take the set of all duplicate points in trajectory object.
Take the set of all duplicate points in trajectory object.
[ "Take", "the", "set", "of", "all", "duplicate", "points", "in", "trajectory", "object", "." ]
def EdgesSetCreate(TrajectoryEdges): listOfEdges = [] for edgesList in TrajectoryEdges: for edge in edgesList: listOfEdges.append(edge) setOfEdges = list(set(listOfEdges)) return setOfEdges, listOfEdges
[ "def", "EdgesSetCreate", "(", "TrajectoryEdges", ")", ":", "listOfEdges", "=", "[", "]", "for", "edgesList", "in", "TrajectoryEdges", ":", "for", "edge", "in", "edgesList", ":", "listOfEdges", ".", "append", "(", "edge", ")", "setOfEdges", "=", "list", "(", "set", "(", "listOfEdges", ")", ")", "return", "setOfEdges", ",", "listOfEdges" ]
Take the set of all duplicate points in trajectory object.
[ "Take", "the", "set", "of", "all", "duplicate", "points", "in", "trajectory", "object", "." ]
[ "\"\"\"Take the set of all duplicate points in trajectory object.\"\"\"" ]
[ { "param": "TrajectoryEdges", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "TrajectoryEdges", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def EdgesSetCreate(TrajectoryEdges): listOfEdges = [] for edgesList in TrajectoryEdges: for edge in edgesList: listOfEdges.append(edge) setOfEdges = list(set(listOfEdges)) return setOfEdges, listOfEdges
1,002
459
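A self-contained sketch of EdgesSetCreate above on hypothetical edge tuples; edges must be hashable, since the function deduplicates through a set:

edges_per_point = [[(0, 1), (1, 2)], [(1, 2), (2, 3)]]
unique_edges, all_edges = EdgesSetCreate(edges_per_point)
# all_edges == [(0, 1), (1, 2), (1, 2), (2, 3)]  -- flattened, duplicates kept
# unique_edges holds each edge once, in arbitrary set order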
d2bd79af5eacf67b6794b5ac34b614bd6f322af3
Erfard/dash-sample-apps
apps/dash-lyft-explorer/app.py
[ "MIT" ]
Python
compute_pointcloud_for_image
<not_specific>
def compute_pointcloud_for_image( lv5, sample_token: str, dot_size: int = 2, pointsensor_channel: str = "LIDAR_TOP", camera_channel: str = "CAM_FRONT", out_path: str = None, ): """Scatter-plots a point-cloud on top of image. Args: sample_token: Sample token. dot_size: Scatter plot dot size. pointsensor_channel: RADAR or LIDAR channel name, e.g. 'LIDAR_TOP'. camera_channel: Camera channel name, e.g. 'CAM_FRONT'. out_path: Optional path to save the rendered figure to disk. Returns: tuple containing the points, array of colors and a pillow image """ sample_record = lv5.get("sample", sample_token) # Here we just grab the front camera and the point sensor. pointsensor_token = sample_record["data"][pointsensor_channel] camera_token = sample_record["data"][camera_channel] points, coloring, im = lv5.explorer.map_pointcloud_to_image( pointsensor_token, camera_token ) return points, coloring, im
Scatter-plots a point-cloud on top of image. Args: sample_token: Sample token. dot_size: Scatter plot dot size. pointsensor_channel: RADAR or LIDAR channel name, e.g. 'LIDAR_TOP'. camera_channel: Camera channel name, e.g. 'CAM_FRONT'. out_path: Optional path to save the rendered figure to disk. Returns: tuple containing the points, array of colors and a pillow image
Scatter-plots a point-cloud on top of image.
[ "Scatter", "-", "plots", "a", "point", "-", "cloud", "on", "top", "of", "image", "." ]
def compute_pointcloud_for_image( lv5, sample_token: str, dot_size: int = 2, pointsensor_channel: str = "LIDAR_TOP", camera_channel: str = "CAM_FRONT", out_path: str = None, ): sample_record = lv5.get("sample", sample_token) pointsensor_token = sample_record["data"][pointsensor_channel] camera_token = sample_record["data"][camera_channel] points, coloring, im = lv5.explorer.map_pointcloud_to_image( pointsensor_token, camera_token ) return points, coloring, im
[ "def", "compute_pointcloud_for_image", "(", "lv5", ",", "sample_token", ":", "str", ",", "dot_size", ":", "int", "=", "2", ",", "pointsensor_channel", ":", "str", "=", "\"LIDAR_TOP\"", ",", "camera_channel", ":", "str", "=", "\"CAM_FRONT\"", ",", "out_path", ":", "str", "=", "None", ",", ")", ":", "sample_record", "=", "lv5", ".", "get", "(", "\"sample\"", ",", "sample_token", ")", "pointsensor_token", "=", "sample_record", "[", "\"data\"", "]", "[", "pointsensor_channel", "]", "camera_token", "=", "sample_record", "[", "\"data\"", "]", "[", "camera_channel", "]", "points", ",", "coloring", ",", "im", "=", "lv5", ".", "explorer", ".", "map_pointcloud_to_image", "(", "pointsensor_token", ",", "camera_token", ")", "return", "points", ",", "coloring", ",", "im" ]
Scatter-plots a point-cloud on top of image.
[ "Scatter", "-", "plots", "a", "point", "-", "cloud", "on", "top", "of", "image", "." ]
[ "\"\"\"Scatter-plots a point-cloud on top of image.\n Args:\n sample_token: Sample token.\n dot_size: Scatter plot dot size.\n pointsensor_channel: RADAR or LIDAR channel name, e.g. 'LIDAR_TOP'.\n camera_channel: Camera channel name, e.g. 'CAM_FRONT'.\n out_path: Optional path to save the rendered figure to disk.\n Returns:\n tuple containing the points, array of colors and a pillow image\n \"\"\"", "# Here we just grab the front camera and the point sensor." ]
[ { "param": "lv5", "type": null }, { "param": "sample_token", "type": "str" }, { "param": "dot_size", "type": "int" }, { "param": "pointsensor_channel", "type": "str" }, { "param": "camera_channel", "type": "str" }, { "param": "out_path", "type": "str" } ]
{ "returns": [ { "docstring": "tuple containing the points, array of colors and a pillow image", "docstring_tokens": [ "tuple", "containing", "the", "points", "array", "of", "colors", "and", "a", "pillow", "image" ], "type": null } ], "raises": [], "params": [ { "identifier": "lv5", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sample_token", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "dot_size", "type": "int", "docstring": "Scatter plot dot size.", "docstring_tokens": [ "Scatter", "plot", "dot", "size", "." ], "default": null, "is_optional": null }, { "identifier": "pointsensor_channel", "type": "str", "docstring": "RADAR or LIDAR channel name, e.g.", "docstring_tokens": [ "RADAR", "or", "LIDAR", "channel", "name", "e", ".", "g", "." ], "default": null, "is_optional": null }, { "identifier": "camera_channel", "type": "str", "docstring": "Camera channel name, e.g.", "docstring_tokens": [ "Camera", "channel", "name", "e", ".", "g", "." ], "default": null, "is_optional": null }, { "identifier": "out_path", "type": "str", "docstring": "Optional path to save the rendered figure to disk.", "docstring_tokens": [ "Optional", "path", "to", "save", "the", "rendered", "figure", "to", "disk", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_pointcloud_for_image( lv5, sample_token: str, dot_size: int = 2, pointsensor_channel: str = "LIDAR_TOP", camera_channel: str = "CAM_FRONT", out_path: str = None, ): sample_record = lv5.get("sample", sample_token) pointsensor_token = sample_record["data"][pointsensor_channel] camera_token = sample_record["data"][camera_channel] points, coloring, im = lv5.explorer.map_pointcloud_to_image( pointsensor_token, camera_token ) return points, coloring, im
1,003
262
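A usage sketch for compute_pointcloud_for_image above; it assumes lv5 is an already-loaded Lyft Level 5 dataset object whose sample table exposes tokens (both are placeholders here):

sample_token = lv5.sample[0]['token']   # assumption: lv5 is an initialized LyftDataset
points, coloring, im = compute_pointcloud_for_image(lv5, sample_token)
# points: LIDAR points projected into the image, coloring: per-point colors, im: PIL image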
fbe0d4e018ef05fd70389b8b5f3503484405147e
melvincabatuan/PythonRefresher
hello_python4.py
[ "MIT" ]
Python
odd_1to10_range
<not_specific>
def odd_1to10_range(): ''' Returns a sequence of odd integers, 1 to 10 (using range). ''' return list(range(1, 11, 2))
Returns a sequence of odd integers, 1 to 10 (using range).
Returns a sequence of odd integers, 1 to 10 (using range).
[ "Returns", "a", "sequence", "of", "odd", "integers", "1", "to", "10", "(", "using", "range", ")", "." ]
def odd_1to10_range(): return list(range(1, 11, 2))
[ "def", "odd_1to10_range", "(", ")", ":", "return", "list", "(", "range", "(", "1", ",", "11", ",", "2", ")", ")" ]
Returns a sequence of odd integers, 1 to 10 (using range).
[ "Returns", "a", "sequence", "of", "odd", "integers", "1", "to", "10", "(", "using", "range", ")", "." ]
[ "'''\r\n Returns a sequence of odd integers, 1 to 10 (using range).\r\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def odd_1to10_range(): return list(range(1, 11, 2))
1,004
58
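The behaviour of odd_1to10_range above, shown directly:

odd_1to10_range()   # [1, 3, 5, 7, 9] -- range(1, 11, 2) steps through the odd integers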
7a9065d04b743d4c1a968b662bd3d18be4b09ee8
rethinkdb-incubator/rethinkdb-python
rethinkdb/query.py
[ "Apache-2.0" ]
Python
distinct
<not_specific>
def distinct(*arguments): """ Removes duplicate elements from a sequence. The distinct command can be called on any sequence or table with an index. """ return ast.Distinct(*[ast.func_wrap(arg) for arg in arguments])
Removes duplicate elements from a sequence. The distinct command can be called on any sequence or table with an index.
Removes duplicate elements from a sequence. The distinct command can be called on any sequence or table with an index.
[ "Removes", "duplicate", "elements", "from", "a", "sequence", ".", "The", "distinct", "command", "can", "be", "called", "on", "any", "sequence", "or", "table", "with", "an", "index", "." ]
def distinct(*arguments): return ast.Distinct(*[ast.func_wrap(arg) for arg in arguments])
[ "def", "distinct", "(", "*", "arguments", ")", ":", "return", "ast", ".", "Distinct", "(", "*", "[", "ast", ".", "func_wrap", "(", "arg", ")", "for", "arg", "in", "arguments", "]", ")" ]
Removes duplicate elements from a sequence.
[ "Removes", "duplicate", "elements", "from", "a", "sequence", "." ]
[ "\"\"\"\n Removes duplicate elements from a sequence.\n\n The distinct command can be called on any sequence or table with an index.\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
from rethinkdb import ast  # the driver's ast module, not the stdlib ast

def distinct(*arguments):
    return ast.Distinct(*[ast.func_wrap(arg) for arg in arguments])
1,005
615
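A usage sketch for distinct above; it only builds a ReQL term, so evaluating it needs an open RethinkDB connection (conn is a placeholder):

term = distinct([1, 2, 2, 3])
# term.run(conn) would evaluate to [1, 2, 3] on a live server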
f5dc01c88dbf792785b40535bb6dbfddbcd28b65
duceppemo/COWSNPhR
cowsnphr_src/tree_methods.py
[ "MIT" ]
Python
determine_group_snp_positions
<not_specific>
def determine_group_snp_positions(strain_snp_positions, strain_groups, strain_species_dict): """ Find all the group-specific SNP positions :param strain_snp_positions: type DICT: Dictionary of strain name: reference chromosome: list of strain-specific SNP positions :param strain_groups: type DICT: Dictionary of strain name: list of group(s) for which the strain contains the defining SNP :param strain_species_dict: type DICT: Dictionary of strain name: species code :return: group_positions_set: Dictionary of species code: group name: reference chromosome: set of group-specific SNP positions """ # Initialise a dictionary to store all of the species: group-specific SNP positions group_positions_set = dict() for strain_name, groups in strain_groups.items(): for ref_chrom, pos_list in strain_snp_positions[strain_name].items(): # Extract the species code from the dictionary species = strain_species_dict[strain_name] # Initialise the species key in the dictionary if required if species not in group_positions_set: group_positions_set[species] = dict() for group in groups: # Initialise the group key in the dictionary if required if group not in group_positions_set[species]: group_positions_set[species][group] = dict() if ref_chrom not in group_positions_set[species][group]: group_positions_set[species][group][ref_chrom] = set() # Add the group-specific positions to the set for pos in pos_list: group_positions_set[species][group][ref_chrom].add(pos) return group_positions_set
Find all the group-specific SNP positions :param strain_snp_positions: type DICT: Dictionary of strain name: reference chromosome: list of strain-specific SNP positions :param strain_groups: type DICT: Dictionary of strain name: list of group(s) for which the strain contains the defining SNP :param strain_species_dict: type DICT: Dictionary of strain name: species code :return: group_positions_set: Dictionary of species code: group name: reference chromosome: set of group-specific SNP positions
Find all the group-specific SNP positions
[ "Find", "all", "the", "group", "-", "specific", "SNP", "positions" ]
def determine_group_snp_positions(strain_snp_positions, strain_groups, strain_species_dict): group_positions_set = dict() for strain_name, groups in strain_groups.items(): for ref_chrom, pos_list in strain_snp_positions[strain_name].items(): species = strain_species_dict[strain_name] if species not in group_positions_set: group_positions_set[species] = dict() for group in groups: if group not in group_positions_set[species]: group_positions_set[species][group] = dict() if ref_chrom not in group_positions_set[species][group]: group_positions_set[species][group][ref_chrom] = set() for pos in pos_list: group_positions_set[species][group][ref_chrom].add(pos) return group_positions_set
[ "def", "determine_group_snp_positions", "(", "strain_snp_positions", ",", "strain_groups", ",", "strain_species_dict", ")", ":", "group_positions_set", "=", "dict", "(", ")", "for", "strain_name", ",", "groups", "in", "strain_groups", ".", "items", "(", ")", ":", "for", "ref_chrom", ",", "pos_list", "in", "strain_snp_positions", "[", "strain_name", "]", ".", "items", "(", ")", ":", "species", "=", "strain_species_dict", "[", "strain_name", "]", "if", "species", "not", "in", "group_positions_set", ":", "group_positions_set", "[", "species", "]", "=", "dict", "(", ")", "for", "group", "in", "groups", ":", "if", "group", "not", "in", "group_positions_set", "[", "species", "]", ":", "group_positions_set", "[", "species", "]", "[", "group", "]", "=", "dict", "(", ")", "if", "ref_chrom", "not", "in", "group_positions_set", "[", "species", "]", "[", "group", "]", ":", "group_positions_set", "[", "species", "]", "[", "group", "]", "[", "ref_chrom", "]", "=", "set", "(", ")", "for", "pos", "in", "pos_list", ":", "group_positions_set", "[", "species", "]", "[", "group", "]", "[", "ref_chrom", "]", ".", "add", "(", "pos", ")", "return", "group_positions_set" ]
Find all the group-specific SNP positions
[ "Find", "all", "the", "group", "-", "specific", "SNP", "positions" ]
[ "\"\"\"\n Find all the group-specific SNP positions\n :param strain_snp_positions: type DICT: Dictionary of strain name: reference chromosome: list of strain-specific\n SNP positions\n :param strain_groups: type DICT: Dictionary of strain name: list of group(s) for which the strain contains the\n defining SNP\n :param strain_species_dict: type DICT: Dictionary of strain name: species code\n :return: group_positions_set: Dictionary of species code: group name: reference chromosome: set of\n group-specific SNP positions\n \"\"\"", "# Initialise a dictionary to store all of the species: group-specific SNP positions", "# Extract the species code from the dictionary", "# Initialise the species key in the dictionary if required", "# Initialise the group key in the dictionary if required", "# Add the group-specific positions to the set" ]
[ { "param": "strain_snp_positions", "type": null }, { "param": "strain_groups", "type": null }, { "param": "strain_species_dict", "type": null } ]
{ "returns": [ { "docstring": "Dictionary of species code: group name: reference chromosome: set of\ngroup-specific SNP positions", "docstring_tokens": [ "Dictionary", "of", "species", "code", ":", "group", "name", ":", "reference", "chromosome", ":", "set", "of", "group", "-", "specific", "SNP", "positions" ], "type": null } ], "raises": [], "params": [ { "identifier": "strain_snp_positions", "type": null, "docstring": "type DICT: Dictionary of strain name: reference chromosome: list of strain-specific\nSNP positions", "docstring_tokens": [ "type", "DICT", ":", "Dictionary", "of", "strain", "name", ":", "reference", "chromosome", ":", "list", "of", "strain", "-", "specific", "SNP", "positions" ], "default": null, "is_optional": null }, { "identifier": "strain_groups", "type": null, "docstring": "type DICT: Dictionary of strain name: list of group(s) for which the strain contains the\ndefining SNP", "docstring_tokens": [ "type", "DICT", ":", "Dictionary", "of", "strain", "name", ":", "list", "of", "group", "(", "s", ")", "for", "which", "the", "strain", "contains", "the", "defining", "SNP" ], "default": null, "is_optional": null }, { "identifier": "strain_species_dict", "type": null, "docstring": "type DICT: Dictionary of strain name: species code", "docstring_tokens": [ "type", "DICT", ":", "Dictionary", "of", "strain", "name", ":", "species", "code" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def determine_group_snp_positions(strain_snp_positions, strain_groups, strain_species_dict): group_positions_set = dict() for strain_name, groups in strain_groups.items(): for ref_chrom, pos_list in strain_snp_positions[strain_name].items(): species = strain_species_dict[strain_name] if species not in group_positions_set: group_positions_set[species] = dict() for group in groups: if group not in group_positions_set[species]: group_positions_set[species][group] = dict() if ref_chrom not in group_positions_set[species][group]: group_positions_set[species][group][ref_chrom] = set() for pos in pos_list: group_positions_set[species][group][ref_chrom].add(pos) return group_positions_set
1,006
368
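A self-contained sketch of determine_group_snp_positions above with tiny hand-made inputs; all strain, group, species, and chromosome names are hypothetical:

strain_snp_positions = {'strainA': {'chr1': [10, 25]}, 'strainB': {'chr1': [25]}}
strain_groups = {'strainA': ['group1'], 'strainB': ['group1', 'group2']}
strain_species_dict = {'strainA': 'af', 'strainB': 'af'}
determine_group_snp_positions(strain_snp_positions, strain_groups, strain_species_dict)
# {'af': {'group1': {'chr1': {10, 25}}, 'group2': {'chr1': {25}}}}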
6b528d9c1c07bdae3aabeaaf662f83a8ccc7aea4
christopher-burke/warmups
python/misc/doctors_clinic.py
[ "MIT" ]
Python
last_patient_wait_time
int
def last_patient_wait_time(total_patients: int, time_duration: int) -> int: """Calculate the wait time for the last patient. Return wait time in minutes. """ # Find the max wait time for all patients ahead of the last patient. total_max_wait = ((total_patients - 1) * time_duration) # Find the total patient time of all patients ahead of the last patient. total_patient_time = (10 * (total_patients - 1)) # Return the difference. last_patient_wait_time = total_patient_time - total_max_wait return last_patient_wait_time
Calculate the wait time for the last patient. Return wait time in minutes.
Calculate the wait time for the last patient. Return wait time in minutes.
[ "Calculate", "the", "wait", "time", "for", "the", "last", "patient", ".", "Return", "wait", "time", "in", "minutes", "." ]
def last_patient_wait_time(total_patients: int, time_duration: int) -> int: total_max_wait = ((total_patients - 1) * time_duration) total_patient_time = (10 * (total_patients - 1)) last_patient_wait_time = total_patient_time - total_max_wait return last_patient_wait_time
[ "def", "last_patient_wait_time", "(", "total_patients", ":", "int", ",", "time_duration", ":", "int", ")", "->", "int", ":", "total_max_wait", "=", "(", "(", "total_patients", "-", "1", ")", "*", "time_duration", ")", "total_patient_time", "=", "(", "10", "*", "(", "total_patients", "-", "1", ")", ")", "last_patient_wait_time", "=", "total_patient_time", "-", "total_max_wait", "return", "last_patient_wait_time" ]
Calculate the wait time for the last patient.
[ "Calculate", "the", "wait", "time", "for", "the", "last", "patient", "." ]
[ "\"\"\"Calculate the wait time for the last patient.\n\n Return wait time in minutes.\n \"\"\"", "# Find the max wait time for all patients ahead of the last patient.", "# Find the total patient time of all patients ahead of the last patient.", "# Return the difference." ]
[ { "param": "total_patients", "type": "int" }, { "param": "time_duration", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "total_patients", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "time_duration", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def last_patient_wait_time(total_patients: int, time_duration: int) -> int: total_max_wait = ((total_patients - 1) * time_duration) total_patient_time = (10 * (total_patients - 1)) last_patient_wait_time = total_patient_time - total_max_wait return last_patient_wait_time
1,007
15
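The arithmetic of last_patient_wait_time above, worked through: with visits taking 10 minutes and appointments booked every time_duration minutes, the result reduces to (10 - time_duration) * (total_patients - 1):

last_patient_wait_time(4, 5)    # 10*3 - 5*3  = 15 minutes of waiting
last_patient_wait_time(4, 10)   # 10*3 - 10*3 = 0, slots match the visit length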
69cb402eb9170df9b9a30f8811c3081cfe475858
Gustl22/seahub
scripts/pro.py
[ "Apache-2.0" ]
Python
read_config
<not_specific>
def read_config(fn=None): '''Return a case sensitive ConfigParser by reading the file "fn"''' cp = configparser.ConfigParser() cp.optionxform = str if fn: cp.read(fn) return cp
Return a case sensitive ConfigParser by reading the file "fn"
Return a case sensitive ConfigParser by reading the file "fn"
[ "Return", "a", "case", "sensitive", "ConfigParser", "by", "reading", "the", "file", "\"", "fn", "\"" ]
def read_config(fn=None): cp = configparser.ConfigParser() cp.optionxform = str if fn: cp.read(fn) return cp
[ "def", "read_config", "(", "fn", "=", "None", ")", ":", "cp", "=", "configparser", ".", "ConfigParser", "(", ")", "cp", ".", "optionxform", "=", "str", "if", "fn", ":", "cp", ".", "read", "(", "fn", ")", "return", "cp" ]
Return a case sensitive ConfigParser by reading the file "fn"
[ "Return", "a", "case", "sensitive", "ConfigParser", "by", "reading", "the", "file", "\"", "fn", "\"" ]
[ "'''Return a case sensitive ConfigParser by reading the file \"fn\"'''" ]
[ { "param": "fn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import configparser def read_config(fn=None): cp = configparser.ConfigParser() cp.optionxform = str if fn: cp.read(fn) return cp
1,008
127
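A self-contained sketch of read_config above, demonstrating that option-name case is preserved; the file content is hypothetical:

import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write('[General]\nSERVICE_URL = http://localhost:8000\n')
    path = f.name
cp = read_config(path)
cp.get('General', 'SERVICE_URL')   # 'http://localhost:8000'; the key kept its upper case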
af0ecdd0fc224d394760b14e14099aae50f30a3e
broadinstitute/lrma-cloud-utils
src/lrmaCU/cromwell/utils.py
[ "BSD-3-Clause" ]
Python
fetch_timing_html
None
def fetch_timing_html(cromwell_server: str, submission_id: str, local_html: str) -> None: """ For fetching timing chart of a cromwell execution and saving that to a local HTML page :param cromwell_server: cromwell server address :param submission_id: hex-string uuid of the submission :param local_html: where to save locally """ s = cromwell_server.rstrip('/') timing_url = f'{s}/api/workflows/v1/{submission_id}/timing' urllib.request.urlretrieve(timing_url, local_html)
For fetching timing chart of a cromwell execution and saving that to a local HTML page :param cromwell_server: cromwell server address :param submission_id: hex-string uuid of the submission :param local_html: where to save locally
For fetching timing chart of a cromwell execution and saving that to a local HTML page
[ "For", "fetching", "timing", "chart", "of", "a", "cromwell", "execution", "and", "saving", "that", "to", "a", "local", "HTML", "page" ]
def fetch_timing_html(cromwell_server: str, submission_id: str, local_html: str) -> None: s = cromwell_server.rstrip('/') timing_url = f'{s}/api/workflows/v1/{submission_id}/timing' urllib.request.urlretrieve(timing_url, local_html)
[ "def", "fetch_timing_html", "(", "cromwell_server", ":", "str", ",", "submission_id", ":", "str", ",", "local_html", ":", "str", ")", "->", "None", ":", "s", "=", "cromwell_server", ".", "rstrip", "(", "'/'", ")", "timing_url", "=", "f'{s}/api/workflows/v1/{submission_id}/timing'", "urllib", ".", "request", ".", "urlretrieve", "(", "timing_url", ",", "local_html", ")" ]
For fetching timing chart of a cromwell execution and saving that to a local HTML page
[ "For", "fetching", "timing", "chart", "of", "a", "cromwell", "execution", "and", "saving", "that", "to", "a", "local", "HTML", "page" ]
[ "\"\"\"\n For fetching timing chart of a cromwell execution and saving that to a local HTML page\n\n :param cromwell_server: cromwell server address\n :param submission_id: hex-string uuid of the submission\n :param local_html: where to save locally\n \"\"\"" ]
[ { "param": "cromwell_server", "type": "str" }, { "param": "submission_id", "type": "str" }, { "param": "local_html", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cromwell_server", "type": "str", "docstring": "cromwell server address", "docstring_tokens": [ "cromwell", "server", "address" ], "default": null, "is_optional": null }, { "identifier": "submission_id", "type": "str", "docstring": "hex-string uuid of the submission", "docstring_tokens": [ "hex", "-", "string", "uuid", "of", "the", "submission" ], "default": null, "is_optional": null }, { "identifier": "local_html", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import urllib.request  # import the submodule explicitly so urllib.request resolves

def fetch_timing_html(cromwell_server: str, submission_id: str, local_html: str) -> None:
    s = cromwell_server.rstrip('/')
    timing_url = f'{s}/api/workflows/v1/{submission_id}/timing'
    urllib.request.urlretrieve(timing_url, local_html)
1,009
99
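A usage sketch for fetch_timing_html above; the server address and submission uuid are placeholders for a real Cromwell deployment:

fetch_timing_html('http://localhost:8000', 'deadbeef-0000-0000-0000-000000000000', 'timing.html')
# retrieves http://localhost:8000/api/workflows/v1/<uuid>/timing into ./timing.html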
d10aefcd1d1f195af7c2e3d843548e189c3e108d
SethDocherty/ArcGIS-Python
helpers/general_helper.py
[ "Apache-2.0" ]
Python
export_to_csv_dict
null
def export_to_csv_dict(input_dict, header, output_name, output_dir_path): ''' Export a single level dictionary to a .csv Required: - Input Dictionary - Header fields for csv - Name of File - Output filepath *NOTE* This function was created to export a single level dictionary to csv. A dictionary that has multiple inner dictionaries will store the key and value to a single line. ''' Output_Path = os.path.join(output_dir_path,output_name + ".csv") writer = csv.writer(open(Output_Path, 'wb')) if type(header) is not type([]): writer.writerow([header]) else: writer.writerow(header) for key, value in input_dict.items(): writer.writerow([key, value])
Export a single level dictionary to a .csv Required: - Input Dictionary - Header fields for csv - Name of File - Output filepath *NOTE* This function was created to export a single level dictionary to csv. A dictionary that has multiple inner dictionaries will store the key and value to a single line.
Export a single level dictionary to a .csv Required: Input Dictionary Header fields for csv Name of File Output filepath NOTE This function was created to export a single level dictionary to csv. A dictionary that has multiple inner dictionaries will store the key and value to a single line.
[ "Export", "a", "single", "level", "dictionary", "to", "a", ".", "csv", "Required", ":", "Input", "Dictionary", "Header", "fields", "for", "csv", "Name", "of", "File", "Output", "filepath", "NOTE", "This", "function", "was", "created", "to", "export", "a", "single", "level", "dictionary", "to", "csv", ".", "A", "dictionary", "that", "has", "multiple", "inner", "dictionaries", "will", "store", "the", "key", "and", "value", "to", "a", "single", "line", "." ]
def export_to_csv_dict(input_dict, header, output_name, output_dir_path): Output_Path = os.path.join(output_dir_path,output_name + ".csv") writer = csv.writer(open(Output_Path, 'wb')) if type(header) is not type([]): writer.writerow([header]) else: writer.writerow(header) for key, value in input_dict.items(): writer.writerow([key, value])
[ "def", "export_to_csv_dict", "(", "input_dict", ",", "header", ",", "output_name", ",", "output_dir_path", ")", ":", "Output_Path", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "output_name", "+", "\".csv\"", ")", "writer", "=", "csv", ".", "writer", "(", "open", "(", "Output_Path", ",", "'wb'", ")", ")", "if", "type", "(", "header", ")", "is", "not", "type", "(", "[", "]", ")", ":", "writer", ".", "writerow", "(", "[", "header", "]", ")", "else", ":", "writer", ".", "writerow", "(", "header", ")", "for", "key", ",", "value", "in", "input_dict", ".", "items", "(", ")", ":", "writer", ".", "writerow", "(", "[", "key", ",", "value", "]", ")" ]
Export a single level dictionary to a .csv Required: Input Dictionary Header fields for csv Name of File Output filepath
[ "Export", "a", "single", "level", "dictionary", "to", "a", ".", "csv", "Required", ":", "Input", "Dictionary", "Header", "fields", "for", "csv", "Name", "of", "File", "Output", "filepath" ]
[ "'''\n Export a single level dictionary to a .csv\n\n Required:\n - Input Dictionary\n - Header fields for csv\n - Name of File\n - Output filepath\n\n *NOTE*\n This function was created to export a single level dictionary to csv. A dictionary that has multiple inner dictionaries will\n store the key and value to a single line.\n '''" ]
[ { "param": "input_dict", "type": null }, { "param": "header", "type": null }, { "param": "output_name", "type": null }, { "param": "output_dir_path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "header", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output_dir_path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import csv import os def export_to_csv_dict(input_dict, header, output_name, output_dir_path): Output_Path = os.path.join(output_dir_path,output_name + ".csv") writer = csv.writer(open(Output_Path, 'wb')) if type(header) is not type([]): writer.writerow([header]) else: writer.writerow(header) for key, value in input_dict.items(): writer.writerow([key, value])
1,010
203
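A usage sketch for export_to_csv_dict above; opening the csv in 'wb' mode is Python 2 behaviour, so this is assumed to run under Python 2 (Python 3 would need mode 'w' with newline=''):

counts = {'cat': 3, 'dog': 1}
export_to_csv_dict(counts, ['animal', 'count'], 'animal_counts', '/tmp')
# writes /tmp/animal_counts.csv: a header row, then one key,value row per entry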
6501e26aec00cdfee8d612aaac679afb965b6c6b
rbiswas4/utils
utils/ioutils.py
[ "MIT" ]
Python
tokenizeline
<not_specific>
def tokenizeline (line, delimitter="", ignorestrings="#"): """ splits the string line into two substrings before and after the first instance of a string in the list ignorestrings, and returns a tuple of a list of tokens obtained by tokenizing the first substring on the delimiter delimitter and the second substring. Parameters ---------- line: mandatory, string string to tokenize delimitter: optional, defaults to "" string of characters (other than whitespace) to be used as a delimiter for tokenizing the line. for example in the case of a line of TSV, it would be "\t" ignorestrings: string, optional, defaults to "#" string, after which the remainder of the line will be ignored in the list of tokens Returns ------- tuple: (lst, list of metadata) list of token strings, list of metadata strings Examples -------- >>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests " >>> tokenizeline(myline, delimitter=".") (['KJAHS KH AKJHS jjhJH', ' JH HJ JHH JH'], ['tests']) >>> tokenizeline(myline, delimitter="") (['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests']) ..notes: _tokenizeline which had a slightly different call signature seemed too complicated and can be done more simply. Slightly different still, as the metadata is captured as a list rather than a comment string. TODO: allow multiple delimiter strings. """ line = line.strip() # Find comments to ignore lst = line.split(ignorestrings) commentlist = lst[1:] linelst = lst[0].strip() if delimitter == '': tokens = linelst.split() else: tokens = linelst.split(delimitter) return (tokens, commentlist)
splits the string line into two substrings before and after the first instance of a string in the list ignorestrings, and returns a tuple of a list of tokens obtained by tokenizing the first substring on the delimiter delimitter and the second substring. Parameters ---------- line: mandatory, string string to tokenize delimitter: optional, defaults to "" string of characters (other than whitespace) to be used as a delimiter for tokenizing the line. for example in the case of a line of TSV, it would be "\t" ignorestrings: string, optional, defaults to "#" string, after which the remainder of the line will be ignored in the list of tokens Returns ------- tuple: (lst, list of metadata) list of token strings, list of metadata strings Examples -------- >>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests " >>> tokenizeline(myline, delimitter=".") (['KJAHS KH AKJHS jjhJH', ' JH HJ JHH JH'], ['tests']) >>> tokenizeline(myline, delimitter="") (['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests']) ..notes: _tokenizeline which had a slightly different call signature seemed too complicated and can be done more simply. Slightly different still, as the metadata is captured as a list rather than a comment string. TODO: allow multiple delimiter strings.
splits the string line into two substrings before and after the first instance of a string in the list ignorestrings, and returns a tuple of a list of tokens obtained by tokenizing the first substring on the delimiter delimitter and the second substring. Parameters mandatory, string string to tokenize delimitter: optional, defaults to "" string of characters (other than whitespace) to be used as a delimiter for tokenizing the line. for example in the case of a line of TSV, it would be "\t" ignorestrings: string, optional, defaults to "#" string, after which the remainder of the line will be ignored in the list of tokens Returns (lst, list of metadata) list of token strings, list of metadata strings Examples >>> myline = "KJAHS KH AKJHS jjhJH. _tokenizeline which had a slightly different call signature seemed too complicated and can be done more simply. Slightly different still, as the metadata is captured as a list rather than a comment string. TODO: allow multiple delimiter strings.
[ "splits", "the", "string", "line", "into", "two", "substrings", "before", "and", "after", "the", "first", "instance", "of", "a", "string", "in", "the", "list", "ignorestrings", "and", "returns", "a", "tuple", "of", "a", "list", "of", "tokens", "obtained", "by", "tokenizing", "the", "first", "substring", "on", "the", "delimiter", "delimitter", "and", "the", "second", "substring", ".", "Parameters", "mandatory", "string", "string", "to", "tokenize", "delimitter", ":", "optional", "defaults", "to", "\"", "\"", "string", "of", "characters", "(", "other", "than", "whitespace", ")", "to", "be", "used", "as", "a", "delimiter", "for", "tokenizing", "the", "line", ".", "for", "example", "in", "the", "case", "of", "a", "line", "of", "TSV", "it", "would", "be", "\"", "\\", "t", "\"", "ignorestrings", ":", "string", "optional", "defaults", "to", "\"", "#", "\"", "string", "after", "which", "the", "remainder", "of", "the", "line", "will", "be", "ignored", "in", "the", "list", "of", "tokens", "Returns", "(", "lst", "list", "of", "metadata", ")", "list", "of", "token", "strings", "list", "of", "metadata", "strings", "Examples", ">>>", "myline", "=", "\"", "KJAHS", "KH", "AKJHS", "jjhJH", ".", "_tokenizeline", "which", "had", "a", "slightly", "different", "call", "signature", "seemed", "too", "complicated", "and", "can", "be", "done", "more", "simply", ".", "Slightly", "different", "still", "as", "the", "metadata", "is", "captured", "as", "a", "list", "rather", "than", "a", "comment", "string", ".", "TODO", ":", "allow", "multiple", "delimiter", "strings", "." ]
def tokenizeline (line, delimitter="", ignorestrings="#"): line = line.strip() lst = line.split(ignorestrings) commentlist = lst[1:] linelst = lst[0].strip() if delimitter == '': tokens = linelst.split() else: tokens = linelst.split(delimitter) return (tokens, commentlist)
[ "def", "tokenizeline", "(", "line", ",", "delimitter", "=", "\"\"", ",", "ignorestrings", "=", "\"#\"", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "lst", "=", "line", ".", "split", "(", "ignorestrings", ")", "commentlist", "=", "lst", "[", "1", ":", "]", "linelst", "=", "lst", "[", "0", "]", ".", "strip", "(", ")", "if", "delimitter", "==", "''", ":", "tokens", "=", "linelst", ".", "split", "(", ")", "else", ":", "tokens", "=", "linelst", ".", "split", "(", "delimitter", ")", "return", "(", "tokens", ",", "commentlist", ")" ]
splits the string line into two substrings before and after the first instance of a string in the list ignorestrings, and returns a tuple of a list of tokens obtained by tokenizing the first substring on the delimiter delimitter and the second substring.
[ "splits", "the", "string", "line", "into", "two", "substrings", "before", "and", "after", "the", "first", "instance", "of", "a", "string", "in", "the", "list", "ignorestrings", "and", "returns", "a", "tuple", "of", "a", "list", "of", "tokens", "obtained", "by", "tokenizing", "the", "first", "substring", "on", "the", "delimiter", "delimitter", "and", "the", "second", "substring", "." ]
[ "\"\"\"\n splits the string line into two substrings before and after the \n first instance of a string in the list ignorestrings, and returns\n a tuple of a list of tokens obtained by tokenizing the first \n substring on the delimiter delimitter and the second substring.\n\n Parameters\n ----------\n line: mandatory, string \n string to tokenize\n delimitter: optional, defaults to \"\"\n string of characters (other than whitespace) to \n be used as a delimiter for tokenizing the line. \n for example in the case of a line of TSV, it would be \"\\t\"\n ignorestrings: string, optional, defaults to \"#\"\n string, after which the remainder of the line will be ignored\n in the list of tokens\n\n Returns\n -------\n tuple: (lst, list of metadata) \n list of token strings, list of metadata strings\n\n Examples\n --------\n\n >>> myline = \"KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests \"\n >>> tokenizeline(myline, delimitter=\".\")\n (['KJAHS KH AKJHS jjhJH', ' JH HJ JHH JH'], ['tests'])\n >>> tokenizeline(myline, delimitter=\"\") \n (['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests'])\n\n ..notes:\n _tokenizeline which had a slightly different call signature seemed \n too complicated and can be done more simply. Slightly different still,\n as the metadata is captured as a list rather than a comment string.\n TODO: allow multiple delimiter strings. \n \"\"\"", "# Find comments to ignore" ]
[ { "param": "line", "type": null }, { "param": "delimitter", "type": null }, { "param": "ignorestrings", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "delimitter", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ignorestrings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tokenizeline (line, delimitter="", ignorestrings="#"): line = line.strip() lst = line.split(ignorestrings) commentlist = lst[1:] linelst = lst[0].strip() if delimitter == '': tokens = linelst.split() else: tokens = linelst.split(delimitter) return (tokens, commentlist)
1,012
324
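Two further calls to tokenizeline above, complementing its doctests; the input lines are made up:

tokenizeline('a\tb\tc # trailing note', delimitter='\t')
# (['a', 'b', 'c'], [' trailing note'])
tokenizeline('x y z')
# (['x', 'y', 'z'], [])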
8f40c79d15418e82eaf752b58a857beeb5c4c864
laddie132/MD3
game/tokenizer.py
[ "MIT" ]
Python
_is_punctuation
<not_specific>
def _is_punctuation(char): """ Whether a char is a punctuation :param char: :return: """ # treat as part of words if char == '_': return False """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
Whether a char is a punctuation :param char: :return:
Whether a char is a punctuation
[ "Whether", "a", "char", "is", "a", "punctuation" ]
def _is_punctuation(char): if char == '_': return False cp = ord(char) if ((33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
[ "def", "_is_punctuation", "(", "char", ")", ":", "if", "char", "==", "'_'", ":", "return", "False", "\"\"\"Checks whether `chars` is a punctuation character.\"\"\"", "cp", "=", "ord", "(", "char", ")", "if", "(", "(", "33", "<=", "cp", "<=", "47", ")", "or", "(", "58", "<=", "cp", "<=", "64", ")", "or", "(", "91", "<=", "cp", "<=", "96", ")", "or", "(", "123", "<=", "cp", "<=", "126", ")", ")", ":", "return", "True", "cat", "=", "unicodedata", ".", "category", "(", "char", ")", "if", "cat", ".", "startswith", "(", "\"P\"", ")", ":", "return", "True", "return", "False" ]
Whether a char is a punctuation
[ "Whether", "a", "char", "is", "a", "punctuation" ]
[ "\"\"\"\n Whether a char is a punctuation\n :param char:\n :return:\n \"\"\"", "# treat as part of words", "\"\"\"Checks whether `chars` is a punctuation character.\"\"\"", "# We treat all non-letter/number ASCII as punctuation.", "# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode", "# Punctuation class but we treat them as punctuation anyways, for", "# consistency." ]
[ { "param": "char", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "char", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import unicodedata def _is_punctuation(char): if char == '_': return False cp = ord(char) if ((33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
1,014
570
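Boundary cases of _is_punctuation above, shown directly:

_is_punctuation('_')    # False: underscore is explicitly treated as part of a word
_is_punctuation('$')    # True: non-letter/number ASCII counts as punctuation
_is_punctuation('。')   # True: Unicode category Po
_is_punctuation('a')    # False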
8eff6c615681d88a3d099dc168cb1e253698498a
ld4l-labs/bib2rdf
testconv/conversion_tester.py
[ "Apache-2.0" ]
Python
run_bib2lod
<not_specific>
def run_bib2lod(args): """Run bib2lod with specified args. First argument must be the configuration file name. """ prefix_args = ['java', '-jar', 'target/bib2lod.jar', '-c'] prefix_args.reverse() for arg in prefix_args: args.insert(0, arg) proc = subprocess.Popen(args, stdout=subprocess.PIPE) (out, err) = proc.communicate() return(out)
Run bib2lod with specified args. First argument must be the configuration file name.
Run bib2lod with specified args. First argument must be the configuration file name.
[ "Run", "bib2lod", "with", "specified", "args", ".", "First", "argument", "must", "be", "the", "configuration", "file", "name", "." ]
def run_bib2lod(args): prefix_args = ['java', '-jar', 'target/bib2lod.jar', '-c'] prefix_args.reverse() for arg in prefix_args: args.insert(0, arg) proc = subprocess.Popen(args, stdout=subprocess.PIPE) (out, err) = proc.communicate() return(out)
[ "def", "run_bib2lod", "(", "args", ")", ":", "prefix_args", "=", "[", "'java'", ",", "'-jar'", ",", "'target/bib2lod.jar'", ",", "'-c'", "]", "prefix_args", ".", "reverse", "(", ")", "for", "arg", "in", "prefix_args", ":", "args", ".", "insert", "(", "0", ",", "arg", ")", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "return", "(", "out", ")" ]
Run bib2lod with specified args.
[ "Run", "bib2lod", "with", "specified", "args", "." ]
[ "\"\"\"Run bib2lod with specified args.\n\n First argument must be the configuration file name.\n \"\"\"" ]
[ { "param": "args", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "args", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess def run_bib2lod(args): prefix_args = ['java', '-jar', 'target/bib2lod.jar', '-c'] prefix_args.reverse() for arg in prefix_args: args.insert(0, arg) proc = subprocess.Popen(args, stdout=subprocess.PIPE) (out, err) = proc.communicate() return(out)
1,015
578
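A usage sketch for run_bib2lod above; it assumes target/bib2lod.jar has been built, and the configuration file path is a placeholder:

out = run_bib2lod(['config/example.json'])
# runs: java -jar target/bib2lod.jar -c config/example.json and returns its stdout (bytes)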
bed4c319f4c0c361886157fd31ce94574d191f1b
tailhook/aio-routes
aioroutes/decorators.py
[ "MIT" ]
Python
postprocessor
<not_specific>
def postprocessor(fun): """A decorator that accepts method's output and processes it Works only on leaf nodes. Typical use cases are: * turn dict of variables into a JSON * render a template from the dict. """ if not hasattr(fun, '_aio_post'): fun._aio_post = [] def wrapper(proc): proc = asyncio.coroutine(proc) fun._aio_post.append(proc) return fun return wrapper
A decorator that accepts method's output and processes it Works only on leaf nodes. Typical use cases are: * turn dict of variables into a JSON * render a template from the dict.
A decorator that accepts method's output and processes it Works only on leaf nodes. Typical use cases are. turn dict of variables into a JSON render a template from the dict.
[ "A", "decorator", "that", "accepts", "method", "'", "s", "output", "and", "processes", "it", "Works", "only", "on", "leaf", "nodes", ".", "Typical", "use", "cases", "are", ".", "turn", "dict", "of", "variables", "into", "a", "JSON", "render", "a", "template", "from", "the", "dict", "." ]
def postprocessor(fun): if not hasattr(fun, '_aio_post'): fun._aio_post = [] def wrapper(proc): proc = asyncio.coroutine(proc) fun._aio_post.append(proc) return fun return wrapper
[ "def", "postprocessor", "(", "fun", ")", ":", "if", "not", "hasattr", "(", "fun", ",", "'_aio_post'", ")", ":", "fun", ".", "_aio_post", "=", "[", "]", "def", "wrapper", "(", "proc", ")", ":", "proc", "=", "asyncio", ".", "coroutine", "(", "proc", ")", "fun", ".", "_aio_post", ".", "append", "(", "proc", ")", "return", "fun", "return", "wrapper" ]
A decorator that accepts method's output and processes it Works only on leaf nodes.
[ "A", "decorator", "that", "accepts", "method", "'", "s", "output", "and", "processes", "it", "Works", "only", "on", "leaf", "nodes", "." ]
[ "\"\"\"A decorator that accepts method's output and processes it\n\n Works only on leaf nodes. Typical use cases are:\n\n * turn dict of variables into a JSON\n * render a template from the dict.\n \"\"\"" ]
[ { "param": "fun", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fun", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import asyncio def postprocessor(fun): if not hasattr(fun, '_aio_post'): fun._aio_post = [] def wrapper(proc): proc = asyncio.coroutine(proc) fun._aio_post.append(proc) return fun return wrapper
1,016
68
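A sketch of postprocessor above; asyncio.coroutine was removed in Python 3.11, so an older interpreter is assumed, and the processor's signature is hypothetical:

def handler(request):
    return {'answer': 42}

def render(result):            # hypothetical processor
    return str(result)

postprocessor(handler)(render)
assert len(handler._aio_post) == 1   # render is registered, wrapped as a coroutine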
178ad36594fe66d5b1734702a87dcd260de85ce7
khromiumos/chromiumos-chromite
lib/luci/utils.py
[ "BSD-3-Clause" ]
Python
utcnow
<not_specific>
def utcnow(): """Returns datetime.utcnow(), used for testing. Use this function so it can be mocked everywhere. """ return datetime.utcnow()
Returns datetime.utcnow(), used for testing. Use this function so it can be mocked everywhere.
Returns datetime.utcnow(), used for testing. Use this function so it can be mocked everywhere.
[ "Returns", "datetime", ".", "utcnow", "()", "used", "for", "testing", ".", "Use", "this", "function", "so", "it", "can", "be", "mocked", "everywhere", "." ]
def utcnow(): return datetime.utcnow()
[ "def", "utcnow", "(", ")", ":", "return", "datetime", ".", "utcnow", "(", ")" ]
Returns datetime.utcnow(), used for testing.
[ "Returns", "datetime", ".", "utcnow", "()", "used", "for", "testing", "." ]
[ "\"\"\"Returns datetime.utcnow(), used for testing.\n\n Use this function so it can be mocked everywhere.\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
from datetime import datetime  # the class, so datetime.utcnow() resolves

def utcnow():
    return datetime.utcnow()
1,017
134
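Why the utcnow wrapper above helps testing: it gives one seam to patch. A sketch, assuming the function lives in a hypothetical module named mymodule:

import mymodule
from unittest import mock
from datetime import datetime
with mock.patch('mymodule.utcnow', return_value=datetime(2020, 1, 1)):
    assert mymodule.utcnow() == datetime(2020, 1, 1)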
dfad69b0acae76a1f09d0a36fd8937f10e05e427
alexmid/ibmdbpy
ibmdbpy/statistics.py
[ "BSD-3-Clause" ]
Python
_count_level
<not_specific>
def _count_level(idadf, columnlist=None): """ Count distinct levels across a list of columns of an IdaDataFrame grouped by themselves. Parameters ---------- columnlist : list List of column names that exist in the IdaDataFrame. By default, these are all columns in IdaDataFrame. Returns ------- Tuple Notes ----- The function assumes the following: * The columns given as parameter exists in the IdaDataframe. * The parameter columnlist is an optional list. * Columns are referenced by their own name (character string). """ if columnlist is None: columnlist = idadf.columns name = idadf.internal_state.current_state query_list = [] for column in columnlist: # Here cast ? query_list.append("(SELECT COUNT(*) AS \"" + column +"\" FROM (" + "SELECT \"" + column + "\" FROM " + name + " GROUP BY \"" + column + "\" ))") #query_list.append("(SELECT CAST(COUNT(*) AS BIGINT) AS \"" + column +"\" FROM (" + # "SELECT \"" + column + "\" FROM " + name + " ))") query_string = ', '.join(query_list) column_string = '\"' + '\", \"'.join(columnlist) + '\"' return idadf.ida_query("SELECT " + column_string + " FROM " + query_string, first_row_only = True)
Count distinct levels across a list of columns of an IdaDataFrame grouped by themselves. Parameters ---------- columnlist : list List of column names that exist in the IdaDataFrame. By default, these are all columns in IdaDataFrame. Returns ------- Tuple Notes ----- The function assumes the following: * The columns given as parameter exists in the IdaDataframe. * The parameter columnlist is an optional list. * Columns are referenced by their own name (character string).
Count distinct levels across a list of columns of an IdaDataFrame grouped by themselves. Parameters columnlist : list List of column names that exist in the IdaDataFrame. By default, these are all columns in IdaDataFrame. Returns Tuple Notes The function assumes the following. The columns given as parameter exists in the IdaDataframe. The parameter columnlist is an optional list. Columns are referenced by their own name (character string).
[ "Count", "distinct", "levels", "across", "a", "list", "of", "columns", "of", "an", "IdaDataFrame", "grouped", "by", "themselves", ".", "Parameters", "columnlist", ":", "list", "List", "of", "column", "names", "that", "exist", "in", "the", "IdaDataFrame", ".", "By", "default", "these", "are", "all", "columns", "in", "IdaDataFrame", ".", "Returns", "Tuple", "Notes", "The", "function", "assumes", "the", "following", ".", "The", "columns", "given", "as", "parameter", "exists", "in", "the", "IdaDataframe", ".", "The", "parameter", "columnlist", "is", "an", "optional", "list", ".", "Columns", "are", "referenced", "by", "their", "own", "name", "(", "character", "string", ")", "." ]
def _count_level(idadf, columnlist=None): if columnlist is None: columnlist = idadf.columns name = idadf.internal_state.current_state query_list = [] for column in columnlist: query_list.append("(SELECT COUNT(*) AS \"" + column +"\" FROM (" + "SELECT \"" + column + "\" FROM " + name + " GROUP BY \"" + column + "\" ))") query_string = ', '.join(query_list) column_string = '\"' + '\", \"'.join(columnlist) + '\"' return idadf.ida_query("SELECT " + column_string + " FROM " + query_string, first_row_only = True)
[ "def", "_count_level", "(", "idadf", ",", "columnlist", "=", "None", ")", ":", "if", "columnlist", "is", "None", ":", "columnlist", "=", "idadf", ".", "columns", "name", "=", "idadf", ".", "internal_state", ".", "current_state", "query_list", "=", "[", "]", "for", "column", "in", "columnlist", ":", "query_list", ".", "append", "(", "\"(SELECT COUNT(*) AS \\\"\"", "+", "column", "+", "\"\\\" FROM (\"", "+", "\"SELECT \\\"\"", "+", "column", "+", "\"\\\" FROM \"", "+", "name", "+", "\" GROUP BY \\\"\"", "+", "column", "+", "\"\\\" ))\"", ")", "query_string", "=", "', '", ".", "join", "(", "query_list", ")", "column_string", "=", "'\\\"'", "+", "'\\\", \\\"'", ".", "join", "(", "columnlist", ")", "+", "'\\\"'", "return", "idadf", ".", "ida_query", "(", "\"SELECT \"", "+", "column_string", "+", "\" FROM \"", "+", "query_string", ",", "first_row_only", "=", "True", ")" ]
Count distinct levels across a list of columns of an IdaDataFrame grouped by themselves.
[ "Count", "distinct", "levels", "across", "a", "list", "of", "columns", "of", "an", "IdaDataFrame", "grouped", "by", "themselves", "." ]
[ "\"\"\"\n Count distinct levels across a list of columns of an IdaDataFrame grouped\n by themselves.\n\n\n Parameters\n ----------\n columnlist : list\n List of column names that exist in the IdaDataFrame. By default, these\n are all columns in IdaDataFrame.\n\n Returns\n -------\n Tuple\n\n Notes\n -----\n The function assumes the following:\n\n * The columns given as parameter exists in the IdaDataframe.\n * The parameter columnlist is an optional list.\n * Columns are referenced by their own name (character string).\n \"\"\"", "# Here cast ?", "#query_list.append(\"(SELECT CAST(COUNT(*) AS BIGINT) AS \\\"\" + column +\"\\\" FROM (\" +", "# \"SELECT \\\"\" + column + \"\\\" FROM \" + name + \" ))\")" ]
[ { "param": "idadf", "type": null }, { "param": "columnlist", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "idadf", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "columnlist", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _count_level(idadf, columnlist=None): if columnlist is None: columnlist = idadf.columns name = idadf.internal_state.current_state query_list = [] for column in columnlist: query_list.append("(SELECT COUNT(*) AS \"" + column +"\" FROM (" + "SELECT \"" + column + "\" FROM " + name + " GROUP BY \"" + column + "\" ))") query_string = ', '.join(query_list) column_string = '\"' + '\", \"'.join(columnlist) + '\"' return idadf.ida_query("SELECT " + column_string + " FROM " + query_string, first_row_only = True)
1,018
614
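A usage sketch for _count_level above; it assumes an open ibmdbpy connection and an IdaDataFrame idadf over a table containing the listed columns (all placeholders):

levels = _count_level(idadf, columnlist=['CITY', 'STATE'])
# one result row holding the number of distinct values for each requested column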
9819052e9efa975227c4fe0763b51845ac573c89
Arsks12/Fizilion
userbot/modules/misc.py
[ "Naumen", "Condor-1.1", "MS-PL" ]
Python
repo_is_here
null
async def repo_is_here(wannasee): """ For .repo command, just returns the repo URL. """ await wannasee.edit( "[Click here](https://github.com/royalturd/Fizilion) to open Fizilion's GitHub Repo." )
For .repo command, just returns the repo URL.
For .repo command, just returns the repo URL.
[ "For", ".", "repo", "command", "just", "returns", "the", "repo", "URL", "." ]
async def repo_is_here(wannasee): await wannasee.edit( "[Click here](https://github.com/royalturd/Fizilion) to open Fizilion's GitHub Repo." )
[ "async", "def", "repo_is_here", "(", "wannasee", ")", ":", "await", "wannasee", ".", "edit", "(", "\"[Click here](https://github.com/royalturd/Fizilion) to open Fizilion's GitHub Repo.\"", ")" ]
For .repo command, just returns the repo URL.
[ "For", ".", "repo", "command", "just", "returns", "the", "repo", "URL", "." ]
[ "\"\"\" For .repo command, just returns the repo URL. \"\"\"" ]
[ { "param": "wannasee", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "wannasee", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def repo_is_here(wannasee): await wannasee.edit( "[Click here](https://github.com/royalturd/Fizilion) to open Fizilion's GitHub Repo." )
1,019
776
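repo_is_here above is a coroutine that edits the triggering Telegram message; a sketch of awaiting it, where event stands in for a Telethon message event:

await repo_is_here(event)   # the message text is replaced with the repo link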
41d9a02f8eafa6da5178ec832a387d9a4b20f0c6
jatin69/mca101-OOPS
Recursive list functions/reverse_list.py
[ "MIT" ]
Python
rev
<not_specific>
def rev(mylist,index=0): ''' Objective : To reverse a list Parameter : mylist: The list to be reversed Return value : None ''' # approach : Recursive & changes within the same list # if reached in middle, return if(index==(len(mylist)//2)): return # else swap extremes mylist[index],mylist[(len(mylist)-1)-index]=mylist[(len(mylist)-1)-index],mylist[index] # and call the function for index+1 return rev(mylist,index+1)
Objective : To reverse a list Parameter : mylist: The list to be reversed Return value : None
Objective : To reverse a list Parameter : mylist: The list to be reversed Return value : None
[ "Objective", ":", "To", "reverse", "a", "list", "Parameter", ":", "mylist", ":", "The", "list", "to", "be", "reversed", "Return", "value", ":", "None" ]
def rev(mylist,index=0): if(index==(len(mylist)//2)): return mylist[index],mylist[(len(mylist)-1)-index]=mylist[(len(mylist)-1)-index],mylist[index] return rev(mylist,index+1)
[ "def", "rev", "(", "mylist", ",", "index", "=", "0", ")", ":", "if", "(", "index", "==", "(", "len", "(", "mylist", ")", "//", "2", ")", ")", ":", "return", "mylist", "[", "index", "]", ",", "mylist", "[", "(", "len", "(", "mylist", ")", "-", "1", ")", "-", "index", "]", "=", "mylist", "[", "(", "len", "(", "mylist", ")", "-", "1", ")", "-", "index", "]", ",", "mylist", "[", "index", "]", "return", "rev", "(", "mylist", ",", "index", "+", "1", ")" ]
Objective : To reverse a list Parameter : mylist: The list to be reversed Return value : None
[ "Objective", ":", "To", "reverse", "a", "list", "Parameter", ":", "mylist", ":", "The", "list", "to", "be", "reversed", "Return", "value", ":", "None" ]
[ "'''\n Objective : To reverse a list\n Parameter :\n mylist: The list to be reversed\n Return value : None\n '''", "# approach : Recursive & changes within the same list", "# if reached in middle, return", "# else swap extremes", "# and call the function for index+1" ]
[ { "param": "mylist", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mylist", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def rev(mylist,index=0): if(index==(len(mylist)//2)): return mylist[index],mylist[(len(mylist)-1)-index]=mylist[(len(mylist)-1)-index],mylist[index] return rev(mylist,index+1)
1,020
874
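rev above reverses in place and returns None; a direct sketch:

mylist = [1, 2, 3, 4, 5]
rev(mylist)
# mylist == [5, 4, 3, 2, 1] -- extremes are swapped recursively until the middle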
ad9b1f63f0423d5970d16c9faac0964ce81f3fee
davedittrich/python_secrets
psec/ssh.py
[ "Apache-2.0" ]
Python
_parse_fingerprint_awsconsole
<not_specific>
def _parse_fingerprint_awsconsole(line, host=None): """Parse SSH host fingerprint from AWS console output""" fingerprint = None if line.startswith('ec2:'): host = host fingerprint = line.split(': ', 2)[1] return host, fingerprint
Parse SSH host fingerprint from AWS console output
Parse SSH host fingerprint from AWS console output
[ "Parse", "SSH", "host", "fingerprint", "from", "AWS", "console", "output" ]
def _parse_fingerprint_awsconsole(line, host=None): fingerprint = None if line.startswith('ec2:'): host = host fingerprint = line.split(': ', 2)[1] return host, fingerprint
[ "def", "_parse_fingerprint_awsconsole", "(", "line", ",", "host", "=", "None", ")", ":", "fingerprint", "=", "None", "if", "line", ".", "startswith", "(", "'ec2:'", ")", ":", "host", "=", "host", "fingerprint", "=", "line", ".", "split", "(", "': '", ",", "2", ")", "[", "1", "]", "return", "host", ",", "fingerprint" ]
Parse SSH host fingerprint from AWS console output
[ "Parse", "SSH", "host", "fingerprint", "from", "AWS", "console", "output" ]
[ "\"\"\"Parse SSH host fingerprint from AWS console output\"\"\"" ]
[ { "param": "line", "type": null }, { "param": "host", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "host", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _parse_fingerprint_awsconsole(line, host=None): fingerprint = None if line.startswith('ec2:'): host = host fingerprint = line.split(': ', 2)[1] return host, fingerprint
1,021
854
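A usage sketch for the fingerprint parser above. Note that host = host in the source is a no-op, so the function simply passes the caller's host through; the console line below is invented for illustration and may not match real AWS console output exactly.

def _parse_fingerprint_awsconsole(line, host=None):
    fingerprint = None
    if line.startswith('ec2:'):
        host = host  # no-op in the original source, kept verbatim
        fingerprint = line.split(': ', 2)[1]
    return host, fingerprint

sample = "ec2: 256 SHA256:AbCdEf123 root@ip-10-0-0-1 (ED25519)"
print(_parse_fingerprint_awsconsole(sample, host="ec2-54-0-0-1"))
# ('ec2-54-0-0-1', '256 SHA256:AbCdEf123 root@ip-10-0-0-1 (ED25519)')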
09ee5fb810587397da2494a28a68160a3533c95b
greed1sland/PythonProgrammingPuzzles
generators/codeforces.py
[ "MIT" ]
Python
sat
<not_specific>
def sat(lists: List[List[int]], items=[5, 4, 9, 4, 5, 5, 5, 1, 5, 5], length=4): """ Given a list of integers and a target length, create of the given length such that: * The first list must be all different numbers. * The second must be all the same number. * The two lists together comprise a sublist of all the list items """ a, b = lists assert len(a) == len(b) == length assert len(set(a)) == len(a) assert len(set(b)) == 1 for i in a + b: assert (a + b).count(i) <= items.count(i) return True
Given a list of integers and a target length, create of the given length such that: * The first list must be all different numbers. * The second must be all the same number. * The two lists together comprise a sublist of all the list items
Given a list of integers and a target length, create of the given length such that: The first list must be all different numbers. The second must be all the same number. The two lists together comprise a sublist of all the list items
[ "Given", "a", "list", "of", "integers", "and", "a", "target", "length", "create", "of", "the", "given", "length", "such", "that", ":", "The", "first", "list", "must", "be", "all", "different", "numbers", ".", "The", "second", "must", "be", "all", "the", "same", "number", ".", "The", "two", "lists", "together", "comprise", "a", "sublist", "of", "all", "the", "list", "items" ]
def sat(lists: List[List[int]], items=[5, 4, 9, 4, 5, 5, 5, 1, 5, 5], length=4): a, b = lists assert len(a) == len(b) == length assert len(set(a)) == len(a) assert len(set(b)) == 1 for i in a + b: assert (a + b).count(i) <= items.count(i) return True
[ "def", "sat", "(", "lists", ":", "List", "[", "List", "[", "int", "]", "]", ",", "items", "=", "[", "5", ",", "4", ",", "9", ",", "4", ",", "5", ",", "5", ",", "5", ",", "1", ",", "5", ",", "5", "]", ",", "length", "=", "4", ")", ":", "a", ",", "b", "=", "lists", "assert", "len", "(", "a", ")", "==", "len", "(", "b", ")", "==", "length", "assert", "len", "(", "set", "(", "a", ")", ")", "==", "len", "(", "a", ")", "assert", "len", "(", "set", "(", "b", ")", ")", "==", "1", "for", "i", "in", "a", "+", "b", ":", "assert", "(", "a", "+", "b", ")", ".", "count", "(", "i", ")", "<=", "items", ".", "count", "(", "i", ")", "return", "True" ]
Given a list of integers and a target length, create of the given length such that: The first list must be all different numbers.
[ "Given", "a", "list", "of", "integers", "and", "a", "target", "length", "create", "of", "the", "given", "length", "such", "that", ":", "The", "first", "list", "must", "be", "all", "different", "numbers", "." ]
[ "\"\"\"\n Given a list of integers and a target length, create of the given length such that:\n * The first list must be all different numbers.\n * The second must be all the same number.\n * The two lists together comprise a sublist of all the list items\n \"\"\"" ]
[ { "param": "lists", "type": "List[List[int]]" }, { "param": "items", "type": null }, { "param": "length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "lists", "type": "List[List[int]]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "items", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sat(lists: List[List[int]], items=[5, 4, 9, 4, 5, 5, 5, 1, 5, 5], length=4): a, b = lists assert len(a) == len(b) == length assert len(set(a)) == len(a) assert len(set(b)) == 1 for i in a + b: assert (a + b).count(i) <= items.count(i) return True
1,022
1,004
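The default arguments admit at least one hand-constructed solution; the candidate below was picked by checking the constraints directly and does not come from the original repository.

from typing import List

def sat(lists: List[List[int]], items=[5, 4, 9, 4, 5, 5, 5, 1, 5, 5], length=4):
    a, b = lists
    assert len(a) == len(b) == length
    assert len(set(a)) == len(a)   # first list: all distinct
    assert len(set(b)) == 1        # second list: all equal
    for i in a + b:
        assert (a + b).count(i) <= items.count(i)  # multiset containment
    return True

# items holds six 5s, two 4s, one 9 and one 1, so this candidate fits:
print(sat([[5, 4, 9, 1], [5, 5, 5, 5]]))  # True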
1535508ecd846e67592463848a745aeaa9d8ff97
GabrielFalcom/verless
scripts/check_imports.py
[ "Apache-2.0" ]
Python
check_imports
null
def check_imports(): """ Run `goimports -l .` to find all Go files whose imports aren't formatted appropriately. Imported Go packages don't only have to be ordered alphabetical, they also need to be grouped by imports from the standard library and from third-party vendors. For example: import ( "errors" "os" "github.com/verless/verless/builder" "github.com/verless/verless/core/build" "github.com/verless/verless/parser" ) If all files are formatted appropriately, exit with status 0, otherwise exit with status 1 by printing all invalid files. """ output = subprocess.check_output(["goimports", "-l", "."]) output = output.decode("utf-8") if output != "": msg = """goimports found non-ordered imports in the following files: {0} Run `goimports -w .` to order the imports or group and order them yourself.""".format(output) sys.exit(msg)
Run `goimports -l .` to find all Go files whose imports aren't formatted appropriately. Imported Go packages don't only have to be ordered alphabetical, they also need to be grouped by imports from the standard library and from third-party vendors. For example: import ( "errors" "os" "github.com/verless/verless/builder" "github.com/verless/verless/core/build" "github.com/verless/verless/parser" ) If all files are formatted appropriately, exit with status 0, otherwise exit with status 1 by printing all invalid files.
Run `goimports -l .` to find all Go files whose imports aren't formatted appropriately. Imported Go packages don't only have to be ordered alphabetical, they also need to be grouped by imports from the standard library and from third-party vendors. For example. import ( "errors" "os" If all files are formatted appropriately, exit with status 0, otherwise exit with status 1 by printing all invalid files.
[ "Run", "`", "goimports", "-", "l", ".", "`", "to", "find", "all", "Go", "files", "whose", "imports", "aren", "'", "t", "formatted", "appropriately", ".", "Imported", "Go", "packages", "don", "'", "t", "only", "have", "to", "be", "ordered", "alphabetical", "they", "also", "need", "to", "be", "grouped", "by", "imports", "from", "the", "standard", "library", "and", "from", "third", "-", "party", "vendors", ".", "For", "example", ".", "import", "(", "\"", "errors", "\"", "\"", "os", "\"", "If", "all", "files", "are", "formatted", "appropriately", "exit", "with", "status", "0", "otherwise", "exit", "with", "status", "1", "by", "printing", "all", "invalid", "files", "." ]
def check_imports(): output = subprocess.check_output(["goimports", "-l", "."]) output = output.decode("utf-8") if output != "": msg = """goimports found non-ordered imports in the following files: {0} Run `goimports -w .` to order the imports or group and order them yourself.""".format(output) sys.exit(msg)
[ "def", "check_imports", "(", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "\"goimports\"", ",", "\"-l\"", ",", "\".\"", "]", ")", "output", "=", "output", ".", "decode", "(", "\"utf-8\"", ")", "if", "output", "!=", "\"\"", ":", "msg", "=", "\"\"\"goimports found non-ordered imports in the following files:\n{0}\nRun `goimports -w .` to order the imports or group and order them yourself.\"\"\"", ".", "format", "(", "output", ")", "sys", ".", "exit", "(", "msg", ")" ]
Run `goimports -l .` to find all Go files whose imports aren't formatted appropriately.
[ "Run", "`", "goimports", "-", "l", ".", "`", "to", "find", "all", "Go", "files", "whose", "imports", "aren", "'", "t", "formatted", "appropriately", "." ]
[ "\"\"\"\n Run `goimports -l .` to find all Go files whose imports aren't\n formatted appropriately.\n\n Imported Go packages don't only have to be ordered alphabetical,\n they also need to be grouped by imports from the standard library\n and from third-party vendors. For example:\n\n import (\n \"errors\"\n \"os\"\n\n \"github.com/verless/verless/builder\"\n \"github.com/verless/verless/core/build\"\n \"github.com/verless/verless/parser\"\n )\n\n If all files are formatted appropriately, exit with status 0,\n otherwise exit with status 1 by printing all invalid files.\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import subprocess import sys def check_imports(): output = subprocess.check_output(["goimports", "-l", "."]) output = output.decode("utf-8") if output != "": msg = """goimports found non-ordered imports in the following files: {0} Run `goimports -w .` to order the imports or group and order them yourself.""".format(output) sys.exit(msg)
1,023
599
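A sketch of running the check above; it shells out to goimports, so the Go tool must be on PATH and the script should be run from the repository root (both assumptions, not shown in the record).

import subprocess
import sys

def check_imports():
    output = subprocess.check_output(["goimports", "-l", "."]).decode("utf-8")
    if output != "":
        sys.exit("goimports found non-ordered imports in:\n" + output)

check_imports()  # silent success means every file's imports are grouped and ordered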
656697c9621080c559588c6ab84cc8aaf2eae6d5
victor-huang/alpha_tech_tracker
alpha_tech_tracker/technical_analysis.py
[ "MIT" ]
Python
is_sorted
<not_specific>
def is_sorted(x, order=-1): if order == 1: key = lambda x: x else: key = lambda x: -x """ check for descending order by default """ return all([key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1)])
check for descending order by default
check for descending order by default
[ "check", "for", "descending", "order", "by", "default" ]
def is_sorted(x, order=-1): if order == 1: key = lambda x: x else: key = lambda x: -x return all([key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1)])
[ "def", "is_sorted", "(", "x", ",", "order", "=", "-", "1", ")", ":", "if", "order", "==", "1", ":", "key", "=", "lambda", "x", ":", "x", "else", ":", "key", "=", "lambda", "x", ":", "-", "x", "return", "all", "(", "[", "key", "(", "x", "[", "i", "]", ")", "<=", "key", "(", "x", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "len", "(", "x", ")", "-", "1", ")", "]", ")" ]
check for descending order by default
[ "check", "for", "descending", "order", "by", "default" ]
[ "\"\"\" check for descending order by default \"\"\"" ]
[ { "param": "x", "type": null }, { "param": "order", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "order", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_sorted(x, order=-1): if order == 1: key = lambda x: x else: key = lambda x: -x return all([key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1)])
1,024
421
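A usage sketch for the order check above; order=-1 (the default) tests non-increasing order and order=1 tests non-decreasing, with ties allowed either way.

def is_sorted(x, order=-1):
    if order == 1:
        key = lambda v: v
    else:
        key = lambda v: -v
    return all([key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1)])

print(is_sorted([9, 7, 7, 2]))        # True  (descending is the default)
print(is_sorted([1, 3, 5], order=1))  # True  (ascending)
print(is_sorted([1, 3, 2], order=1))  # False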
fad36f1df927027bd9d02aa962669b49bffbddc6
nhenriksen/aimtools
aimtools/unique_types.py
[ "MIT" ]
Python
_create_atom_type_list
<not_specific>
def _create_atom_type_list(first_chars, second_chars): """ Create all possible two character atom types """ # NMH: Could make this into a list comprehension probably. types = [] for first_char in first_chars: for second_char in second_chars: types.append(first_char + second_char) return types
Create all possible two character atom types
Create all possible two character atom types
[ "Create", "all", "possible", "two", "character", "atom", "types" ]
def _create_atom_type_list(first_chars, second_chars): types = [] for first_char in first_chars: for second_char in second_chars: types.append(first_char + second_char) return types
[ "def", "_create_atom_type_list", "(", "first_chars", ",", "second_chars", ")", ":", "types", "=", "[", "]", "for", "first_char", "in", "first_chars", ":", "for", "second_char", "in", "second_chars", ":", "types", ".", "append", "(", "first_char", "+", "second_char", ")", "return", "types" ]
Create all possible two character atom types
[ "Create", "all", "possible", "two", "character", "atom", "types" ]
[ "\"\"\" Create all possible two character atom types \"\"\"", "# NMH: Could make this into a list comprehension probably." ]
[ { "param": "first_chars", "type": null }, { "param": "second_chars", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "first_chars", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "second_chars", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _create_atom_type_list(first_chars, second_chars): types = [] for first_char in first_chars: for second_char in second_chars: types.append(first_char + second_char) return types
1,025
714
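A usage sketch; the character sets below are invented for illustration. As the NMH comment in the source suggests, the same result can also be written as a list comprehension.

def _create_atom_type_list(first_chars, second_chars):
    types = []
    for first_char in first_chars:
        for second_char in second_chars:
            types.append(first_char + second_char)
    return types

print(_create_atom_type_list("CN", "0123"))
# ['C0', 'C1', 'C2', 'C3', 'N0', 'N1', 'N2', 'N3']
# Equivalent one-liner: [a + b for a in first_chars for b in second_chars]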
8b844570c6477dd689e1d0a73c6ec93710ef7f46
Sage-Bionetworks/challengeutils
challengeutils/utils.py
[ "Apache-2.0" ]
Python
change_all_submission_status
null
def change_all_submission_status(syn, evaluationid, submission_status='SCORED', change_to_status='VALIDATED'): ''' Function to change submission status of all submissions in a queue The defaults is to change submissions from SCORED -> VALIDATED This function can be useful for 'rescoring' submissions Args: syn: Synapse object evaluationid: Id of an Evaluation queue submission_status: Submissions with this status that you want to change. Default is SCORED. change_to_status: Submission status to change a submission to. Default is VALIDATED. ''' submission_bundle = syn.getSubmissionBundles(evaluationid, status=submission_status) for _, status in submission_bundle: status.status = change_to_status syn.store(status)
Function to change submission status of all submissions in a queue The defaults is to change submissions from SCORED -> VALIDATED This function can be useful for 'rescoring' submissions Args: syn: Synapse object evaluationid: Id of an Evaluation queue submission_status: Submissions with this status that you want to change. Default is SCORED. change_to_status: Submission status to change a submission to. Default is VALIDATED.
Function to change submission status of all submissions in a queue The defaults is to change submissions from SCORED -> VALIDATED This function can be useful for 'rescoring' submissions
[ "Function", "to", "change", "submission", "status", "of", "all", "submissions", "in", "a", "queue", "The", "defaults", "is", "to", "change", "submissions", "from", "SCORED", "-", ">", "VALIDATED", "This", "function", "can", "be", "useful", "for", "'", "rescoring", "'", "submissions" ]
def change_all_submission_status(syn, evaluationid, submission_status='SCORED', change_to_status='VALIDATED'): submission_bundle = syn.getSubmissionBundles(evaluationid, status=submission_status) for _, status in submission_bundle: status.status = change_to_status syn.store(status)
[ "def", "change_all_submission_status", "(", "syn", ",", "evaluationid", ",", "submission_status", "=", "'SCORED'", ",", "change_to_status", "=", "'VALIDATED'", ")", ":", "submission_bundle", "=", "syn", ".", "getSubmissionBundles", "(", "evaluationid", ",", "status", "=", "submission_status", ")", "for", "_", ",", "status", "in", "submission_bundle", ":", "status", ".", "status", "=", "change_to_status", "syn", ".", "store", "(", "status", ")" ]
Function to change submission status of all submissions in a queue The defaults is to change submissions from SCORED -> VALIDATED This function can be useful for 'rescoring' submissions
[ "Function", "to", "change", "submission", "status", "of", "all", "submissions", "in", "a", "queue", "The", "defaults", "is", "to", "change", "submissions", "from", "SCORED", "-", ">", "VALIDATED", "This", "function", "can", "be", "useful", "for", "'", "rescoring", "'", "submissions" ]
[ "'''\n Function to change submission status of all submissions in a queue\n The defaults is to change submissions from SCORED -> VALIDATED\n This function can be useful for 'rescoring' submissions\n\n Args:\n syn: Synapse object\n evaluationid: Id of an Evaluation queue\n submission_status: Submissions with this status that you want to\n change. Default is SCORED.\n change_to_status: Submission status to change a submission to.\n Default is VALIDATED.\n '''" ]
[ { "param": "syn", "type": null }, { "param": "evaluationid", "type": null }, { "param": "submission_status", "type": null }, { "param": "change_to_status", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "syn", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "evaluationid", "type": null, "docstring": "Id of an Evaluation queue", "docstring_tokens": [ "Id", "of", "an", "Evaluation", "queue" ], "default": null, "is_optional": null }, { "identifier": "submission_status", "type": null, "docstring": "Submissions with this status that you want to\nchange. Default is SCORED.", "docstring_tokens": [ "Submissions", "with", "this", "status", "that", "you", "want", "to", "change", ".", "Default", "is", "SCORED", "." ], "default": null, "is_optional": null }, { "identifier": "change_to_status", "type": null, "docstring": "Submission status to change a submission to.\nDefault is VALIDATED.", "docstring_tokens": [ "Submission", "status", "to", "change", "a", "submission", "to", ".", "Default", "is", "VALIDATED", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def change_all_submission_status(syn, evaluationid, submission_status='SCORED', change_to_status='VALIDATED'): submission_bundle = syn.getSubmissionBundles(evaluationid, status=submission_status) for _, status in submission_bundle: status.status = change_to_status syn.store(status)
1,026
653
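An invocation sketch only: it assumes the synapseclient package, working credentials, and an existing evaluation queue, none of which are shown in the record; the queue id below is made up for illustration.

import synapseclient
from challengeutils.utils import change_all_submission_status

syn = synapseclient.Synapse()
syn.login()  # credentials from ~/.synapseConfig or the environment

# "9614112" is a hypothetical evaluation id.
change_all_submission_status(syn, "9614112",
                             submission_status="SCORED",
                             change_to_status="VALIDATED")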
47a50f271efbf6949f141b0f714c8ad02252f56c
akugarg/scancode-toolkit
etc/release/utils_thirdparty.py
[ "Apache-2.0", "CC-BY-4.0" ]
Python
from_data
<not_specific>
def from_data(cls, data, keep_extra=False): """ Return a distribution built from a `data` mapping. """ filename = data['filename'] dist = cls.from_filename(filename) dist.update(data, keep_extra=keep_extra) return dist
Return a distribution built from a `data` mapping.
Return a distribution built from a `data` mapping.
[ "Return", "a", "distribution", "built", "from", "a", "`", "data", "`", "mapping", "." ]
def from_data(cls, data, keep_extra=False): filename = data['filename'] dist = cls.from_filename(filename) dist.update(data, keep_extra=keep_extra) return dist
[ "def", "from_data", "(", "cls", ",", "data", ",", "keep_extra", "=", "False", ")", ":", "filename", "=", "data", "[", "'filename'", "]", "dist", "=", "cls", ".", "from_filename", "(", "filename", ")", "dist", ".", "update", "(", "data", ",", "keep_extra", "=", "keep_extra", ")", "return", "dist" ]
Return a distribution built from a `data` mapping.
[ "Return", "a", "distribution", "built", "from", "a", "`", "data", "`", "mapping", "." ]
[ "\"\"\"\n Return a distribution built from a `data` mapping.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "data", "type": null }, { "param": "keep_extra", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "keep_extra", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_data(cls, data, keep_extra=False): filename = data['filename'] dist = cls.from_filename(filename) dist.update(data, keep_extra=keep_extra) return dist
1,028
655
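The classmethod composes from_filename and update, so exercising it needs a host class; the Distribution stand-in below is hypothetical and far simpler than the real class in scancode-toolkit's utils_thirdparty.py.

class Distribution:
    def __init__(self, filename):
        self.filename = filename
        self.extra = {}

    @classmethod
    def from_filename(cls, filename):
        return cls(filename)

    def update(self, data, keep_extra=False):
        # Known attributes are set directly; unknown keys go to extra if kept.
        for k, v in data.items():
            if hasattr(self, k):
                setattr(self, k, v)
            elif keep_extra:
                self.extra[k] = v

    @classmethod
    def from_data(cls, data, keep_extra=False):
        filename = data['filename']
        dist = cls.from_filename(filename)
        dist.update(data, keep_extra=keep_extra)
        return dist

dist = Distribution.from_data({'filename': 'foo-1.0.tar.gz', 'sha1': 'abc'},
                              keep_extra=True)
print(dist.filename, dist.extra)  # foo-1.0.tar.gz {'sha1': 'abc'}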