Dataset schema (one row per column: name, dtype, observed length or value range):

hexsha                   stringlengths   40–40
repo                     stringlengths   5–121
path                     stringlengths   4–227
license                  list
language                 stringclasses   1 value
identifier               stringlengths   1–107
return_type              stringlengths   2–237
original_string          stringlengths   75–13.4k
original_docstring       stringlengths   13–12.9k
docstring                stringlengths   13–2.57k
docstring_tokens         list
code                     stringlengths   23–1.88k
code_tokens              list
short_docstring          stringlengths   1–1.32k
short_docstring_tokens   list
comment                  list
parameters               list
docstring_params         dict
code_with_imports        stringlengths   23–1.88k
idxs                     int64           0–611k
cluster                  int64           0–1.02k
9905e91ef5d7341b2661ece43339869a4ad90707
arniebarni/rhasspy_weather
examples/rhasspy_weather_mqtt.py
[ "MIT" ]
Python
prepare_dict
<not_specific>
def prepare_dict(hermes_dict):
    """
    Prepare a rhasspy type like dict from a hermes intent dict
    """
    intent = hermes_dict["intent"]["intentName"]
    out_dict = {}
    out_dict.update({"slots": {s["slotName"]: s["rawValue"] for s in hermes_dict["slots"]}})
    out_dict["intent"] = {"name": intent}
    return out_dict
Prepare a rhasspy type like dict from a hermes intent dict
Prepare a rhasspy type like dict from a hermes intent dict
[ "Prepare", "a", "rhasspy", "type", "like", "dict", "from", "a", "hermes", "intent", "dict" ]
def prepare_dict(hermes_dict): intent = hermes_dict["intent"]["intentName"] out_dict = {} out_dict.update({"slots": {s["slotName"]:s["rawValue"] for s in hermes_dict["slots"]}}) out_dict["intent"] = {"name": intent} return out_dict
[ "def", "prepare_dict", "(", "hermes_dict", ")", ":", "intent", "=", "hermes_dict", "[", "\"intent\"", "]", "[", "\"intentName\"", "]", "out_dict", "=", "{", "}", "out_dict", ".", "update", "(", "{", "\"slots\"", ":", "{", "s", "[", "\"slotName\"", "]", ":", "s", "[", "\"rawValue\"", "]", "for", "s", "in", "hermes_dict", "[", "\"slots\"", "]", "}", "}", ")", "out_dict", "[", "\"intent\"", "]", "=", "{", "\"name\"", ":", "intent", "}", "return", "out_dict" ]
Prepare a rhasspy type like dict from a hermes intent dict
[ "Prepare", "a", "rhasspy", "type", "like", "dict", "from", "a", "hermes", "intent", "dict" ]
[ "\"\"\"\n Prepare a rhasspy type like dict from a hermes intent dict\n \"\"\"" ]
[ { "param": "hermes_dict", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "hermes_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def prepare_dict(hermes_dict):
    intent = hermes_dict["intent"]["intentName"]
    out_dict = {}
    out_dict.update({"slots": {s["slotName"]: s["rawValue"] for s in hermes_dict["slots"]}})
    out_dict["intent"] = {"name": intent}
    return out_dict
1,029
97
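For illustration, a minimal call sketch; the Hermes payload below is invented (not taken from the dataset), and prepare_dict is assumed in scope as defined above.

hermes_msg = {
    "intent": {"intentName": "GetWeatherForecast"},   # hypothetical intent name
    "slots": [{"slotName": "when", "rawValue": "tomorrow"}],
}
print(prepare_dict(hermes_msg))
# {'slots': {'when': 'tomorrow'}, 'intent': {'name': 'GetWeatherForecast'}}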
ab01247a895cec6a8a7637f0e7f46b24c83a2855
misken/pymwts
pymwts/mwts_probe_phase2.py
[ "MIT" ]
Python
okTourTypeDayShift_rule
<not_specific>
def okTourTypeDayShift_rule(M): """ List of (window, tour type, shift length, day) tuples that are allowable. To be allowable, for every week and day there must be an allowable shift a given length allowed for that tour type). :param M: Model """ index_list = [] for (i, t, j) in M.WINDOWS * M.activeTT * M.DAYS: # n_ok_weeks = 0 for w in M.WEEKS: # n_ok_days = 0 for k in M.tt_length_x[t]: if (i, j, w) in M.okStartWindowRoots[t, k] and (i, t, k, j) not in index_list: index_list.append((i, t, k, j)) return index_list
List of (window, tour type, shift length, day) tuples that are allowable. To be allowable, for every week and day there must be an allowable shift a given length allowed for that tour type). :param M: Model
List of (window, tour type, shift length, day) tuples that are allowable. To be allowable, for every week and day there must be an allowable shift a given length allowed for that tour type).
[ "List", "of", "(", "window", "tour", "type", "shift", "length", "day", ")", "tuples", "that", "are", "allowable", ".", "To", "be", "allowable", "for", "every", "week", "and", "day", "there", "must", "be", "an", "allowable", "shift", "a", "given", "length", "allowed", "for", "that", "tour", "type", ")", "." ]
def okTourTypeDayShift_rule(M): index_list = [] for (i, t, j) in M.WINDOWS * M.activeTT * M.DAYS: for w in M.WEEKS: for k in M.tt_length_x[t]: if (i, j, w) in M.okStartWindowRoots[t, k] and (i, t, k, j) not in index_list: index_list.append((i, t, k, j)) return index_list
[ "def", "okTourTypeDayShift_rule", "(", "M", ")", ":", "index_list", "=", "[", "]", "for", "(", "i", ",", "t", ",", "j", ")", "in", "M", ".", "WINDOWS", "*", "M", ".", "activeTT", "*", "M", ".", "DAYS", ":", "for", "w", "in", "M", ".", "WEEKS", ":", "for", "k", "in", "M", ".", "tt_length_x", "[", "t", "]", ":", "if", "(", "i", ",", "j", ",", "w", ")", "in", "M", ".", "okStartWindowRoots", "[", "t", ",", "k", "]", "and", "(", "i", ",", "t", ",", "k", ",", "j", ")", "not", "in", "index_list", ":", "index_list", ".", "append", "(", "(", "i", ",", "t", ",", "k", ",", "j", ")", ")", "return", "index_list" ]
List of (window, tour type, shift length, day) tuples that are allowable.
[ "List", "of", "(", "window", "tour", "type", "shift", "length", "day", ")", "tuples", "that", "are", "allowable", "." ]
[ "\"\"\"\n List of (window, tour type, shift length, day) tuples that are allowable.\n\n To be allowable,\n for every week and day there must be an allowable shift a given length allowed for that\n tour type).\n\n :param M: Model\n \"\"\"", "# n_ok_weeks = 0", "# n_ok_days = 0" ]
[ { "param": "M", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "M", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def okTourTypeDayShift_rule(M):
    index_list = []
    for (i, t, j) in M.WINDOWS * M.activeTT * M.DAYS:
        for w in M.WEEKS:
            for k in M.tt_length_x[t]:
                if (i, j, w) in M.okStartWindowRoots[t, k] and (i, t, k, j) not in index_list:
                    index_list.append((i, t, k, j))
    return index_list
1,030
187
e053bf4b48597158dec4264e9973ab7dd7771874
nsang0u/TweetMapping
plot.py
[ "MIT" ]
Python
gradient
<not_specific>
def gradient(region): """ a gradient color based on percentages of votes in a region Args: region (Region): a region object Yields: (int, int, int): a triple (RGB values between 0 and 255) """ red_val = int(region.republican_percentage() * 255) green_val = int(region.other_percentage() * 255) blue_val = int(region.democrat_percentage() * 255) return (red_val, green_val, blue_val)
a gradient color based on percentages of votes in a region Args: region (Region): a region object Yields: (int, int, int): a triple (RGB values between 0 and 255)
a gradient color based on percentages of votes in a region
[ "a", "gradient", "color", "based", "on", "percentages", "of", "votes", "in", "a", "region" ]
def gradient(region): red_val = int(region.republican_percentage() * 255) green_val = int(region.other_percentage() * 255) blue_val = int(region.democrat_percentage() * 255) return (red_val, green_val, blue_val)
[ "def", "gradient", "(", "region", ")", ":", "red_val", "=", "int", "(", "region", ".", "republican_percentage", "(", ")", "*", "255", ")", "green_val", "=", "int", "(", "region", ".", "other_percentage", "(", ")", "*", "255", ")", "blue_val", "=", "int", "(", "region", ".", "democrat_percentage", "(", ")", "*", "255", ")", "return", "(", "red_val", ",", "green_val", ",", "blue_val", ")" ]
a gradient color based on percentages of votes in a region
[ "a", "gradient", "color", "based", "on", "percentages", "of", "votes", "in", "a", "region" ]
[ "\"\"\"\n a gradient color based on percentages of votes in a region\n Args:\n region (Region): a region object\n Yields:\n (int, int, int): a triple (RGB values between 0 and 255)\n \"\"\"" ]
[ { "param": "region", "type": null } ]
{ "returns": [ { "docstring": "(int, int, int): a triple (RGB values between 0 and 255)", "docstring_tokens": [ "(", "int", "int", "int", ")", ":", "a", "triple", "(", "RGB", "values", "between", "0", "and", "255", ")" ], "type": null } ], "raises": [], "params": [ { "identifier": "region", "type": null, "docstring": "a region object", "docstring_tokens": [ "a", "region", "object" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def gradient(region):
    red_val = int(region.republican_percentage() * 255)
    green_val = int(region.other_percentage() * 255)
    blue_val = int(region.democrat_percentage() * 255)
    return (red_val, green_val, blue_val)
1,031
805
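A usage sketch with a stand-in for the Region class (the real one lives elsewhere in the TweetMapping repository; the percentages below are made up). gradient is assumed in scope as defined above.

class FakeRegion:
    # hypothetical stub exposing the three methods gradient() relies on
    def republican_percentage(self): return 0.6
    def other_percentage(self): return 0.1
    def democrat_percentage(self): return 0.3

print(gradient(FakeRegion()))  # (153, 25, 76): a red-leaning region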
960e6feba20e4e140ae24d3f3bbac22a448420ff
kaithar/salt
salt/fileserver/gitfs.py
[ "Apache-2.0" ]
Python
_stale_refs_pygit2
<not_specific>
def _stale_refs_pygit2(repo):
    '''
    Return a list of stale refs by running git remote prune --dry-run <remote>,
    since libgit2 can't do this.
    '''
    remote = repo.remotes[0].name
    key = ' * [would prune] '
    ret = []
    for line in subprocess.Popen(
            'git remote prune --dry-run {0!r}'.format(remote),
            shell=True,
            close_fds=True,
            cwd=repo.workdir,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0].splitlines():
        if line.startswith(key):
            line = line.replace(key, '')
            ret.append(line)
    return ret
Return a list of stale refs by running git remote prune --dry-run <remote>, since libgit2 can't do this.
Return a list of stale refs by running git remote prune --dry-run , since libgit2 can't do this.
[ "Return", "a", "list", "of", "stale", "refs", "by", "running", "git", "remote", "prune", "--", "dry", "-", "run", "since", "libgit2", "can", "'", "t", "do", "this", "." ]
def _stale_refs_pygit2(repo): remote = repo.remotes[0].name key = ' * [would prune] ' ret = [] for line in subprocess.Popen( 'git remote prune --dry-run {0!r}'.format(remote), shell=True, close_fds=True, cwd=repo.workdir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].splitlines(): if line.startswith(key): line = line.replace(key, '') ret.append(line) return ret
[ "def", "_stale_refs_pygit2", "(", "repo", ")", ":", "remote", "=", "repo", ".", "remotes", "[", "0", "]", ".", "name", "key", "=", "' * [would prune] '", "ret", "=", "[", "]", "for", "line", "in", "subprocess", ".", "Popen", "(", "'git remote prune --dry-run {0!r}'", ".", "format", "(", "remote", ")", ",", "shell", "=", "True", ",", "close_fds", "=", "True", ",", "cwd", "=", "repo", ".", "workdir", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", ".", "communicate", "(", ")", "[", "0", "]", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "key", ")", ":", "line", "=", "line", ".", "replace", "(", "key", ",", "''", ")", "ret", ".", "append", "(", "line", ")", "return", "ret" ]
Return a list of stale refs by running git remote prune --dry-run <remote>, since libgit2 can't do this.
[ "Return", "a", "list", "of", "stale", "refs", "by", "running", "git", "remote", "prune", "--", "dry", "-", "run", "<remote", ">", "since", "libgit2", "can", "'", "t", "do", "this", "." ]
[ "'''\n Return a list of stale refs by running git remote prune --dry-run <remote>,\n since libgit2 can't do this.\n '''" ]
[ { "param": "repo", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "repo", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess

def _stale_refs_pygit2(repo):
    remote = repo.remotes[0].name
    key = ' * [would prune] '
    ret = []
    for line in subprocess.Popen(
            'git remote prune --dry-run {0!r}'.format(remote),
            shell=True,
            close_fds=True,
            cwd=repo.workdir,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0].splitlines():
        if line.startswith(key):
            line = line.replace(key, '')
            ret.append(line)
    return ret
1,032
991
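This helper dates from Salt's Python 2 era: on Python 3, communicate()[0] returns bytes, so line.startswith(key) with a str key would raise a TypeError unless the output were decoded first. The parsing step itself is easy to exercise on canned output (the remote-branch names below are invented):

sample = (" * [would prune] origin/old-feature\n"
          " * [would prune] origin/stale-fix\n"
          "Pruning origin")
key = ' * [would prune] '
stale = [line.replace(key, '') for line in sample.splitlines() if line.startswith(key)]
print(stale)  # ['origin/old-feature', 'origin/stale-fix']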
c9de3dab86a5f11e2f18ee0716e8b4fb8935c686
victorfica/utils
epitope_mapping_df.py
[ "MIT" ]
Python
respCoords
<not_specific>
def respCoords(r, plot=False): """Return coordinates of the response peptide Plot option returns coordinates based on start and length of peptide, as opposed to end coordinate which is subject to indsertions/deletions Use end like a stop in range(start, stop)""" if plot: return list(range(int(r.start), int(r.start) + len(r.seq))) else: return list(range(int(r.start), int(r.end)))
Return coordinates of the response peptide Plot option returns coordinates based on start and length of peptide, as opposed to end coordinate which is subject to indsertions/deletions Use end like a stop in range(start, stop)
Return coordinates of the response peptide Plot option returns coordinates based on start and length of peptide, as opposed to end coordinate which is subject to indsertions/deletions Use end like a stop in range(start, stop)
[ "Return", "coordinates", "of", "the", "response", "peptide", "Plot", "option", "returns", "coordinates", "based", "on", "start", "and", "length", "of", "peptide", "as", "opposed", "to", "end", "coordinate", "which", "is", "subject", "to", "indsertions", "/", "deletions", "Use", "end", "like", "a", "stop", "in", "range", "(", "start", "stop", ")" ]
def respCoords(r, plot=False): if plot: return list(range(int(r.start), int(r.start) + len(r.seq))) else: return list(range(int(r.start), int(r.end)))
[ "def", "respCoords", "(", "r", ",", "plot", "=", "False", ")", ":", "if", "plot", ":", "return", "list", "(", "range", "(", "int", "(", "r", ".", "start", ")", ",", "int", "(", "r", ".", "start", ")", "+", "len", "(", "r", ".", "seq", ")", ")", ")", "else", ":", "return", "list", "(", "range", "(", "int", "(", "r", ".", "start", ")", ",", "int", "(", "r", ".", "end", ")", ")", ")" ]
Return coordinates of the response peptide Plot option returns coordinates based on start and length of peptide, as opposed to end coordinate which is subject to indsertions/deletions Use end like a stop in range(start, stop)
[ "Return", "coordinates", "of", "the", "response", "peptide", "Plot", "option", "returns", "coordinates", "based", "on", "start", "and", "length", "of", "peptide", "as", "opposed", "to", "end", "coordinate", "which", "is", "subject", "to", "indsertions", "/", "deletions", "Use", "end", "like", "a", "stop", "in", "range", "(", "start", "stop", ")" ]
[ "\"\"\"Return coordinates of the response peptide\n Plot option returns coordinates based on start and length of peptide,\n as opposed to end coordinate which is subject to indsertions/deletions\n Use end like a stop in range(start, stop)\"\"\"" ]
[ { "param": "r", "type": null }, { "param": "plot", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "r", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "plot", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def respCoords(r, plot=False):
    if plot:
        return list(range(int(r.start), int(r.start) + len(r.seq)))
    else:
        return list(range(int(r.start), int(r.end)))
1,033
993
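A sketch of calling respCoords with a stand-in record (the real r is a row-like object from epitope_mapping_df.py; the namedtuple and values here are invented):

from collections import namedtuple

Resp = namedtuple('Resp', ['start', 'end', 'seq'])  # hypothetical response record
r = Resp(start=10, end=19, seq='PEPTIDESQ')

print(respCoords(r))             # [10, ..., 18] built from start/end
print(respCoords(r, plot=True))  # [10, ..., 18] built from start + len(seq)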
e8c90bdcf67f9e33a2d7008d1a8c5365561a19fc
MSO4SC/cloudify-hpc-plugin
hpc_plugin/workload_managers/torque.py
[ "Apache-2.0" ]
Python
_parse_qselect
<not_specific>
def _parse_qselect(qselect_output): """ Parse `qselect` output and returns list of job ids without host names """ jobs = qselect_output.splitlines() if not jobs or (len(jobs) == 1 and jobs[0] is ''): return [] return [int(job.split('.')[0]) for job in jobs]
Parse `qselect` output and returns list of job ids without host names
Parse `qselect` output and returns list of job ids without host names
[ "Parse", "`", "qselect", "`", "output", "and", "returns", "list", "of", "job", "ids", "without", "host", "names" ]
def _parse_qselect(qselect_output): jobs = qselect_output.splitlines() if not jobs or (len(jobs) == 1 and jobs[0] is ''): return [] return [int(job.split('.')[0]) for job in jobs]
[ "def", "_parse_qselect", "(", "qselect_output", ")", ":", "jobs", "=", "qselect_output", ".", "splitlines", "(", ")", "if", "not", "jobs", "or", "(", "len", "(", "jobs", ")", "==", "1", "and", "jobs", "[", "0", "]", "is", "''", ")", ":", "return", "[", "]", "return", "[", "int", "(", "job", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "for", "job", "in", "jobs", "]" ]
Parse `qselect` output and returns list of job ids without host names
[ "Parse", "`", "qselect", "`", "output", "and", "returns", "list", "of", "job", "ids", "without", "host", "names" ]
[ "\"\"\" Parse `qselect` output and returns\n list of job ids without host names \"\"\"" ]
[ { "param": "qselect_output", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "qselect_output", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _parse_qselect(qselect_output):
    jobs = qselect_output.splitlines()
    if not jobs or (len(jobs) == 1 and jobs[0] is ''):
        return []
    return [int(job.split('.')[0]) for job in jobs]
1,034
286
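A quick check on canned qselect output (job ids and hostname invented; _parse_qselect as defined above). Note in passing that jobs[0] is '' compares identity rather than equality and only works thanks to CPython's interning of the empty string; == would be the conventional spelling.

sample = "1234.cluster.example.org\n1235.cluster.example.org"
print(_parse_qselect(sample))  # [1234, 1235]
print(_parse_qselect(""))      # [] -- "".splitlines() is already an empty list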
547d9292fa2f72ca7067140c286475d95e754325
david4096/cell-server
celldb/client/client.py
[ "Apache-2.0" ]
Python
_safe_float_vector
<not_specific>
def _safe_float_vector(iterable): """ Takes an iterable and returns a vector of floats. Respects the null return value. :param iterable: :return: """ # FIXME workaround in client to deal with data ingestion problem # The RNA seqer API puts NA in for values that haven't been quantified. # We send back None instead of a string. return [float(x) if x and x != 'NA' else None for x in iterable]
Takes an iterable and returns a vector of floats. Respects the null return value. :param iterable: :return:
Takes an iterable and returns a vector of floats. Respects the null return value.
[ "Takes", "an", "iterable", "and", "returns", "a", "vector", "of", "floats", ".", "Respects", "the", "null", "return", "value", "." ]
def _safe_float_vector(iterable): return [float(x) if x and x != 'NA' else None for x in iterable]
[ "def", "_safe_float_vector", "(", "iterable", ")", ":", "return", "[", "float", "(", "x", ")", "if", "x", "and", "x", "!=", "'NA'", "else", "None", "for", "x", "in", "iterable", "]" ]
Takes an iterable and returns a vector of floats.
[ "Takes", "an", "iterable", "and", "returns", "a", "vector", "of", "floats", "." ]
[ "\"\"\"\n Takes an iterable and returns a vector of floats. Respects the null\n return value.\n :param iterable:\n :return:\n \"\"\"", "# FIXME workaround in client to deal with data ingestion problem", "# The RNA seqer API puts NA in for values that haven't been quantified.", "# We send back None instead of a string." ]
[ { "param": "iterable", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "iterable", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _safe_float_vector(iterable):
    return [float(x) if x and x != 'NA' else None for x in iterable]
1,035
615
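A one-line demonstration with made-up values (_safe_float_vector as defined above); both 'NA' and empty strings map to None:

print(_safe_float_vector(['1.5', 'NA', '', '2']))  # [1.5, None, None, 2.0]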
51b65984c680a93ee4d970b3ef5fd56d686343ec
SweydManaf/estudos-python
CursoIntensivoPython/curso-intensivo-python-master/capitulo_08/exercicios/albuns_dos_usuarios.py
[ "MIT" ]
Python
interromper_loop
<not_specific>
def interromper_loop(
        prompt='Deseja construir outro álbum musical?\n[S/N] '):
    """
    -> Verifica se quer interromper o loop while.
    :return: Retorna a resposta da pergunta do prompt.
    """
    while True:
        continuar = str(input(f'\n{prompt}')).upper().strip()
        if continuar.replace(' ', '').isalpha():
            if continuar in 'SN':
                return continuar
        print('Digite apenas "S" ou "N".')
-> Verifica se quer interromper o loop while. :return: Retorna a resposta da pergunta do prompt.
> Verifica se quer interromper o loop while.
[ ">", "Verifica", "se", "quer", "interromper", "o", "loop", "while", "." ]
def interromper_loop( prompt='Deseja construir outro álbum musical?\n[S/N] '): while True: continuar = str(input(f'\n{prompt}')).upper().strip() if continuar.replace(' ', '').isalpha(): if continuar in 'SN': return continuar print('Digite apenas "S" ou "N".')
[ "def", "interromper_loop", "(", "prompt", "=", "'Deseja construir outro álbum musical?\\n[S/N] ')", ":", "", "while", "True", ":", "continuar", "=", "str", "(", "input", "(", "f'\\n{prompt}'", ")", ")", ".", "upper", "(", ")", ".", "strip", "(", ")", "if", "continuar", ".", "replace", "(", "' '", ",", "''", ")", ".", "isalpha", "(", ")", ":", "if", "continuar", "in", "'SN'", ":", "return", "continuar", "print", "(", "'Digite apenas \"S\" ou \"N\".'", ")" ]
> Verifica se quer interromper o loop while.
[ ">", "Verifica", "se", "quer", "interromper", "o", "loop", "while", "." ]
[ "\"\"\"\n -> Verifica se quer interromper o loop while.\n :return: Retorna a resposta da pergunta do prompt.\n \"\"\"" ]
[ { "param": "prompt", "type": null } ]
{ "returns": [ { "docstring": "Retorna a resposta da pergunta do prompt.", "docstring_tokens": [ "Retorna", "a", "resposta", "da", "pergunta", "do", "prompt", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "prompt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def interromper_loop(
        prompt='Deseja construir outro álbum musical?\n[S/N] '):
    while True:
        continuar = str(input(f'\n{prompt}')).upper().strip()
        if continuar.replace(' ', '').isalpha():
            if continuar in 'SN':
                return continuar
        print('Digite apenas "S" ou "N".')
1,036
353
abd83d105cd391474b1d1ab6f0f241205f6a338f
unixorn/agamotto
agamotto/network.py
[ "Apache-2.0" ]
Python
probePort
<not_specific>
def probePort(matchtext, host='127.0.0.1', port=80, command=None):
    """
    Connect to port on host, send an optional command, then return the response.

    Usage:
    self.assertTrue(agamotto.network.probePort(host='localhost',
                    matchtext='<title>', port=80,
                    command="GET / HTTP/1.1\nHost: localhost\n\n"),
                    'Did not see a title in https result')
    """
    s = socket.socket()
    s.connect((host, port))
    if command:
        s.send(command)
    rawData = s.recv(1024)
    s.close()
    return matchtext in rawData
Connect to port on host, send an optional command, then return the response. Usage: self.assertTrue(agamotto.network.probePort(host='localhost', matchtext='<title>', port=80, command="GET / HTTP/1.1\nHost: localhost\n\n"), 'Did not see a title in https result')
Connect to port on host, send an optional command, then return the response.
[ "Connect", "to", "port", "on", "host", "send", "an", "optional", "command", "then", "return", "the", "response", "." ]
def probePort(matchtext, host='127.0.0.1', port=80, command=None): s = socket.socket() s.connect((host, port)) if command: s.send(command) rawData = s.recv(1024) s.close() return matchtext in rawData
[ "def", "probePort", "(", "matchtext", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "80", ",", "command", "=", "None", ")", ":", "s", "=", "socket", ".", "socket", "(", ")", "s", ".", "connect", "(", "(", "host", ",", "port", ")", ")", "if", "command", ":", "s", ".", "send", "(", "command", ")", "rawData", "=", "s", ".", "recv", "(", "1024", ")", "s", ".", "close", "(", ")", "return", "matchtext", "in", "rawData" ]
Connect to port on host, send an optional command, then return the response.
[ "Connect", "to", "port", "on", "host", "send", "an", "optional", "command", "then", "return", "the", "response", "." ]
[ "\"\"\"\n Connect to port on host, send an optional command, then return the response.\n\n Usage:\n self.assertTrue(agamotto.network.probePort(host='localhost',\n matchtext='<title>', port=80,\n command=\"GET / HTTP/1.1\\nHost: localhost\\n\\n\"),\n 'Did not see a title in https result')\n \"\"\"" ]
[ { "param": "matchtext", "type": null }, { "param": "host", "type": null }, { "param": "port", "type": null }, { "param": "command", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "matchtext", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "host", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "port", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "command", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import socket

def probePort(matchtext, host='127.0.0.1', port=80, command=None):
    s = socket.socket()
    s.connect((host, port))
    if command:
        s.send(command)
    rawData = s.recv(1024)
    s.close()
    return matchtext in rawData
1,037
710
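probePort is also Python 2 vintage: on Python 3, send() requires bytes and recv() returns bytes, so matchtext must be bytes as well. A hedged Python 3 style invocation might look like the sketch below; it needs a live HTTP server on localhost:80 to actually succeed, and recv(1024) only samples the first kilobyte of the response.

ok = probePort(matchtext=b'<title>', host='localhost', port=80,
               command=b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
print(ok)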
b1bcbf669bca26d340fcb638bcfc611e38fd558b
justinchen673/Catan-AI
gameFunctions.py
[ "MIT" ]
Python
diceRoll
<not_specific>
def diceRoll():
    '''
    Simulates rolling a pair of dice that are numbered 1-6 each. Returns a
    number 2-12 at varying frequencies.
    '''
    die1 = random.randint(1, 6)
    die2 = random.randint(1, 6)
    return die1 + die2
Simulates rolling a pair of dice that are numbered 1-6 each. Returns a number 2-12 at varying frequencies.
Simulates rolling a pair of dice that are numbered 1-6 each. Returns a number 2-12 at varying frequencies.
[ "Simulates", "rolling", "a", "pair", "of", "dice", "that", "are", "numbered", "1", "-", "6", "each", ".", "Returns", "a", "number", "2", "-", "12", "at", "varying", "frequencies", "." ]
def diceRoll(): die1 = random.randint(1, 6) die2 = random.randint(1, 6) return die1 + die2
[ "def", "diceRoll", "(", ")", ":", "die1", "=", "random", ".", "randint", "(", "1", ",", "6", ")", "die2", "=", "random", ".", "randint", "(", "1", ",", "6", ")", "return", "die1", "+", "die2" ]
Simulates rolling a pair of dice that are numbered 1-6 each.
[ "Simulates", "rolling", "a", "pair", "of", "dice", "that", "are", "numbered", "1", "-", "6", "each", "." ]
[ "'''\r\n Simulates rolling a pair of dice that are numbered 1-6 each. Returns a\r\n number 2-12 at varying frequencies.\r\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import random

def diceRoll():
    die1 = random.randint(1, 6)
    die2 = random.randint(1, 6)
    return die1 + die2
1,038
38
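A quick empirical check of the "varying frequencies" claim (diceRoll and the random import as defined above):

from collections import Counter

rolls = Counter(diceRoll() for _ in range(10_000))
print(rolls.most_common(3))  # 7 is almost always on top: six of 36 outcomes sum to 7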
31101fd3e8921e3962e106b9c9062bc1670b1d14
JoelEager/Functional-Python
code/decorators.py
[ "MIT" ]
Python
naive_fib
<not_specific>
def naive_fib(iterations): """ Example of a function that can have a long run time """ if 0 <= iterations <= 1: return iterations else: return naive_fib(iterations - 1) + naive_fib(iterations - 2)
Example of a function that can have a long run time
Example of a function that can have a long run time
[ "Example", "of", "a", "function", "that", "can", "have", "a", "long", "run", "time" ]
def naive_fib(iterations): if 0 <= iterations <= 1: return iterations else: return naive_fib(iterations - 1) + naive_fib(iterations - 2)
[ "def", "naive_fib", "(", "iterations", ")", ":", "if", "0", "<=", "iterations", "<=", "1", ":", "return", "iterations", "else", ":", "return", "naive_fib", "(", "iterations", "-", "1", ")", "+", "naive_fib", "(", "iterations", "-", "2", ")" ]
Example of a function that can have a long run time
[ "Example", "of", "a", "function", "that", "can", "have", "a", "long", "run", "time" ]
[ "\"\"\"\n Example of a function that can have a long run time\n \"\"\"" ]
[ { "param": "iterations", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iterations", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def naive_fib(iterations):
    if 0 <= iterations <= 1:
        return iterations
    else:
        return naive_fib(iterations - 1) + naive_fib(iterations - 2)
1,039
868
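Since decorators.py is about decorators, a memoization sketch is a natural companion; this is illustrative, not the repository's own code. Rebinding the module-level name matters: the recursive calls resolve naive_fib globally, so only a rebind routes them through the cache.

from functools import lru_cache

print(naive_fib(30))  # 832040, via roughly 2**n recursive calls

naive_fib = lru_cache(maxsize=None)(naive_fib)  # rebind so the recursion hits the cache
print(naive_fib(200))  # effectively instant once memoized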
6b38dcf451092fa2733a40b0414d33bd6cf7e675
asherif844/diarization
GreenwayHealthBackup.py
[ "Unlicense" ]
Python
stop_cb
null
def stop_cb(evt): """callback that signals to stop continuous recognition upon receiving an event `evt`""" print('CLOSING on {}'.format(evt)) nonlocal done done = True
callback that signals to stop continuous recognition upon receiving an event `evt`
callback that signals to stop continuous recognition upon receiving an event `evt`
[ "callback", "that", "signals", "to", "stop", "continuous", "recognition", "upon", "receiving", "an", "event", "`", "evt", "`" ]
def stop_cb(evt): print('CLOSING on {}'.format(evt)) nonlocal done done = True
[ "def", "stop_cb", "(", "evt", ")", ":", "print", "(", "'CLOSING on {}'", ".", "format", "(", "evt", ")", ")", "nonlocal", "done", "done", "=", "True" ]
callback that signals to stop continuous recognition upon receiving an event `evt`
[ "callback", "that", "signals", "to", "stop", "continuous", "recognition", "upon", "receiving", "an", "event", "`", "evt", "`" ]
[ "\"\"\"callback that signals to stop continuous recognition upon receiving an event `evt`\"\"\"" ]
[ { "param": "evt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "evt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def stop_cb(evt):
    print('CLOSING on {}'.format(evt))
    nonlocal done
    done = True
1,040
754
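stop_cb cannot stand alone: nonlocal done requires an enclosing function (in the original, the loop driving the Azure Speech SDK's continuous recognition). A minimal self-contained sketch of that shape:

def run_recognition():
    done = False

    def stop_cb(evt):
        print('CLOSING on {}'.format(evt))
        nonlocal done
        done = True

    stop_cb('session-stopped')  # hypothetical event payload
    return done

print(run_recognition())  # True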
16ab116a23464c69cc37e2b414eb690b90cde32a
arvindcheru/rocFFT
library/src/device/generator.py
[ "MIT" ]
Python
clang_format
<not_specific>
def clang_format(code): """Format code using clang-format.""" try: p = subprocess.run(['/opt/rocm/llvm/bin/clang-format', '-style=file'], stdout=subprocess.PIPE, input=str(code), encoding='ascii', check=True) return p.stdout except FileNotFoundError: # code formatting doesn't affect functionality, so just assume # default ROCm path and ignore errors if it's not there. pass return str(code)
Format code using clang-format.
Format code using clang-format.
[ "Format", "code", "using", "clang", "-", "format", "." ]
def clang_format(code): try: p = subprocess.run(['/opt/rocm/llvm/bin/clang-format', '-style=file'], stdout=subprocess.PIPE, input=str(code), encoding='ascii', check=True) return p.stdout except FileNotFoundError: pass return str(code)
[ "def", "clang_format", "(", "code", ")", ":", "try", ":", "p", "=", "subprocess", ".", "run", "(", "[", "'/opt/rocm/llvm/bin/clang-format'", ",", "'-style=file'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "input", "=", "str", "(", "code", ")", ",", "encoding", "=", "'ascii'", ",", "check", "=", "True", ")", "return", "p", ".", "stdout", "except", "FileNotFoundError", ":", "pass", "return", "str", "(", "code", ")" ]
Format code using clang-format.
[ "Format", "code", "using", "clang", "-", "format", "." ]
[ "\"\"\"Format code using clang-format.\"\"\"", "# code formatting doesn't affect functionality, so just assume", "# default ROCm path and ignore errors if it's not there." ]
[ { "param": "code", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "code", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess

def clang_format(code):
    try:
        p = subprocess.run(['/opt/rocm/llvm/bin/clang-format', '-style=file'],
                           stdout=subprocess.PIPE,
                           input=str(code),
                           encoding='ascii',
                           check=True)
        return p.stdout
    except FileNotFoundError:
        pass
    return str(code)
1,041
291
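Only a missing binary is swallowed (FileNotFoundError); any other failure, such as clang-format exiting nonzero under check=True, would propagate. So on a machine without the hardcoded ROCm install the call degrades to an identity function:

snippet = "int main(){return 0;}"
print(clang_format(snippet))  # formatted if clang-format exists, echoed unchanged otherwise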
60edc29101973cd03509f1df5b29769fb023b791
donilan/python-genthemall
genthemall/conf.py
[ "Apache-2.0" ]
Python
_to_upper_name
<not_specific>
def _to_upper_name(name): """ Convert name to uppercase name. example: sysUser will convert to SYS_USER sysRole will convert to SYS_ROLE """ if name is not None and len(name) > 1: u_name = '' for s in name: if s == s.upper(): u_name += '_' + s else: u_name += s.upper() return u_name return name
Convert name to uppercase name. example: sysUser will convert to SYS_USER sysRole will convert to SYS_ROLE
Convert name to uppercase name. example: sysUser will convert to SYS_USER sysRole will convert to SYS_ROLE
[ "Convert", "name", "to", "uppercase", "name", ".", "example", ":", "sysUser", "will", "convert", "to", "SYS_USER", "sysRole", "will", "convert", "to", "SYS_ROLE" ]
def _to_upper_name(name): if name is not None and len(name) > 1: u_name = '' for s in name: if s == s.upper(): u_name += '_' + s else: u_name += s.upper() return u_name return name
[ "def", "_to_upper_name", "(", "name", ")", ":", "if", "name", "is", "not", "None", "and", "len", "(", "name", ")", ">", "1", ":", "u_name", "=", "''", "for", "s", "in", "name", ":", "if", "s", "==", "s", ".", "upper", "(", ")", ":", "u_name", "+=", "'_'", "+", "s", "else", ":", "u_name", "+=", "s", ".", "upper", "(", ")", "return", "u_name", "return", "name" ]
Convert name to uppercase name.
[ "Convert", "name", "to", "uppercase", "name", "." ]
[ "\"\"\"\n Convert name to uppercase name. \n example: \n sysUser will convert to SYS_USER\n sysRole will convert to SYS_ROLE\n \"\"\"" ]
[ { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _to_upper_name(name):
    if name is not None and len(name) > 1:
        u_name = ''
        for s in name:
            if s == s.upper():
                u_name += '_' + s
            else:
                u_name += s.upper()
        return u_name
    return name
1,042
356
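Round-trip examples (function as defined above). One quirk worth knowing: every character that equals its own uppercase form, digits and already-uppercase letters included, gets a leading underscore, so 'ID' becomes '_I_D'.

print(_to_upper_name('sysUser'))  # SYS_USER
print(_to_upper_name('sysRole'))  # SYS_ROLE
print(_to_upper_name('x'))        # strings of length 1 come back unchanged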
00881ccbd1d72fb8f695505b63c791d53444d77b
surf-sci-bc/uspy
uspy/leem/elettra.py
[ "MIT" ]
Python
read_beamline_metadata
dict
def read_beamline_metadata(fname: str) -> dict: """Reads the beamline metadata of the Elettra LEEM file format The beamline file contains a lot of information. Currently only the photon energy is read because its the most important. Parameters ---------- fname : str Filename of beamline file Returns ------- dict Dictionary containing metadata """ with open(fname, "r", encoding="latin-1") as f: idict = {} for line in f: if "Monochromator energy" in line: idict["photon_energy"] = float(line.split(",")[3]) idict["photon_energy_unit"] = "eV" elif "Mesh" in line: idict["mesh"] = float(line.split(",")[3]) idict["mesh_unit"] = line.split(",")[4] return idict
Reads the beamline metadata of the Elettra LEEM file format The beamline file contains a lot of information. Currently only the photon energy is read because its the most important. Parameters ---------- fname : str Filename of beamline file Returns ------- dict Dictionary containing metadata
Reads the beamline metadata of the Elettra LEEM file format The beamline file contains a lot of information. Currently only the photon energy is read because its the most important. Parameters fname : str Filename of beamline file Returns dict Dictionary containing metadata
[ "Reads", "the", "beamline", "metadata", "of", "the", "Elettra", "LEEM", "file", "format", "The", "beamline", "file", "contains", "a", "lot", "of", "information", ".", "Currently", "only", "the", "photon", "energy", "is", "read", "because", "its", "the", "most", "important", ".", "Parameters", "fname", ":", "str", "Filename", "of", "beamline", "file", "Returns", "dict", "Dictionary", "containing", "metadata" ]
def read_beamline_metadata(fname: str) -> dict: with open(fname, "r", encoding="latin-1") as f: idict = {} for line in f: if "Monochromator energy" in line: idict["photon_energy"] = float(line.split(",")[3]) idict["photon_energy_unit"] = "eV" elif "Mesh" in line: idict["mesh"] = float(line.split(",")[3]) idict["mesh_unit"] = line.split(",")[4] return idict
[ "def", "read_beamline_metadata", "(", "fname", ":", "str", ")", "->", "dict", ":", "with", "open", "(", "fname", ",", "\"r\"", ",", "encoding", "=", "\"latin-1\"", ")", "as", "f", ":", "idict", "=", "{", "}", "for", "line", "in", "f", ":", "if", "\"Monochromator energy\"", "in", "line", ":", "idict", "[", "\"photon_energy\"", "]", "=", "float", "(", "line", ".", "split", "(", "\",\"", ")", "[", "3", "]", ")", "idict", "[", "\"photon_energy_unit\"", "]", "=", "\"eV\"", "elif", "\"Mesh\"", "in", "line", ":", "idict", "[", "\"mesh\"", "]", "=", "float", "(", "line", ".", "split", "(", "\",\"", ")", "[", "3", "]", ")", "idict", "[", "\"mesh_unit\"", "]", "=", "line", ".", "split", "(", "\",\"", ")", "[", "4", "]", "return", "idict" ]
Reads the beamline metadata of the Elettra LEEM file format The beamline file contains a lot of information.
[ "Reads", "the", "beamline", "metadata", "of", "the", "Elettra", "LEEM", "file", "format", "The", "beamline", "file", "contains", "a", "lot", "of", "information", "." ]
[ "\"\"\"Reads the beamline metadata of the Elettra LEEM file format\n\n The beamline file contains a lot of information. Currently only the photon energy\n is read because its the most important.\n\n Parameters\n ----------\n fname : str\n Filename of beamline file\n\n Returns\n -------\n dict\n Dictionary containing metadata\n \"\"\"" ]
[ { "param": "fname", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fname", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def read_beamline_metadata(fname: str) -> dict:
    with open(fname, "r", encoding="latin-1") as f:
        idict = {}
        for line in f:
            if "Monochromator energy" in line:
                idict["photon_energy"] = float(line.split(",")[3])
                idict["photon_energy_unit"] = "eV"
            elif "Mesh" in line:
                idict["mesh"] = float(line.split(",")[3])
                idict["mesh_unit"] = line.split(",")[4]
    return idict
1,043
557
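A sketch exercising the parser on a throwaway file. The line layout below is only inferred from the code (comma-separated, value in the fourth field, unit in the fifth); it is not a verified sample of the Elettra format. Note that mesh_unit keeps the raw fifth field, leading space and trailing newline included.

import tempfile

sample = ("t0, Monochromator energy, setpoint, 401.2, eV\n"
          "t0, Mesh, reading, 0.85, mA\n")
with tempfile.NamedTemporaryFile('w', suffix='.dat', delete=False,
                                 encoding='latin-1') as f:
    f.write(sample)
print(read_beamline_metadata(f.name))
# {'photon_energy': 401.2, 'photon_energy_unit': 'eV', 'mesh': 0.85, 'mesh_unit': ' mA\n'}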
30ea17aa8e12d9e828b613b0ece0e52f6bb41391
dmartin35/pronosfoot
external/lfp_tools.py
[ "MIT" ]
Python
escape_team_names
<not_specific>
def escape_team_names(mystr): """ temporary fix for solving ? characters in bad team names encoding from LFP's ICS calendar """ mystr = mystr.replace('N?MES','NÎMES') mystr = mystr.replace('SAINT-?TIENNE','SAINT-ÉTIENNE') mystr = mystr.replace('H?RAULT', 'HÉRAULT') return mystr
temporary fix for solving ? characters in bad team names encoding from LFP's ICS calendar
temporary fix for solving . characters in bad team names encoding from LFP's ICS calendar
[ "temporary", "fix", "for", "solving", ".", "characters", "in", "bad", "team", "names", "encoding", "from", "LFP", "'", "s", "ICS", "calendar" ]
def escape_team_names(mystr): mystr = mystr.replace('N?MES','NÎMES') mystr = mystr.replace('SAINT-?TIENNE','SAINT-ÉTIENNE') mystr = mystr.replace('H?RAULT', 'HÉRAULT') return mystr
[ "def", "escape_team_names", "(", "mystr", ")", ":", "mystr", "=", "mystr", ".", "replace", "(", "'N?MES'", ",", "'NÎMES')", "", "mystr", "=", "mystr", ".", "replace", "(", "'SAINT-?TIENNE'", ",", "'SAINT-ÉTIENNE')", "", "mystr", "=", "mystr", ".", "replace", "(", "'H?RAULT'", ",", "'HÉRAULT')", "", "return", "mystr" ]
temporary fix for solving ?
[ "temporary", "fix", "for", "solving", "?" ]
[ "\"\"\"\n temporary fix for solving ? characters in bad team names encoding\n from LFP's ICS calendar\n \"\"\"" ]
[ { "param": "mystr", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mystr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def escape_team_names(mystr):
    mystr = mystr.replace('N?MES', 'NÎMES')
    mystr = mystr.replace('SAINT-?TIENNE', 'SAINT-ÉTIENNE')
    mystr = mystr.replace('H?RAULT', 'HÉRAULT')
    return mystr
1,044
735
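Only the three hardcoded Ligue 1 names are repaired; everything else passes through untouched:

print(escape_team_names('N?MES - SAINT-?TIENNE'))  # NÎMES - SAINT-ÉTIENNE
print(escape_team_names('PARIS'))                  # PARIS (unchanged)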
da31eeb63ec84c49d5c76d9d2cf05af96aa9f274
scizzorz/slag
slag/__init__.py
[ "MIT" ]
Python
datetime_filter
<not_specific>
def datetime_filter(src, fmt="%b %e, %I:%M%P"):
    """Convert a datetime into a human-readable string."""
    if isinstance(src, int):
        src = datetime.fromtimestamp(src)
    return src.strftime(fmt)
Convert a datetime into a human-readable string.
Convert a datetime into a human-readable string.
[ "Convert", "a", "datetime", "into", "a", "human", "-", "readable", "string", "." ]
def datetime_filter(src, fmt="%b %e, %I:%M%P"): if isinstance(src, int): src = datetime.fromtimestamp(src) return src.strftime(fmt)
[ "def", "datetime_filter", "(", "src", ",", "fmt", "=", "\"%b %e, %I:%M%P\"", ")", ":", "if", "isinstance", "(", "src", ",", "int", ")", ":", "src", "=", "datetime", ".", "fromtimestamp", "(", "src", ")", "return", "src", ".", "strftime", "(", "fmt", ")" ]
Convert a datetime into a human-readable string.
[ "Convert", "a", "datetime", "into", "a", "human", "-", "readable", "string", "." ]
[ "\"\"\"Convert a datetime into a human-readable string.\"\"\"" ]
[ { "param": "src", "type": null }, { "param": "fmt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "src", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fmt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
# fromtimestamp() is a classmethod on the datetime class, so the class itself
# must be bound to the name `datetime` here; a bare `import datetime` would
# make datetime.fromtimestamp() fail with an AttributeError.
from datetime import datetime

def datetime_filter(src, fmt="%b %e, %I:%M%P"):
    if isinstance(src, int):
        src = datetime.fromtimestamp(src)
    return src.strftime(fmt)
1,045
488
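Usage with the class import as above. %e (space-padded day) and %P (lowercase am/pm) are glibc extensions to strftime, so the default format works on Linux but is not portable (it fails on Windows, for instance):

print(datetime_filter(1_700_000_000))  # e.g. 'Nov 14, 10:13pm'; exact output depends on local time zone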
f4fcd0a83e97a5eb0eb28191c40c6b857e321d3e
sonwell/biotools
src/sequence.py
[ "BSD-2-Clause" ]
Python
chop
null
def chop(seq, length=70):
    '''
    Yields a chunk of a sequence of no more than `length` characters,
    it is meant to be used to print fasta files.
    '''
    while seq:
        try:
            piece, seq = seq[:length], seq[length:]
        except IndexError:
            piece, seq = seq, ''
        yield piece
    raise StopIteration()
Yields a chunk of a sequence of no more than `length` characters, it is meant to be used to print fasta files.
Yields a chunk of a sequence of no more than `length` characters, it is meant to be used to print fasta files.
[ "Yields", "a", "chunk", "of", "a", "sequence", "of", "no", "more", "than", "`", "length", "`", "characters", "it", "is", "meant", "to", "be", "used", "to", "print", "fasta", "files", "." ]
def chop(seq, length=70): while seq: try: piece, seq = seq[:length], seq[length:] except IndexError: piece, seq = seq, '' yield piece raise StopIteration()
[ "def", "chop", "(", "seq", ",", "length", "=", "70", ")", ":", "while", "seq", ":", "try", ":", "piece", ",", "seq", "=", "seq", "[", ":", "length", "]", ",", "seq", "[", "length", ":", "]", "except", "IndexError", ":", "piece", ",", "seq", "=", "seq", ",", "''", "yield", "piece", "raise", "StopIteration", "(", ")" ]
Yields a chunk of a sequence of no more than `length` characters, it is meant to be used to print fasta files.
[ "Yields", "a", "chunk", "of", "a", "sequence", "of", "no", "more", "than", "`", "length", "`", "characters", "it", "is", "meant", "to", "be", "used", "to", "print", "fasta", "files", "." ]
[ "'''\n Yields a chunk of a sequence of no more than `length` characters,\n it is meant to be used to print fasta files.\n '''" ]
[ { "param": "seq", "type": null }, { "param": "length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def chop(seq, length=70):
    while seq:
        try:
            piece, seq = seq[:length], seq[length:]
        except IndexError:
            piece, seq = seq, ''
        yield piece
    raise StopIteration()
1,046
105
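Two caveats: the except IndexError branch is dead code (slicing a too-short sequence just returns what is there), and the trailing raise StopIteration() breaks under PEP 479, where Python 3.7+ converts it into a RuntimeError once the generator is exhausted. A Python 3 safe variant (chop_py3 is a hypothetical name) simply lets the generator return:

def chop_py3(seq, length=70):
    # same chunking logic; the generator ends naturally when seq is consumed
    while seq:
        piece, seq = seq[:length], seq[length:]
        yield piece

print(list(chop_py3('ACGTACGTACGT', length=5)))  # ['ACGTA', 'CGTAC', 'GT']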
9a12338c1e7dca9eaec9e7ba476f73550135a6b4
yahoo/SparkADMM
SparkADMM.py
[ "Apache-2.0" ]
Python
generateBatches
<not_specific>
def generateBatches(solver,key,iterator): """ Prepare main rdd that stores data, Z_l, U_l variables, and some statistics. Uses readPointBatch, from an abstract solver, that aggregates data read into a single variable (data). It is called by mapPartitionsWithIndex, so it receives as input: - the id/key of a partition - an iterator over the partition's data """ data, keys, stats = solver.readPointBatch(iterator) Ul = dict( zip (keys,[0.0]*len(keys)) ) Zl = dict( zip (keys,[0.0]*len(keys)) ) return [ (key, (data,stats, Ul,Zl,0.0, float('Inf') )) ]
Prepare main rdd that stores data, Z_l, U_l variables, and some statistics. Uses readPointBatch, from an abstract solver, that aggregates data read into a single variable (data). It is called by mapPartitionsWithIndex, so it receives as input: - the id/key of a partition - an iterator over the partition's data
Prepare main rdd that stores data, Z_l, U_l variables, and some statistics. Uses readPointBatch, from an abstract solver, that aggregates data read into a single variable (data). It is called by mapPartitionsWithIndex, so it receives as input: the id/key of a partition an iterator over the partition's data
[ "Prepare", "main", "rdd", "that", "stores", "data", "Z_l", "U_l", "variables", "and", "some", "statistics", ".", "Uses", "readPointBatch", "from", "an", "abstract", "solver", "that", "aggregates", "data", "read", "into", "a", "single", "variable", "(", "data", ")", ".", "It", "is", "called", "by", "mapPartitionsWithIndex", "so", "it", "receives", "as", "input", ":", "the", "id", "/", "key", "of", "a", "partition", "an", "iterator", "over", "the", "partition", "'", "s", "data" ]
def generateBatches(solver,key,iterator): data, keys, stats = solver.readPointBatch(iterator) Ul = dict( zip (keys,[0.0]*len(keys)) ) Zl = dict( zip (keys,[0.0]*len(keys)) ) return [ (key, (data,stats, Ul,Zl,0.0, float('Inf') )) ]
[ "def", "generateBatches", "(", "solver", ",", "key", ",", "iterator", ")", ":", "data", ",", "keys", ",", "stats", "=", "solver", ".", "readPointBatch", "(", "iterator", ")", "Ul", "=", "dict", "(", "zip", "(", "keys", ",", "[", "0.0", "]", "*", "len", "(", "keys", ")", ")", ")", "Zl", "=", "dict", "(", "zip", "(", "keys", ",", "[", "0.0", "]", "*", "len", "(", "keys", ")", ")", ")", "return", "[", "(", "key", ",", "(", "data", ",", "stats", ",", "Ul", ",", "Zl", ",", "0.0", ",", "float", "(", "'Inf'", ")", ")", ")", "]" ]
Prepare main rdd that stores data, Z_l, U_l variables, and some statistics.
[ "Prepare", "main", "rdd", "that", "stores", "data", "Z_l", "U_l", "variables", "and", "some", "statistics", "." ]
[ "\"\"\"\n Prepare main rdd that stores data, Z_l, U_l variables, and some statistics. \n\n Uses readPointBatch, from an abstract solver, that aggregates data read into a single variable (data).\n It is called by mapPartitionsWithIndex, so it receives as input:\n - the id/key of a partition\n - an iterator over the partition's data\n \"\"\"" ]
[ { "param": "solver", "type": null }, { "param": "key", "type": null }, { "param": "iterator", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "solver", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "iterator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generateBatches(solver, key, iterator):
    data, keys, stats = solver.readPointBatch(iterator)
    Ul = dict(zip(keys, [0.0] * len(keys)))
    Zl = dict(zip(keys, [0.0] * len(keys)))
    return [(key, (data, stats, Ul, Zl, 0.0, float('Inf')))]
1,047
707
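The function itself is plain Python, so it can be exercised without Spark by stubbing the solver (everything below is illustrative; readPointBatch normally aggregates an RDD partition into data, keys, and stats):

from types import SimpleNamespace

stub_solver = SimpleNamespace(
    readPointBatch=lambda it: (list(it), ['w1', 'w2'], {'n': 2}))

print(generateBatches(stub_solver, 0, iter(['rec a', 'rec b'])))
# [(0, (['rec a', 'rec b'], {'n': 2}, {'w1': 0.0, 'w2': 0.0}, {'w1': 0.0, 'w2': 0.0}, 0.0, inf))]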
ba06db1ddc25a47e1a5816315a2e80b0ce917de5
jojobozz/erddapy
erddapy/erddapy.py
[ "BSD-3-Clause" ]
Python
_check_substrings
<not_specific>
def _check_substrings(constraint): """ The tabledap protocol extends the OPeNDAP with these strings and we need to pass them intact to the URL builder. """ substrings = ["now", "min", "max"] return any([True for substring in substrings if substring in str(constraint)])
The tabledap protocol extends the OPeNDAP with these strings and we need to pass them intact to the URL builder.
The tabledap protocol extends the OPeNDAP with these strings and we need to pass them intact to the URL builder.
[ "The", "tabledap", "protocol", "extends", "the", "OPeNDAP", "with", "these", "strings", "and", "we", "need", "to", "pass", "them", "intact", "to", "the", "URL", "builder", "." ]
def _check_substrings(constraint): substrings = ["now", "min", "max"] return any([True for substring in substrings if substring in str(constraint)])
[ "def", "_check_substrings", "(", "constraint", ")", ":", "substrings", "=", "[", "\"now\"", ",", "\"min\"", ",", "\"max\"", "]", "return", "any", "(", "[", "True", "for", "substring", "in", "substrings", "if", "substring", "in", "str", "(", "constraint", ")", "]", ")" ]
The tabledap protocol extends the OPeNDAP with these strings and we need to pass them intact to the URL builder.
[ "The", "tabledap", "protocol", "extends", "the", "OPeNDAP", "with", "these", "strings", "and", "we", "need", "to", "pass", "them", "intact", "to", "the", "URL", "builder", "." ]
[ "\"\"\"\n The tabledap protocol extends the OPeNDAP with these strings and we\n need to pass them intact to the URL builder.\n\n \"\"\"" ]
[ { "param": "constraint", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "constraint", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _check_substrings(constraint):
    substrings = ["now", "min", "max"]
    return any([True for substring in substrings if substring in str(constraint)])
1,048
51
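The check is a plain substring match over the stringified constraint, so the relative-time and min/max keywords are caught wherever they appear (constraint values below are illustrative):

print(_check_substrings('time>=now-7days'))   # True: 'now' must reach the URL intact
print(_check_substrings('time>=2024-01-01'))  # False
print(_check_substrings(42.0))                # False: str() makes numbers safe to test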
a4fc517231a91024357602d7a8b58cfb80bff727
CSCfi/lega-mirroring
lega_mirroring/scripts/res.py
[ "Apache-2.0" ]
Python
parse_arguments
<not_specific>
def parse_arguments(arguments): """ This function parses command line inputs and returns them for main() :method: parameter that determines the operation of the script either encrypt or decrypt, can not be left empty :path: path to file to be worked on :path_to_config: full path to config.ini (or just config.ini if cwd: lega-mirroring) """ parser = argparse.ArgumentParser(description='Utilizes RES Microservice' ' to decrypt or encrypt files.') parser.add_argument('method', help='encrypt or decrypt') parser.add_argument('path', help='path to file to be worked on') parser.add_argument('config', help='location of config.ini') return parser.parse_args(arguments)
This function parses command line inputs and returns them for main() :method: parameter that determines the operation of the script either encrypt or decrypt, can not be left empty :path: path to file to be worked on :path_to_config: full path to config.ini (or just config.ini if cwd: lega-mirroring)
This function parses command line inputs and returns them for main()
[ "This", "function", "parses", "command", "line", "inputs", "and", "returns", "them", "for", "main", "()" ]
def parse_arguments(arguments): parser = argparse.ArgumentParser(description='Utilizes RES Microservice' ' to decrypt or encrypt files.') parser.add_argument('method', help='encrypt or decrypt') parser.add_argument('path', help='path to file to be worked on') parser.add_argument('config', help='location of config.ini') return parser.parse_args(arguments)
[ "def", "parse_arguments", "(", "arguments", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Utilizes RES Microservice'", "' to decrypt or encrypt files.'", ")", "parser", ".", "add_argument", "(", "'method'", ",", "help", "=", "'encrypt or decrypt'", ")", "parser", ".", "add_argument", "(", "'path'", ",", "help", "=", "'path to file to be worked on'", ")", "parser", ".", "add_argument", "(", "'config'", ",", "help", "=", "'location of config.ini'", ")", "return", "parser", ".", "parse_args", "(", "arguments", ")" ]
This function parses command line inputs and returns them for main()
[ "This", "function", "parses", "command", "line", "inputs", "and", "returns", "them", "for", "main", "()" ]
[ "\"\"\"\n This function parses command line inputs and returns them for main()\n\n :method: parameter that determines the operation of the script\n either encrypt or decrypt, can not be left empty\n :path: path to file to be worked on\n :path_to_config: full path to config.ini (or just config.ini if\n cwd: lega-mirroring)\n \"\"\"" ]
[ { "param": "arguments", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "arguments", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "method", "docstring": "parameter that determines the operation of the script\neither encrypt or decrypt, can not be left empty", "docstring_tokens": [ "parameter", "that", "determines", "the", "operation", "of", "the", "script", "either", "encrypt", "or", "decrypt", "can", "not", "be", "left", "empty" ] }, { "identifier": "path", "docstring": "path to file to be worked on", "docstring_tokens": [ "path", "to", "file", "to", "be", "worked", "on" ] }, { "identifier": "path_to_config", "docstring": "full path to config.ini (or just config.ini if\ncwd: lega-mirroring)", "docstring_tokens": [ "full", "path", "to", "config", ".", "ini", "(", "or", "just", "config", ".", "ini", "if", "cwd", ":", "lega", "-", "mirroring", ")" ] } ] }
import argparse

def parse_arguments(arguments):
    parser = argparse.ArgumentParser(description='Utilizes RES Microservice'
                                                 ' to decrypt or encrypt files.')
    parser.add_argument('method', help='encrypt or decrypt')
    parser.add_argument('path', help='path to file to be worked on')
    parser.add_argument('config', help='location of config.ini')
    return parser.parse_args(arguments)
1,049
435
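A call sketch (the file path is a placeholder; parse_arguments and the argparse import as defined above):

args = parse_arguments(['decrypt', '/data/example.file', 'config.ini'])
print(args.method, args.path, args.config)  # decrypt /data/example.file config.ini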
604b9cd9461aa2e926e060b5afcb1c37919797c5
GeoSander/geocore-pygeoapi
geocore_pygeoapi/provider/cgp.py
[ "MIT" ]
Python
_getbbox
<not_specific>
def _getbbox(coords): """ Creates a bounding box array from a coordinate list. """ minx = float('NaN') miny = float('NaN') maxx = float('NaN') maxy = float('NaN') for part in coords: for x, y in part: minx = min(x, minx) miny = min(y, miny) maxx = max(x, maxx) maxy = max(y, maxy) return [minx, miny, maxx, maxy]
Creates a bounding box array from a coordinate list.
Creates a bounding box array from a coordinate list.
[ "Creates", "a", "bounding", "box", "array", "from", "a", "coordinate", "list", "." ]
def _getbbox(coords): minx = float('NaN') miny = float('NaN') maxx = float('NaN') maxy = float('NaN') for part in coords: for x, y in part: minx = min(x, minx) miny = min(y, miny) maxx = max(x, maxx) maxy = max(y, maxy) return [minx, miny, maxx, maxy]
[ "def", "_getbbox", "(", "coords", ")", ":", "minx", "=", "float", "(", "'NaN'", ")", "miny", "=", "float", "(", "'NaN'", ")", "maxx", "=", "float", "(", "'NaN'", ")", "maxy", "=", "float", "(", "'NaN'", ")", "for", "part", "in", "coords", ":", "for", "x", ",", "y", "in", "part", ":", "minx", "=", "min", "(", "x", ",", "minx", ")", "miny", "=", "min", "(", "y", ",", "miny", ")", "maxx", "=", "max", "(", "x", ",", "maxx", ")", "maxy", "=", "max", "(", "y", ",", "maxy", ")", "return", "[", "minx", ",", "miny", ",", "maxx", ",", "maxy", "]" ]
Creates a bounding box array from a coordinate list.
[ "Creates", "a", "bounding", "box", "array", "from", "a", "coordinate", "list", "." ]
[ "\"\"\" Creates a bounding box array from a coordinate list. \"\"\"" ]
[ { "param": "coords", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "coords", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _getbbox(coords):
    minx = float('NaN')
    miny = float('NaN')
    maxx = float('NaN')
    maxy = float('NaN')
    for part in coords:
        for x, y in part:
            minx = min(x, minx)
            miny = min(y, miny)
            maxx = max(x, maxx)
            maxy = max(y, maxy)
    return [minx, miny, maxx, maxy]
1,051
265
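Note the NaN seeding only works because each fresh coordinate is passed as the first argument to min/max: comparisons against NaN evaluate False, so the seed loses on first contact. A worked example (coordinates invented):

coords = [[(0.0, 0.0), (2.0, 1.0)], [(-1.0, 3.0)]]  # a two-part geometry
print(_getbbox(coords))  # [-1.0, 0.0, 2.0, 3.0]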
3d850d08edfde5194c347e25c703770807f4fbdc
ekrell/fujin
env_setup.py
[ "MIT" ]
Python
printEnv
null
def printEnv(env):
    '''Prints formatted contents of environment.

    Args:
        env (dict of 'environment'): See DEVELOPMENT.md data structs.

    Returns:
        None
    '''
    print("------")
    print("Start coordinates")
    print("   {}".format(env["start"]))
    print("Target coordinates")
    print("   {}".format(env["target"]))
    print("Region images:")
    for i in range(len(env["occupancy"])):
        print("   {}".format(env["occupancy"][i]))
    print("Vector u images:")
    if env["ucomponents"] is not None:
        for i in range(len(env["ucomponents"])):
            print("   Vector {} : {}".format(i, env["ucomponents"][i]))
    else:
        print("none")
    print("Vector v images:")
    if env["vcomponents"] is not None:
        for i in range(len(env["vcomponents"])):
            print("   Vector {} : {}".format(i, env["vcomponents"][i]))
    else:
        print("none")
    #print("Vector weights:")
    #for i in range(len(env["weights"])):
    #    print("   Vector {} : {}".format(i, env["weights"][i]))
    #print("Vector errors:")
    #for i in range(len(env["errors"])):
    #    print("   Vector {} : +/- {}".format(i, env["errors"][i]))
    print("------")
Prints formatted contents of environment. Args: env (dict of 'environment'): See DEVELOPMENT.md data structs. Returns: None
Prints formatted contents of environment.
[ "Prints", "formatted", "contents", "of", "environment", "." ]
def printEnv(env): print("------") print("Start coordinates") print(" {}".format(env["start"])) print("Target coordinates") print(" {}".format(env["target"])) print("Region images:") for i in range(len(env["occupancy"])): print(" {}".format(env["occupancy"][i])) print("Vector u images:") if env["ucomponents"] is not None: for i in range(len(env["ucomponents"])): print(" Vector {} : {}".format(i, env["ucomponents"][i])) else: print("none") print("Vector v images:") if env["vcomponents"] is not None: for i in range(len(env["vcomponents"])): print(" Vector {} : {}".format(i, env["vcomponents"][i])) else: print("none") print("------")
[ "def", "printEnv", "(", "env", ")", ":", "print", "(", "\"------\"", ")", "print", "(", "\"Start coordinates\"", ")", "print", "(", "\" {}\"", ".", "format", "(", "env", "[", "\"start\"", "]", ")", ")", "print", "(", "\"Target coordinates\"", ")", "print", "(", "\" {}\"", ".", "format", "(", "env", "[", "\"target\"", "]", ")", ")", "print", "(", "\"Region images:\"", ")", "for", "i", "in", "range", "(", "len", "(", "env", "[", "\"occupancy\"", "]", ")", ")", ":", "print", "(", "\" {}\"", ".", "format", "(", "env", "[", "\"occupancy\"", "]", "[", "i", "]", ")", ")", "print", "(", "\"Vector u images:\"", ")", "if", "env", "[", "\"ucomponents\"", "]", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "env", "[", "\"ucomponents\"", "]", ")", ")", ":", "print", "(", "\" Vector {} : {}\"", ".", "format", "(", "i", ",", "env", "[", "\"ucomponents\"", "]", "[", "i", "]", ")", ")", "else", ":", "print", "(", "\"none\"", ")", "print", "(", "\"Vector v images:\"", ")", "if", "env", "[", "\"vcomponents\"", "]", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "env", "[", "\"vcomponents\"", "]", ")", ")", ":", "print", "(", "\" Vector {} : {}\"", ".", "format", "(", "i", ",", "env", "[", "\"vcomponents\"", "]", "[", "i", "]", ")", ")", "else", ":", "print", "(", "\"none\"", ")", "print", "(", "\"------\"", ")" ]
Prints formatted contents of environment.
[ "Prints", "formatted", "contents", "of", "environment", "." ]
[ "'''Prints formatted contents of environment.\n\n Args:\n env (dict of 'environment'): See DEVELOPMENT.md data structs.\n\n Returns:\n None\n '''", "#print(\"Vector weights:\")", "#for i in range(len(env[\"weights\"])):", "# print(\" Vector {} : {}\".format(i, env[\"weights\"][i]))", "#print(\"Vector errors:\")", "#for i in range(len(env[\"errors\"])):", "# print(\" Vector {} : +/- {}\".format(i, env[\"errors\"][i]))" ]
[ { "param": "env", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "env", "type": null, "docstring": "See DEVELOPMENT.md data structs.", "docstring_tokens": [ "See", "DEVELOPMENT", ".", "md", "data", "structs", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def printEnv(env): print("------") print("Start coordinates") print(" {}".format(env["start"])) print("Target coordinates") print(" {}".format(env["target"])) print("Region images:") for i in range(len(env["occupancy"])): print(" {}".format(env["occupancy"][i])) print("Vector u images:") if env["ucomponents"] is not None: for i in range(len(env["ucomponents"])): print(" Vector {} : {}".format(i, env["ucomponents"][i])) else: print("none") print("Vector v images:") if env["vcomponents"] is not None: for i in range(len(env["vcomponents"])): print(" Vector {} : {}".format(i, env["vcomponents"][i])) else: print("none") print("------")
1053
677
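A minimal usage sketch for the printEnv record. The keys match those the function reads (start, target, occupancy, ucomponents, vcomponents); every value is an illustrative stand-in, not real fujin data.

env = {
    "start": (0, 0),
    "target": (9, 9),
    "occupancy": ["region_0.png"],  # hypothetical raster filename
    "ucomponents": None,            # no vector fields in this sketch
    "vcomponents": None,
}
printEnv(env)  # prints the coordinates, the one region image, and "none" for both vector sets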
4a276fae313692800c347880b9d90906557fa7a6
jwallnoefer/graphepp
src/graphepp/graphepp.py
[ "MIT" ]
Python
_mask_k
<not_specific>
def _mask_k(j, graph, subset): """Spread a bit string on a subset to the whole bitstring. Takes an int representing a bit string of length len(myset) and spreads it to a length graph.N bit string with the bits set at the correct places. Example: graph.N = 4, myset = (0, 2), j=3 (bitstring "11") will return 10 (bitstring "1010") Example: graph.N = 4, myset = (0, 2), j=1 (bitstring "01") will return 2 (bitstring "0010") Parameters ---------- j : int Representing a bit string of length len(subset). graph : Graph The graph, basically just here for graph.N subset : tuple of ints A subset of vertices of the graph. Ideally use a tuple not a list to allow caching to work. Returns ------- int Representing a bit string of length graph.N, i.e. `j` spread out over the appropriate positions in the bit string. """ m = ["0"] * graph.N short_string = format(j, "0" + str(len(subset)) + "b") for bit, idx in zip(short_string, subset): m[idx] = bit long_string = "".join(m) return int(long_string, base=2)
Spread a bit string on a subset to the whole bitstring. Takes an int representing a bit string of length len(myset) and spreads it to a length graph.N bit string with the bits set at the correct places. Example: graph.N = 4, myset = (0, 2), j=3 (bitstring "11") will return 10 (bitstring "1010") Example: graph.N = 4, myset = (0, 2), j=1 (bitstring "01") will return 2 (bitstring "0010") Parameters ---------- j : int Representing a bit string of length len(subset). graph : Graph The graph, basically just here for graph.N subset : tuple of ints A subset of vertices of the graph. Ideally use a tuple not a list to allow caching to work. Returns ------- int Representing a bit string of length graph.N, i.e. `j` spread out over the appropriate positions in the bit string.
Spread a bit string on a subset to the whole bitstring. Takes an int representing a bit string of length len(myset) and spreads it to a length graph.N bit string with the bits set at the correct places. Parameters j : int Representing a bit string of length len(subset). graph : Graph The graph, basically just here for graph.N subset : tuple of ints A subset of vertices of the graph. Ideally use a tuple not a list to allow caching to work. Returns
[ "Spread", "a", "bit", "string", "on", "a", "subset", "to", "the", "whole", "bitstring", ".", "Takes", "an", "int", "representing", "a", "bit", "string", "of", "length", "len", "(", "myset", ")", "and", "spreads", "it", "to", "a", "length", "graph", ".", "N", "bit", "string", "with", "the", "bits", "set", "at", "the", "correct", "places", ".", "Parameters", "j", ":", "int", "Representing", "a", "bit", "string", "of", "length", "len", "(", "subset", ")", ".", "graph", ":", "Graph", "The", "graph", "basically", "just", "here", "for", "graph", ".", "N", "subset", ":", "tuple", "of", "ints", "A", "subset", "of", "vertices", "of", "the", "graph", ".", "Ideally", "use", "a", "tuple", "not", "a", "list", "to", "allow", "caching", "to", "work", ".", "Returns" ]
def _mask_k(j, graph, subset): m = ["0"] * graph.N short_string = format(j, "0" + str(len(subset)) + "b") for bit, idx in zip(short_string, subset): m[idx] = bit long_string = "".join(m) return int(long_string, base=2)
[ "def", "_mask_k", "(", "j", ",", "graph", ",", "subset", ")", ":", "m", "=", "[", "\"0\"", "]", "*", "graph", ".", "N", "short_string", "=", "format", "(", "j", ",", "\"0\"", "+", "str", "(", "len", "(", "subset", ")", ")", "+", "\"b\"", ")", "for", "bit", ",", "idx", "in", "zip", "(", "short_string", ",", "subset", ")", ":", "m", "[", "idx", "]", "=", "bit", "long_string", "=", "\"\"", ".", "join", "(", "m", ")", "return", "int", "(", "long_string", ",", "base", "=", "2", ")" ]
Spread a bit string on a subset to the whole bitstring.
[ "Spread", "a", "bit", "string", "on", "a", "subset", "to", "the", "whole", "bitstring", "." ]
[ "\"\"\"Spread a bit string on a subset to the whole bitstring.\n\n Takes an int representing a bit string of length len(myset)\n and spreads it to a length graph.N bit string with the bits set at the\n correct places.\n\n Example: graph.N = 4, myset = (0, 2), j=3 (bitstring \"11\")\n will return 10 (bitstring \"1010\")\n\n Example: graph.N = 4, myset = (0, 2), j=1 (bitstring \"01\")\n will return 2 (bitstring \"0010\")\n\n Parameters\n ----------\n j : int\n Representing a bit string of length len(subset).\n graph : Graph\n The graph, basically just here for graph.N\n subset : tuple of ints\n A subset of vertices of the graph. Ideally use a tuple not a list to\n allow caching to work.\n\n Returns\n -------\n int\n Representing a bit string of length graph.N, i.e. `j` spread out over\n the appropriate positions in the bit string.\n\n \"\"\"" ]
[ { "param": "j", "type": null }, { "param": "graph", "type": null }, { "param": "subset", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "j", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "subset", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _mask_k(j, graph, subset): m = ["0"] * graph.N short_string = format(j, "0" + str(len(subset)) + "b") for bit, idx in zip(short_string, subset): m[idx] = bit long_string = "".join(m) return int(long_string, base=2)
1054
135
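A sketch reproducing the docstring examples. Since _mask_k only reads graph.N, a SimpleNamespace stands in for the real Graph class.

from types import SimpleNamespace

graph = SimpleNamespace(N=4)       # stand-in: only the .N attribute is used
print(_mask_k(3, graph, (0, 2)))   # expected 10, bitstring "1010"
print(_mask_k(1, graph, (0, 2)))   # expected 2, bitstring "0010"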
f4cac924fe9b2db3e848c417028153f00df8da25
josebrwn/python-interview
project/euler.py
[ "MIT" ]
Python
pythagorean_triplet
((int, int, int), int)
def pythagorean_triplet(n: int) -> ((int, int, int), int): """find the Pythagorean triplet whose sum equals n and return its product. """ import math for c in reversed(range(int(math.sqrt(n)), n//2)): for b in reversed(range(1, c)): a = n - c - b if a > c: break # print("c", c, "b", b, "a", a) if (c**2 == a**2 + b**2): return (a, b, c), a*b*c
find the Pythagorean triplet whose sum equals n and return its product.
find the Pythagorean triplet whose sum equals n and return its product.
[ "find", "the", "Pythagorean", "triplet", "whose", "sum", "equals", "n", "and", "return", "its", "product", "." ]
def pythagorean_triplet(n: int) -> ((int, int, int), int): import math for c in reversed(range(int(math.sqrt(n)), n//2)): for b in reversed(range(1, c)): a = n - c - b if a > c: break if (c**2 == a**2 + b**2): return (a, b, c), a*b*c
[ "def", "pythagorean_triplet", "(", "n", ":", "int", ")", "->", "(", "(", "int", ",", "int", ",", "int", ")", ",", "int", ")", ":", "import", "math", "for", "c", "in", "reversed", "(", "range", "(", "int", "(", "math", ".", "sqrt", "(", "n", ")", ")", ",", "n", "//", "2", ")", ")", ":", "for", "b", "in", "reversed", "(", "range", "(", "1", ",", "c", ")", ")", ":", "a", "=", "n", "-", "c", "-", "b", "if", "a", ">", "c", ":", "break", "if", "(", "c", "**", "2", "==", "a", "**", "2", "+", "b", "**", "2", ")", ":", "return", "(", "a", ",", "b", ",", "c", ")", ",", "a", "*", "b", "*", "c" ]
find the Pythagorean triplet whose sum equals n and return its product.
[ "find", "the", "Pythagorean", "triplet", "whose", "sum", "equals", "n", "and", "return", "its", "product", "." ]
[ "\"\"\"find the Pythagorean triplet whose sum equals n and return its product. \"\"\"", "# print(\"c\", c, \"b\", b, \"a\", a)\r" ]
[ { "param": "n", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def pythagorean_triplet(n: int) -> ((int, int, int), int): import math for c in reversed(range(int(math.sqrt(n)), n//2)): for b in reversed(range(1, c)): a = n - c - b if a > c: break if (c**2 == a**2 + b**2): return (a, b, c), a*b*c
1055
279
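A sketch exercising the record on the classic Project Euler #9 input. Note the function implicitly returns None when no triplet sums to n, since both loops can fall through.

triplet, product = pythagorean_triplet(1000)
print(triplet, product)  # expected (200, 375, 425) 31875000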
2c269a0d96b758c988c2b6d52c495517e5754e95
smartdatalake/loci
loci/time_series.py
[ "Apache-2.0" ]
Python
__get_date_range
null
def __get_date_range(start, end, intv, date_format='%m/%d/%Y'): """ Returns a list of dates within a given range. :param start: The starting date. :type start: datetime :param end: The ending date. :type end: datetime :param intv: The interval between dates (in days). :type intv: int :return: A list of dates. :rtype: list """ # start = datetime.strptime(start,"%m/%d/%Y") # end = datetime.strptime(end,"%m/%d/%Y") diff = (end - start ) / intv for i in range(intv): yield (start + diff * i).strftime(date_format) yield end.strftime(date_format)
Returns a list of dates within a given range. :param start: The starting date. :type start: datetime :param end: The ending date. :type end: datetime :param intv: The interval between dates (in days). :type intv: int :return: A list of dates. :rtype: list
Returns a list of dates within a given range.
[ "Returns", "a", "list", "of", "dates", "within", "a", "given", "range", "." ]
def __get_date_range(start, end, intv, date_format='%m/%d/%Y'): diff = (end - start ) / intv for i in range(intv): yield (start + diff * i).strftime(date_format) yield end.strftime(date_format)
[ "def", "__get_date_range", "(", "start", ",", "end", ",", "intv", ",", "date_format", "=", "'%m/%d/%Y'", ")", ":", "diff", "=", "(", "end", "-", "start", ")", "/", "intv", "for", "i", "in", "range", "(", "intv", ")", ":", "yield", "(", "start", "+", "diff", "*", "i", ")", ".", "strftime", "(", "date_format", ")", "yield", "end", ".", "strftime", "(", "date_format", ")" ]
Returns a list of dates within a given range.
[ "Returns", "a", "list", "of", "dates", "within", "a", "given", "range", "." ]
[ "\"\"\"\n Returns a list of dates within a given range.\n \n :param start: The starting date.\n :type start: datetime\n :param end: The ending date.\n :type end: datetime\n :param intv: The interval between dates (in days).\n :type intv: int\n :return: A list of dates.\n :rtype: list\n \"\"\"", "# start = datetime.strptime(start,\"%m/%d/%Y\")", "# end = datetime.strptime(end,\"%m/%d/%Y\")" ]
[ { "param": "start", "type": null }, { "param": "end", "type": null }, { "param": "intv", "type": null }, { "param": "date_format", "type": null } ]
{ "returns": [ { "docstring": "A list of dates.", "docstring_tokens": [ "A", "list", "of", "dates", "." ], "type": "list" } ], "raises": [], "params": [ { "identifier": "start", "type": null, "docstring": "The starting date.", "docstring_tokens": [ "The", "starting", "date", "." ], "default": null, "is_optional": null }, { "identifier": "end", "type": null, "docstring": "The ending date.", "docstring_tokens": [ "The", "ending", "date", "." ], "default": null, "is_optional": null }, { "identifier": "intv", "type": null, "docstring": "The interval between dates (in days).", "docstring_tokens": [ "The", "interval", "between", "dates", "(", "in", "days", ")", "." ], "default": null, "is_optional": null }, { "identifier": "date_format", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __get_date_range(start, end, intv, date_format='%m/%d/%Y'): diff = (end - start ) / intv for i in range(intv): yield (start + diff * i).strftime(date_format) yield end.strftime(date_format)
1057
993
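A usage sketch for the generator above, assuming it is in scope. Despite the docstring's "(in days)", intv is the number of sub-intervals: the generator yields intv + 1 date strings, endpoints included.

from datetime import datetime

start, end = datetime(2020, 1, 1), datetime(2020, 1, 5)
print(list(__get_date_range(start, end, 4)))
# expected ['01/01/2020', '01/02/2020', '01/03/2020', '01/04/2020', '01/05/2020']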
e0fc46851dbb41c2d75b9b2c14e1ab646c27dbe4
klimzaporojets/e2e-knowledge-ie
src/evaluation_script.py
[ "Apache-2.0" ]
Python
phi4_mention_centric
<not_specific>
def phi4_mention_centric(gold_clustering, predicted_clustering): """ Subroutine for ceafe. Computes the mention F measure between gold and predicted mentions in a cluster. (kzaporoj) - Mention centric (sum of the number of mentions in intersected clusters) """ return ( len([mention for mention in gold_clustering if mention in predicted_clustering]) )
Subroutine for ceafe. Computes the mention F measure between gold and predicted mentions in a cluster. (kzaporoj) - Mention centric (sum of the number of mentions in intersected clusters)
Subroutine for ceafe. Computes the mention F measure between gold and predicted mentions in a cluster. (kzaporoj) - Mention centric (sum of the number of mentions in intersected clusters)
[ "Subroutine", "for", "ceafe", ".", "Computes", "the", "mention", "F", "measure", "between", "gold", "and", "predicted", "mentions", "in", "a", "cluster", ".", "(", "kzaporoj", ")", "-", "Mention", "centric", "(", "sum", "of", "the", "number", "of", "mentions", "in", "intersected", "clusters", ")" ]
def phi4_mention_centric(gold_clustering, predicted_clustering): return ( len([mention for mention in gold_clustering if mention in predicted_clustering]) )
[ "def", "phi4_mention_centric", "(", "gold_clustering", ",", "predicted_clustering", ")", ":", "return", "(", "len", "(", "[", "mention", "for", "mention", "in", "gold_clustering", "if", "mention", "in", "predicted_clustering", "]", ")", ")" ]
Subroutine for ceafe.
[ "Subroutine", "for", "ceafe", "." ]
[ "\"\"\"\n Subroutine for ceafe. Computes the mention F measure between gold and\n predicted mentions in a cluster.\n (kzaporoj) - Mention centric (sum of the number of mentions in intersected clusters)\n \"\"\"" ]
[ { "param": "gold_clustering", "type": null }, { "param": "predicted_clustering", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "gold_clustering", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "predicted_clustering", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def phi4_mention_centric(gold_clustering, predicted_clustering): return ( len([mention for mention in gold_clustering if mention in predicted_clustering]) )
1058
667
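A small sketch of the overlap count. Mentions appear here as (start, end) span tuples, but any equatable items work, since the function only tests list membership.

gold = [(0, 2), (5, 7), (9, 11)]
predicted = [(5, 7), (9, 11), (13, 14)]
print(phi4_mention_centric(gold, predicted))  # expected 2 -- two shared mentions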
beb5a34f25b0a7a8e98b61add36f3c2c0f33209e
orion-search/orion-backend
orion/packages/mag/parsing_mag_data.py
[ "MIT" ]
Python
parse_fos
<not_specific>
def parse_fos(response, paper_id): """Parse the fields of study of a paper from a MAG API response. Args: response (json): Response from MAG API in JSON format. Contains all paper information. paper_id (int): Paper ID. Returns: fields_of_study (:obj:`list` of :obj:`dict`): List of dictionaries with fields of study information. There's one dictionary per field of study. paper_with_fos (:obj:`list` of :obj:`dict`): Matching fields of study and paper IDs. """ # two outputs: fos_id with fos_name, fos_id with paper_id paper_with_fos = [] fields_of_study = [] for fos in response["F"]: # mag_fields_of_study fields_of_study.append({"id": fos["FId"], "name": fos["DFN"]}) # mag_paper_fields_of_study paper_with_fos.append({"field_of_study_id": fos["FId"], "paper_id": paper_id}) return paper_with_fos, fields_of_study
Parse the fields of study of a paper from a MAG API response. Args: response (json): Response from MAG API in JSON format. Contains all paper information. paper_id (int): Paper ID. Returns: fields_of_study (:obj:`list` of :obj:`dict`): List of dictionaries with fields of study information. There's one dictionary per field of study. paper_with_fos (:obj:`list` of :obj:`dict`): Matching fields of study and paper IDs.
Parse the fields of study of a paper from a MAG API response.
[ "Parse", "the", "fields", "of", "study", "of", "a", "paper", "from", "a", "MAG", "API", "response", "." ]
def parse_fos(response, paper_id): paper_with_fos = [] fields_of_study = [] for fos in response["F"]: fields_of_study.append({"id": fos["FId"], "name": fos["DFN"]}) paper_with_fos.append({"field_of_study_id": fos["FId"], "paper_id": paper_id}) return paper_with_fos, fields_of_study
[ "def", "parse_fos", "(", "response", ",", "paper_id", ")", ":", "paper_with_fos", "=", "[", "]", "fields_of_study", "=", "[", "]", "for", "fos", "in", "response", "[", "\"F\"", "]", ":", "fields_of_study", ".", "append", "(", "{", "\"id\"", ":", "fos", "[", "\"FId\"", "]", ",", "\"name\"", ":", "fos", "[", "\"DFN\"", "]", "}", ")", "paper_with_fos", ".", "append", "(", "{", "\"field_of_study_id\"", ":", "fos", "[", "\"FId\"", "]", ",", "\"paper_id\"", ":", "paper_id", "}", ")", "return", "paper_with_fos", ",", "fields_of_study" ]
Parse the fields of study of a paper from a MAG API response.
[ "Parse", "the", "fields", "of", "study", "of", "a", "paper", "from", "a", "MAG", "API", "response", "." ]
[ "\"\"\"Parse the fields of study of a paper from a MAG API response.\n\n Args:\n response (json): Response from MAG API in JSON format. Contains all paper information.\n paper_id (int): Paper ID.\n\n Returns:\n fields_of_study (:obj:`list` of :obj:`dict`): List of dictionaries with fields of study information.\n There's one dictionary per field of study.\n paper_with_fos (:obj:`list` of :obj:`dict`): Matching fields of study and paper IDs.\n\n \"\"\"", "# two outputs: fos_id with fos_name, fos_id with paper_id", "# mag_fields_of_study", "# mag_paper_fields_of_study" ]
[ { "param": "response", "type": null }, { "param": "paper_id", "type": null } ]
{ "returns": [ { "docstring": "fields_of_study (:obj:`list` of :obj:`dict`): List of dictionaries with fields of study information.\nThere's one dictionary per field of study.\npaper_with_fos (:obj:`list` of :obj:`dict`): Matching fields of study and paper IDs.", "docstring_tokens": [ "fields_of_study", "(", ":", "obj", ":", "`", "list", "`", "of", ":", "obj", ":", "`", "dict", "`", ")", ":", "List", "of", "dictionaries", "with", "fields", "of", "study", "information", ".", "There", "'", "s", "one", "dictionary", "per", "field", "of", "study", ".", "paper_with_fos", "(", ":", "obj", ":", "`", "list", "`", "of", ":", "obj", ":", "`", "dict", "`", ")", ":", "Matching", "fields", "of", "study", "and", "paper", "IDs", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "response", "type": null, "docstring": "Response from MAG API in JSON format. Contains all paper information.", "docstring_tokens": [ "Response", "from", "MAG", "API", "in", "JSON", "format", ".", "Contains", "all", "paper", "information", "." ], "default": null, "is_optional": false }, { "identifier": "paper_id", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def parse_fos(response, paper_id): paper_with_fos = [] fields_of_study = [] for fos in response["F"]: fields_of_study.append({"id": fos["FId"], "name": fos["DFN"]}) paper_with_fos.append({"field_of_study_id": fos["FId"], "paper_id": paper_id}) return paper_with_fos, fields_of_study
1059
130
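A sketch with a hand-built dictionary shaped like the MAG API payload the record expects; the field-of-study IDs and names are illustrative, not verified MAG identifiers.

response = {"F": [{"FId": 41008148, "DFN": "Computer science"},
                  {"FId": 119857082, "DFN": "Machine learning"}]}
paper_with_fos, fields_of_study = parse_fos(response, paper_id=12345)
print(fields_of_study)  # [{'id': 41008148, 'name': 'Computer science'}, ...]
print(paper_with_fos)   # [{'field_of_study_id': 41008148, 'paper_id': 12345}, ...]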
95c047d7601d203a886c753239620785582fd5fb
harshitsinghai77/jetBrains-PyChamps-hackathon
backend/model/nst.py
[ "MIT" ]
Python
high_pass_x_y
<not_specific>
def high_pass_x_y(image): ''' Adds total variation loss to reduce the high frequency artifacts. Applies high frequency explicit regularization term on the high frequency components of the image.''' x_var = image[:,:,1:,:] - image[:,:,:-1,:] y_var = image[:,1:,:,:] - image[:,:-1,:,:] return x_var, y_var
Adds total variation loss to reduce the high frequency artifacts. Applies high frequency explicit regularization term on the high frequency components of the image.
Adds total variation loss to reduce the high frequency artifacts. Applies high frequency explicit regularization term on the high frequency components of the image.
[ "Adds", "total", "variation", "loss", "to", "reduce", "the", "high", "frequency", "artifacts", ".", "Applies", "high", "frequency", "explicit", "regularization", "term", "on", "the", "high", "frequency", "components", "of", "the", "image", "." ]
def high_pass_x_y(image): x_var = image[:,:,1:,:] - image[:,:,:-1,:] y_var = image[:,1:,:,:] - image[:,:-1,:,:] return x_var, y_var
[ "def", "high_pass_x_y", "(", "image", ")", ":", "x_var", "=", "image", "[", ":", ",", ":", ",", "1", ":", ",", ":", "]", "-", "image", "[", ":", ",", ":", ",", ":", "-", "1", ",", ":", "]", "y_var", "=", "image", "[", ":", ",", "1", ":", ",", ":", ",", ":", "]", "-", "image", "[", ":", ",", ":", "-", "1", ",", ":", ",", ":", "]", "return", "x_var", ",", "y_var" ]
Adds total variation loss to reduce the high frequency artifacts.
[ "Adds", "total", "variation", "loss", "to", "reduce", "the", "high", "frequency", "artifacts", "." ]
[ "''' \n Adds total variation loss to reduce the high frequency artifacts. \n Applies high frequency explicit regularization term on the high \n frequency components of the image.'''" ]
[ { "param": "image", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "image", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def high_pass_x_y(image): x_var = image[:,:,1:,:] - image[:,:,:-1,:] y_var = image[:,1:,:,:] - image[:,:-1,:,:] return x_var, y_var
1060
386
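The record operates on batched NHWC image tensors, but the slicing is framework-agnostic, so a NumPy array stands in for a tf.Tensor in this sketch.

import numpy as np

image = np.arange(6, dtype=np.float32).reshape(1, 2, 3, 1)  # one 2x3 single-channel image
x_var, y_var = high_pass_x_y(image)
print(x_var.ravel())  # expected [1. 1. 1. 1.] -- differences along the width axis
print(y_var.ravel())  # expected [3. 3. 3.]    -- differences along the height axis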
a874e821cffbbab442aca722def8f5078536f179
omid55/teams_in_games
Experiment1_analysis_champion_levels/utils.py
[ "Apache-2.0" ]
Python
load_data
<not_specific>
def load_data(filename): """ Loads data from filename stored using pickle Args: filename: Returns: data """ with open(filename, 'r') as f: data = pickle.load(f) return data
Loads data from filename stored using pickle Args: filename: Returns: data
Loads data from filename stored using pickle
[ "Loads", "data", "from", "filename", "stored", "using", "pickle" ]
def load_data(filename): with open(filename, 'r') as f: data = pickle.load(f) return data
[ "def", "load_data", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "data", "=", "pickle", ".", "load", "(", "f", ")", "return", "data" ]
Loads data from filename stored using pickle
[ "Loads", "data", "from", "filename", "stored", "using", "pickle" ]
[ "\"\"\"\n Loads data from filename stored using pickle\n Args:\n filename:\n\n Returns:\n data\n \"\"\"" ]
[ { "param": "filename", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pickle def load_data(filename): with open(filename, 'r') as f: data = pickle.load(f) return data
1061
515
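A round-trip sketch. Under Python 3, pickle.load rejects the str data that the record's open(filename, 'r') produces, so the variant below, an assumption of this sketch, opens in binary mode instead.

import pickle

def load_data_rb(filename):
    # Same shape as the record's load_data, but binary mode for Python 3 pickles.
    with open(filename, 'rb') as f:
        return pickle.load(f)

with open("data.pkl", "wb") as f:  # write a small fixture to round-trip
    pickle.dump({"answer": 42}, f)
print(load_data_rb("data.pkl"))    # expected {'answer': 42}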
adcd7edda604a38f19f1f58810da6aa1f6009681
orneryhippo/saturdays
rabbit/saturdays.py
[ "Apache-2.0" ]
Python
__make_ks
<not_specific>
def __make_ks(reverse=False): """ks are the bits of the ints from 0 to 7, used as truth flags for vars in predicates. I.e. 0 means the denial of a var, 0 for var X means notX 1 for var X means X is true """ ks = [] for i in range(2): for j in range(2): for k in range(2): if reverse: ks.append((1-i,1-j,1-k)) else: ks.append((i,j,k)) return ks
ks are the bits of the ints from 0 to 7, used as truth flags for vars in predicates. I.e. 0 means the denial of a var, 0 for var X means notX 1 for var X means X is true
ks are the bits of the ints from 0 to 7, used as truth flags for vars in predicates.
[ "ks", "are", "the", "bits", "of", "the", "ints", "from", "0", "to", "7", "used", "as", "truth", "flags", "for", "vars", "in", "predicates", "." ]
def __make_ks(reverse=False): ks = [] for i in range(2): for j in range(2): for k in range(2): if reverse: ks.append((1-i,1-j,1-k)) else: ks.append((i,j,k)) return ks
[ "def", "__make_ks", "(", "reverse", "=", "False", ")", ":", "ks", "=", "[", "]", "for", "i", "in", "range", "(", "2", ")", ":", "for", "j", "in", "range", "(", "2", ")", ":", "for", "k", "in", "range", "(", "2", ")", ":", "if", "reverse", ":", "ks", ".", "append", "(", "(", "1", "-", "i", ",", "1", "-", "j", ",", "1", "-", "k", ")", ")", "else", ":", "ks", ".", "append", "(", "(", "i", ",", "j", ",", "k", ")", ")", "return", "ks" ]
ks are the bits of the ints from 0 to 7, used as truth flags for vars in predicates.
[ "ks", "are", "the", "bits", "of", "the", "ints", "from", "0", "to", "7", "used", "as", "truth", "flags", "for", "vars", "in", "predicates", "." ]
[ "\"\"\"ks are the bits of the ints from 0 to 7, used as truth flags\n\tfor vars in predicates. I.e. 0 means the denial of a var, 0 for var X means notX\n\t1 for var X means X is true\n\t\"\"\"" ]
[ { "param": "reverse", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "reverse", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __make_ks(reverse=False): ks = [] for i in range(2): for j in range(2): for k in range(2): if reverse: ks.append((1-i,1-j,1-k)) else: ks.append((i,j,k)) return ks
1062
479
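A sketch of the eight truth-flag tuples; the same table is what itertools.product yields, which the assert checks.

from itertools import product

ks = __make_ks()
print(ks[0], ks[-1])                          # expected (0, 0, 0) (1, 1, 1)
assert ks == list(product((0, 1), repeat=3))  # same eight tuples, same order
print(__make_ks(reverse=True)[0])             # expected (1, 1, 1) -- every bit flipped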
fc40a2ceb2de1c2d56c17697393713804d7da350
pstjohn/alphafold
alphafold/model/tf/utils.py
[ "Apache-2.0" ]
Python
tf_combine_mask
<not_specific>
def tf_combine_mask(*masks): """Take the intersection of float-valued masks.""" ret = 1 for m in masks: ret *= m return ret
Take the intersection of float-valued masks.
Take the intersection of float-valued masks.
[ "Take", "the", "intersection", "of", "float", "-", "valued", "masks", "." ]
def tf_combine_mask(*masks): ret = 1 for m in masks: ret *= m return ret
[ "def", "tf_combine_mask", "(", "*", "masks", ")", ":", "ret", "=", "1", "for", "m", "in", "masks", ":", "ret", "*=", "m", "return", "ret" ]
Take the intersection of float-valued masks.
[ "Take", "the", "intersection", "of", "float", "-", "valued", "masks", "." ]
[ "\"\"\"Take the intersection of float-valued masks.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def tf_combine_mask(*masks): ret = 1 for m in masks: ret *= m return ret
1063
473
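Despite the tf_ prefix, the record uses only elementwise multiplication, so any array type supporting * works; a NumPy sketch:

import numpy as np

m1 = np.array([1.0, 1.0, 0.0])
m2 = np.array([1.0, 0.0, 1.0])
print(tf_combine_mask(m1, m2))  # expected [1. 0. 0.] -- intersection of the float masks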
9b75380d75ab08daf796ec1e59ecf4092b1597c8
yeongjoonJu/transfer-learning-conv-ai
pretrain_dialog.py
[ "MIT" ]
Python
add_special_tokens_
null
def add_special_tokens_(model, tokenizer, attr_to_special_token): """ Add special tokens to the tokenizer and the model if they have not already been added. """ orig_num_tokens = len(tokenizer.encoder) num_added_tokens = tokenizer.add_special_tokens(attr_to_special_token) # doesn't add if they are already there if num_added_tokens > 0: a = model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)
Add special tokens to the tokenizer and the model if they have not already been added.
Add special tokens to the tokenizer and the model if they have not already been added.
[ "Add", "special", "tokens", "to", "the", "tokenizer", "and", "the", "model", "if", "they", "have", "not", "already", "been", "added", "." ]
def add_special_tokens_(model, tokenizer, attr_to_special_token): orig_num_tokens = len(tokenizer.encoder) num_added_tokens = tokenizer.add_special_tokens(attr_to_special_token) if num_added_tokens > 0: a = model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)
[ "def", "add_special_tokens_", "(", "model", ",", "tokenizer", ",", "attr_to_special_token", ")", ":", "orig_num_tokens", "=", "len", "(", "tokenizer", ".", "encoder", ")", "num_added_tokens", "=", "tokenizer", ".", "add_special_tokens", "(", "attr_to_special_token", ")", "if", "num_added_tokens", ">", "0", ":", "a", "=", "model", ".", "resize_token_embeddings", "(", "new_num_tokens", "=", "orig_num_tokens", "+", "num_added_tokens", ")" ]
Add special tokens to the tokenizer and the model if they have not already been added.
[ "Add", "special", "tokens", "to", "the", "tokenizer", "and", "the", "model", "if", "they", "have", "not", "already", "been", "added", "." ]
[ "\"\"\" Add special tokens to the tokenizer and the model if they have not already been added. \"\"\"", "# doesn't add if they are already there" ]
[ { "param": "model", "type": null }, { "param": "tokenizer", "type": null }, { "param": "attr_to_special_token", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "model", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "tokenizer", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "attr_to_special_token", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_special_tokens_(model, tokenizer, attr_to_special_token): orig_num_tokens = len(tokenizer.encoder) num_added_tokens = tokenizer.add_special_tokens(attr_to_special_token) if num_added_tokens > 0: a = model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)
1064
438
681b3dce4bd3f76a9d5be7c721bfd305c9d645e4
pieter98/Algorithm-Implementations
Knuth_Morris_Pratt/python/tdoly/kmp_match.py
[ "MIT" ]
Python
failTable
<not_specific>
def failTable(pattern): '''Create the resulting table, which for length zero is None. Usage: >>>failTable('ABCDABD') [None, 0, 0, 0, 0, 1, 2, 0] >>>failTable('py py py python py') [None, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 1, 2] ''' result=[None] # Iterate across the rest of the characters, filling in the values for the # rest of the table. for i in range(len(pattern)): j = i while True: # If j hits zero, the recursion says that the resulting value is # zero since we're looking for the LPB of a single-character # string. if j == 0: result.append(0) break # Otherwise, if the character one step after the LPB matches the # next character in the sequence, then we can extend the LPB by one # character to get an LPB for the whole sequence. if pattern[result[j]] == pattern[i]: result.append(result[j] + 1) break # Finally, if neither of these hold, then we need to reduce the # subproblem to the LPB of the LPB. j = result[j] return result
Create the resulting table, which for length zero is None. Usage: >>>failTable('ABCDABD') [None, 0, 0, 0, 0, 1, 2, 0] >>>failTable('py py py python py') [None, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 1, 2]
Create the resulting table, which for length zero is None.
[ "Create", "the", "resulting", "table", "which", "for", "length", "zero", "is", "None", "." ]
def failTable(pattern): result=[None] for i in range(len(pattern)): j = i while True: if j == 0: result.append(0) break if pattern[result[j]] == pattern[i]: result.append(result[j] + 1) break j = result[j] return result
[ "def", "failTable", "(", "pattern", ")", ":", "result", "=", "[", "None", "]", "for", "i", "in", "range", "(", "len", "(", "pattern", ")", ")", ":", "j", "=", "i", "while", "True", ":", "if", "j", "==", "0", ":", "result", ".", "append", "(", "0", ")", "break", "if", "pattern", "[", "result", "[", "j", "]", "]", "==", "pattern", "[", "i", "]", ":", "result", ".", "append", "(", "result", "[", "j", "]", "+", "1", ")", "break", "j", "=", "result", "[", "j", "]", "return", "result" ]
Create the resulting table, which for length zero is None.
[ "Create", "the", "resulting", "table", "which", "for", "length", "zero", "is", "None", "." ]
[ "'''Create the resulting table, which for length zero is None.\n Usage:\n >>>failTable('ABCDABD')\n [None, 0, 0, 0, 0, 1, 2, 0]\n >>>failTable('py py py python py')\n [None, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 1, 2]\n '''", "# Iterate across the rest of the characters, filling in the values for the", "# rest of the table.", "# If j hits zero, the recursion says that the resulting value is", "# zero since we're looking for the LPB of a single-character", "# string.", "# Otherwise, if the character one step after the LPB matches the", "# next character in the sequence, then we can extend the LPB by one", "# character to get an LPB for the whole sequence.", "# Finally, if neither of these hold, then we need to reduce the", "# subproblem to the LPB of the LPB." ]
[ { "param": "pattern", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "pattern", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def failTable(pattern): result=[None] for i in range(len(pattern)): j = i while True: if j == 0: result.append(0) break if pattern[result[j]] == pattern[i]: result.append(result[j] + 1) break j = result[j] return result
1065
217
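A quick check against the docstring example; the returned table is the KMP longest-proper-border array, with a None placeholder at index 0.

table = failTable('ABCDABD')
print(table)  # expected [None, 0, 0, 0, 0, 1, 2, 0], matching the docstring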
5605df04559edec3d7b1c8c0fa80cd10c18b1905
chulminkw/efficientdet
efficientnetv2/effnetv2_model.py
[ "Apache-2.0" ]
Python
round_filters
<not_specific>
def round_filters(filters, mconfig, skip=False): """Round number of filters based on depth multiplier.""" multiplier = mconfig.width_coefficient divisor = mconfig.depth_divisor min_depth = mconfig.min_depth if skip or not multiplier: return filters filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) return int(new_filters)
Round number of filters based on depth multiplier.
Round number of filters based on depth multiplier.
[ "Round", "number", "of", "filters", "based", "on", "depth", "multiplier", "." ]
def round_filters(filters, mconfig, skip=False): multiplier = mconfig.width_coefficient divisor = mconfig.depth_divisor min_depth = mconfig.min_depth if skip or not multiplier: return filters filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) return int(new_filters)
[ "def", "round_filters", "(", "filters", ",", "mconfig", ",", "skip", "=", "False", ")", ":", "multiplier", "=", "mconfig", ".", "width_coefficient", "divisor", "=", "mconfig", ".", "depth_divisor", "min_depth", "=", "mconfig", ".", "min_depth", "if", "skip", "or", "not", "multiplier", ":", "return", "filters", "filters", "*=", "multiplier", "min_depth", "=", "min_depth", "or", "divisor", "new_filters", "=", "max", "(", "min_depth", ",", "int", "(", "filters", "+", "divisor", "/", "2", ")", "//", "divisor", "*", "divisor", ")", "return", "int", "(", "new_filters", ")" ]
Round number of filters based on depth multiplier.
[ "Round", "number", "of", "filters", "based", "on", "depth", "multiplier", "." ]
[ "\"\"\"Round number of filters based on depth multiplier.\"\"\"" ]
[ { "param": "filters", "type": null }, { "param": "mconfig", "type": null }, { "param": "skip", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filters", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "mconfig", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "skip", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def round_filters(filters, mconfig, skip=False): multiplier = mconfig.width_coefficient divisor = mconfig.depth_divisor min_depth = mconfig.min_depth if skip or not multiplier: return filters filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) return int(new_filters)
1066
543
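A sketch with a SimpleNamespace standing in for the model config object; only the three attributes the record reads are set, and the coefficient values are illustrative.

from types import SimpleNamespace

mconfig = SimpleNamespace(width_coefficient=1.2, depth_divisor=8, min_depth=None)
print(round_filters(32, mconfig))             # expected 40: 32 * 1.2 = 38.4, snapped to a multiple of 8
print(round_filters(32, mconfig, skip=True))  # expected 32: skip bypasses the scaling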
40b134049b4f1d70a3dd877bb99f98a50b61c93a
graingert/mom
mom/math.py
[ "ECL-2.0", "Apache-2.0" ]
Python
inverse_mod
<not_specific>
def inverse_mod(num_a, num_b): """Returns inverse of a mod b, zero if none Uses Extended Euclidean Algorithm :param num_a: Long value :param num_b: Long value :returns: Inverse of a mod b, zero if none. """ num_c, num_d = num_a, num_b num_uc, num_ud = 1, 0 while num_c: quotient = num_d // num_c num_c, num_d = num_d - (quotient * num_c), num_c num_uc, num_ud = num_ud - (quotient * num_uc), num_uc if num_d == 1: return num_ud % num_b return 0
Returns inverse of a mod b, zero if none Uses Extended Euclidean Algorithm :param num_a: Long value :param num_b: Long value :returns: Inverse of a mod b, zero if none.
Returns inverse of a mod b, zero if none Uses Extended Euclidean Algorithm
[ "Returns", "inverse", "of", "a", "mod", "b", "zero", "if", "none", "Uses", "Extended", "Euclidean", "Algorithm" ]
def inverse_mod(num_a, num_b): num_c, num_d = num_a, num_b num_uc, num_ud = 1, 0 while num_c: quotient = num_d // num_c num_c, num_d = num_d - (quotient * num_c), num_c num_uc, num_ud = num_ud - (quotient * num_uc), num_uc if num_d == 1: return num_ud % num_b return 0
[ "def", "inverse_mod", "(", "num_a", ",", "num_b", ")", ":", "num_c", ",", "num_d", "=", "num_a", ",", "num_b", "num_uc", ",", "num_ud", "=", "1", ",", "0", "while", "num_c", ":", "quotient", "=", "num_d", "//", "num_c", "num_c", ",", "num_d", "=", "num_d", "-", "(", "quotient", "*", "num_c", ")", ",", "num_c", "num_uc", ",", "num_ud", "=", "num_ud", "-", "(", "quotient", "*", "num_uc", ")", ",", "num_uc", "if", "num_d", "==", "1", ":", "return", "num_ud", "%", "num_b", "return", "0" ]
Returns inverse of a mod b, zero if none Uses Extended Euclidean Algorithm
[ "Returns", "inverse", "of", "a", "mod", "b", "zero", "if", "none", "Uses", "Extended", "Euclidean", "Algorithm" ]
[ "\"\"\"Returns inverse of a mod b, zero if none\n\n Uses Extended Euclidean Algorithm\n\n :param num_a:\n Long value\n :param num_b:\n Long value\n :returns:\n Inverse of a mod b, zero if none.\n \"\"\"" ]
[ { "param": "num_a", "type": null }, { "param": "num_b", "type": null } ]
{ "returns": [ { "docstring": "Inverse of a mod b, zero if none.", "docstring_tokens": [ "Inverse", "of", "a", "mod", "b", "zero", "if", "none", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "num_a", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "num_b", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def inverse_mod(num_a, num_b): num_c, num_d = num_a, num_b num_uc, num_ud = 1, 0 while num_c: quotient = num_d // num_c num_c, num_d = num_d - (quotient * num_c), num_c num_uc, num_ud = num_ud - (quotient * num_uc), num_uc if num_d == 1: return num_ud % num_b return 0
1067
204
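Two quick checks of the extended-Euclid record: an invertible case and a non-coprime case that falls through to zero.

print(inverse_mod(3, 7))  # expected 5, since 3 * 5 == 15 == 1 (mod 7)
print(inverse_mod(4, 8))  # expected 0 -- gcd(4, 8) != 1, so no inverse exists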
41fac871f10da16f52782386ee624f6b01e45f7e
priyankagohil/coursebuilder-assessment
controllers/sites.py
[ "Apache-2.0" ]
Python
_default_error_handler
null
def _default_error_handler(cls, request, response, status_code): """Render default global error page.""" response.status_code = status_code if status_code == 404: cls._404_handler(request, response) elif status_code < 500: response.out.write( 'Unable to access requested page. ' 'HTTP status code: %s.' % status_code) elif status_code >= 500: cls._5xx_handler(request, response, status_code) else: msg = 'Server error. HTTP status code: %s.' % status_code logging.error(msg) response.out.write(msg)
Render default global error page.
Render default global error page.
[ "Render", "default", "global", "error", "page", "." ]
def _default_error_handler(cls, request, response, status_code): response.status_code = status_code if status_code == 404: cls._404_handler(request, response) elif status_code < 500: response.out.write( 'Unable to access requested page. ' 'HTTP status code: %s.' % status_code) elif status_code >= 500: cls._5xx_handler(request, response, status_code) else: msg = 'Server error. HTTP status code: %s.' % status_code logging.error(msg) response.out.write(msg)
[ "def", "_default_error_handler", "(", "cls", ",", "request", ",", "response", ",", "status_code", ")", ":", "response", ".", "status_code", "=", "status_code", "if", "status_code", "==", "404", ":", "cls", ".", "_404_handler", "(", "request", ",", "response", ")", "elif", "status_code", "<", "500", ":", "response", ".", "out", ".", "write", "(", "'Unable to access requested page. '", "'HTTP status code: %s.'", "%", "status_code", ")", "elif", "status_code", ">=", "500", ":", "cls", ".", "_5xx_handler", "(", "request", ",", "response", ",", "status_code", ")", "else", ":", "msg", "=", "'Server error. HTTP status code: %s.'", "%", "status_code", "logging", ".", "error", "(", "msg", ")", "response", ".", "out", ".", "write", "(", "msg", ")" ]
Render default global error page.
[ "Render", "default", "global", "error", "page", "." ]
[ "\"\"\"Render default global error page.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "request", "type": null }, { "param": "response", "type": null }, { "param": "status_code", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "response", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "status_code", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def _default_error_handler(cls, request, response, status_code): response.status_code = status_code if status_code == 404: cls._404_handler(request, response) elif status_code < 500: response.out.write( 'Unable to access requested page. ' 'HTTP status code: %s.' % status_code) elif status_code >= 500: cls._5xx_handler(request, response, status_code) else: msg = 'Server error. HTTP status code: %s.' % status_code logging.error(msg) response.out.write(msg)
1068
309
9d95a64b65ee31a1a038c56a7777d61bff2d0cd8
ariensligar/farfields_postprocessing
Lib/Utillities.py
[ "MIT" ]
Python
convert_units
<not_specific>
def convert_units(value, oldUnits, newUnits): """ used for converting between common unit types in HFSS """ unitConv = {"nm": .000000001, "um": .000001, "mm": .001, "meter": 1.0, "cm": .01, "ft": .3048, "in": .0254, "mil": .0000254, "uin": .0000000254} value =float(value) sf = 1.0 BaseUnits = None NewUnits = None if oldUnits.lower() in unitConv: BaseUnits = unitConv[oldUnits.lower()] if newUnits.lower() in unitConv: NewUnits = unitConv[newUnits.lower()] if BaseUnits != None and NewUnits != None: sf = BaseUnits/NewUnits if oldUnits != newUnits: nuValue = value*sf else: nuValue = value return nuValue
used for converting between common unit types in HFSS
used for converting between common unit types in HFSS
[ "used", "for", "converting", "between", "common", "unit", "types", "in", "HFSS" ]
def convert_units(value, oldUnits, newUnits): unitConv = {"nm": .000000001, "um": .000001, "mm": .001, "meter": 1.0, "cm": .01, "ft": .3048, "in": .0254, "mil": .0000254, "uin": .0000000254} value =float(value) sf = 1.0 BaseUnits = None NewUnits = None if oldUnits.lower() in unitConv: BaseUnits = unitConv[oldUnits.lower()] if newUnits.lower() in unitConv: NewUnits = unitConv[newUnits.lower()] if BaseUnits != None and NewUnits != None: sf = BaseUnits/NewUnits if oldUnits != newUnits: nuValue = value*sf else: nuValue = value return nuValue
[ "def", "convert_units", "(", "value", ",", "oldUnits", ",", "newUnits", ")", ":", "unitConv", "=", "{", "\"nm\"", ":", ".000000001", ",", "\"um\"", ":", ".000001", ",", "\"mm\"", ":", ".001", ",", "\"meter\"", ":", "1.0", ",", "\"cm\"", ":", ".01", ",", "\"ft\"", ":", ".3048", ",", "\"in\"", ":", ".0254", ",", "\"mil\"", ":", ".0000254", ",", "\"uin\"", ":", ".0000000254", "}", "value", "=", "float", "(", "value", ")", "sf", "=", "1.0", "BaseUnits", "=", "None", "NewUnits", "=", "None", "if", "oldUnits", ".", "lower", "(", ")", "in", "unitConv", ":", "BaseUnits", "=", "unitConv", "[", "oldUnits", ".", "lower", "(", ")", "]", "if", "newUnits", ".", "lower", "(", ")", "in", "unitConv", ":", "NewUnits", "=", "unitConv", "[", "newUnits", ".", "lower", "(", ")", "]", "if", "BaseUnits", "!=", "None", "and", "NewUnits", "!=", "None", ":", "sf", "=", "BaseUnits", "/", "NewUnits", "if", "oldUnits", "!=", "newUnits", ":", "nuValue", "=", "value", "*", "sf", "else", ":", "nuValue", "=", "value", "return", "nuValue" ]
used for converting between common unit types in HFSS
[ "used", "for", "converting", "between", "common", "unit", "types", "in", "HFSS" ]
[ "\"\"\"\n used for converting between common unit types in HFSS\n \"\"\"" ]
[ { "param": "value", "type": null }, { "param": "oldUnits", "type": null }, { "param": "newUnits", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "oldUnits", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "newUnits", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_units(value, oldUnits, newUnits): unitConv = {"nm": .000000001, "um": .000001, "mm": .001, "meter": 1.0, "cm": .01, "ft": .3048, "in": .0254, "mil": .0000254, "uin": .0000000254} value =float(value) sf = 1.0 BaseUnits = None NewUnits = None if oldUnits.lower() in unitConv: BaseUnits = unitConv[oldUnits.lower()] if newUnits.lower() in unitConv: NewUnits = unitConv[newUnits.lower()] if BaseUnits != None and NewUnits != None: sf = BaseUnits/NewUnits if oldUnits != newUnits: nuValue = value*sf else: nuValue = value return nuValue
1069
855
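Two spot checks of the unit table; note that unrecognized unit names silently fall back to a scale factor of 1.0.

print(convert_units(1, 'meter', 'cm'))  # expected 100.0
print(convert_units(25.4, 'mm', 'in'))  # expected ~1.0, up to float rounding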
efc8be2a8d8c262dc8be1c5747c6e7100a67549d
puumuki/tahmatassu-api
tahmatassu-server/users.py
[ "MIT" ]
Python
calculate_hash
<not_specific>
def calculate_hash(stuff): """ Calculate sha1 hash sum for given stuff :param stuff: stuff to be hashed :returns: calculated hash """ sha1 = hashlib.sha1() sha1.update(stuff) return sha1.hexdigest()
Calculate sha1 hash sum for given stuff :param stuff: stuff to be hashed :returns: calculated hash
Calculate sha1 hash sum for given stuff
[ "Calculate", "sha1", "hash", "sum", "for", "given", "stuff" ]
def calculate_hash(stuff): sha1 = hashlib.sha1() sha1.update(stuff) return sha1.hexdigest()
[ "def", "calculate_hash", "(", "stuff", ")", ":", "sha1", "=", "hashlib", ".", "sha1", "(", ")", "sha1", ".", "update", "(", "stuff", ")", "return", "sha1", ".", "hexdigest", "(", ")" ]
Calculate sha1 hash sum for given stuff
[ "Calculate", "sha1", "hash", "sum", "for", "given", "stuff" ]
[ "\"\"\"\n\tCalculate sha1 hash sum for given stuff\n\t:param stuff: stuff to be hashed\n\t:returns: calculated hash \n\t\"\"\"" ]
[ { "param": "stuff", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "stuff", "type": null, "docstring": "stuff to be hashed", "docstring_tokens": [ "stuff", "to", "be", "hashed" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import hashlib def calculate_hash(stuff): sha1 = hashlib.sha1() sha1.update(stuff) return sha1.hexdigest()
1070
658
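A quick check. hashlib's update requires bytes under Python 3, so the argument is a bytes literal even though the docstring only says "stuff".

print(calculate_hash(b'hello'))
# expected 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d' -- SHA-1 of b'hello'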
bfaf3ce611232241cd16a6da112a9749a97d97b8
ahesford/habis-tools
habis/conductor.py
[ "BSD-2-Clause" ]
Python
fromargs
<not_specific>
def fromargs(cls, cmd, *args, **kwargs): ''' Create a HabisRemoteCommand instance for the given command, using the provided args and kwargs as 'default' values. ''' return cls(cmd, argmap={'default': args}, kwargmap={'default': kwargs})
Create a HabisRemoteCommand instance for the given command, using the provided args and kwargs as 'default' values.
Create a HabisRemoteCommand instance for the given command, using the provided args and kwargs as 'default' values.
[ "Create", "a", "HabisRemoteCommand", "instance", "for", "the", "given", "command", "using", "the", "provided", "args", "and", "kwargs", "as", "'", "default", "'", "values", "." ]
def fromargs(cls, cmd, *args, **kwargs): return cls(cmd, argmap={'default': args}, kwargmap={'default': kwargs})
[ "def", "fromargs", "(", "cls", ",", "cmd", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "cls", "(", "cmd", ",", "argmap", "=", "{", "'default'", ":", "args", "}", ",", "kwargmap", "=", "{", "'default'", ":", "kwargs", "}", ")" ]
Create a HabisRemoteCommand instance for the given command, using the provided args and kwargs as 'default' values.
[ "Create", "a", "HabisRemoteCommand", "instance", "for", "the", "given", "command", "using", "the", "provided", "args", "and", "kwargs", "as", "'", "default", "'", "values", "." ]
[ "'''\n\t\tCreate a HabisRemoteCommand instance for the given command,\n\t\tusing the provided args and kwargs as 'default' values.\n\t\t'''" ]
[ { "param": "cls", "type": null }, { "param": "cmd", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cmd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fromargs(cls, cmd, *args, **kwargs): return cls(cmd, argmap={'default': args}, kwargmap={'default': kwargs})
1071
183
70cc81658795d01cca92504a540156c2ce706f9c
KenwoodFox/pyfrc
pyfrc/util.py
[ "MIT" ]
Python
yesno
<not_specific>
def yesno(prompt): """Returns True if user answers 'y'""" prompt += " [y/n]" a = "" while a not in ["y", "n"]: a = input(prompt).lower() return a == "y"
Returns True if user answers 'y'
Returns True if user answers 'y'
[ "Returns", "True", "if", "user", "answers", "'", "y", "'" ]
def yesno(prompt): prompt += " [y/n]" a = "" while a not in ["y", "n"]: a = input(prompt).lower() return a == "y"
[ "def", "yesno", "(", "prompt", ")", ":", "prompt", "+=", "\" [y/n]\"", "a", "=", "\"\"", "while", "a", "not", "in", "[", "\"y\"", ",", "\"n\"", "]", ":", "a", "=", "input", "(", "prompt", ")", ".", "lower", "(", ")", "return", "a", "==", "\"y\"" ]
Returns True if user answers 'y'
[ "Returns", "True", "if", "user", "answers", "'", "y", "'" ]
[ "\"\"\"Returns True if user answers 'y'\"\"\"" ]
[ { "param": "prompt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "prompt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def yesno(prompt): prompt += " [y/n]" a = "" while a not in ["y", "n"]: a = input(prompt).lower() return a == "y"
1072
808
f6410dbc9c590ad8328f00daabb64e7570c1ae80
octaflop/gbotservice
gbotservice/__main__.py
[ "MIT" ]
Python
pull_request_reopened_event
null
async def pull_request_reopened_event(event, gh, *args, **kwargs): """ Whenever a pull_request is opened, greet the author.""" url = event.data["pull_request"]["comments_url"] # reaction_url = f"{url}/reactions" author = event.data["pull_request"]["user"]["login"] message = ("Way to keep going! " f"🤖 Thanks for the pull_request @{author}! " "I will look into it ASAP! (I'm a bot, BTW 🤖).") await gh.post(url, data={"body": message})
Whenever a pull_request is opened, greet the author.
Whenever a pull_request is opened, greet the author.
[ "Whenever", "a", "pull_request", "is", "opened", "greet", "the", "author", "." ]
async def pull_request_reopened_event(event, gh, *args, **kwargs): url = event.data["pull_request"]["comments_url"] author = event.data["pull_request"]["user"]["login"] message = ("Way to keep going! " f"🤖 Thanks for the pull_request @{author}! " "I will look into it ASAP! (I'm a bot, BTW 🤖).") await gh.post(url, data={"body": message})
[ "async", "def", "pull_request_reopened_event", "(", "event", ",", "gh", ",", "*", "args", ",", "**", "kwargs", ")", ":", "url", "=", "event", ".", "data", "[", "\"pull_request\"", "]", "[", "\"comments_url\"", "]", "author", "=", "event", ".", "data", "[", "\"pull_request\"", "]", "[", "\"user\"", "]", "[", "\"login\"", "]", "message", "=", "(", "\"Way to keep going! \"", "f\"🤖 Thanks for the pull_request @{author}! \"", "\"I will look into it ASAP! (I'm a bot, BTW 🤖).\")", "", "await", "gh", ".", "post", "(", "url", ",", "data", "=", "{", "\"body\"", ":", "message", "}", ")" ]
Whenever a pull_request is opened, greet the author.
[ "Whenever", "a", "pull_request", "is", "opened", "greet", "the", "author", "." ]
[ "\"\"\" Whenever a pull_request is opened, greet the author.\"\"\"", "# reaction_url = f\"{url}/reactions\"" ]
[ { "param": "event", "type": null }, { "param": "gh", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "event", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "gh", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def pull_request_reopened_event(event, gh, *args, **kwargs): url = event.data["pull_request"]["comments_url"] author = event.data["pull_request"]["user"]["login"] message = ("Way to keep going! " f"🤖 Thanks for the pull_request @{author}! " "I will look into it ASAP! (I'm a bot, BTW 🤖).") await gh.post(url, data={"body": message})
1073
599
9c37614f9f019bf8d5f21cfff79ed808404a0ddf
AUCR/halogen
halogen/parser.py
[ "MIT" ]
Python
jpg_sos
<not_specific>
def jpg_sos(file_map): """ if the jpg_sos option has been set (JPG_SOS), we find the jpg header, and then find the SOS section. Grab bytes from the sos section onwards. parameter: file_map - bytes of file returns: matching bytes. """ match_list = [] jpg_header = re.compile(b'(?s)\xff\xd8\xff\xe0\x00\x10') sos = re.compile(b'(?s)(\xff\xda.{50})') for match in jpg_header.finditer(file_map): end = match.end() match_list.append(sos.search(file_map, end).group()) return match_list
if the jpg_sos option has been set (JPG_SOS), we find the jpg header, and then find the SOS section. Grab bytes from the sos section onwards. parameter: file_map - bytes of file returns: matching bytes.
if the jpg_sos option has been set (JPG_SOS), we find the jpg header, and then find the SOS section. Grab bytes from the sos section onwards. parameter: file_map - bytes of file returns: matching bytes.
[ "if", "the", "jpg_sos", "option", "has", "been", "set", "(", "JPG_SOS", ")", "we", "find", "the", "jpg", "header", "and", "then", "find", "the", "SOS", "section", ".", "Grab", "bytes", "from", "the", "sos", "section", "onwards", ".", "parameter", ":", "file_map", "-", "bytes", "of", "file", "returns", ":", "matching", "bytes", "." ]
def jpg_sos(file_map): match_list = [] jpg_header = re.compile(b'(?s)\xff\xd8\xff\xe0\x00\x10') sos = re.compile(b'(?s)(\xff\xda.{50})') for match in jpg_header.finditer(file_map): end = match.end() match_list.append(sos.search(file_map, end).group()) return match_list
[ "def", "jpg_sos", "(", "file_map", ")", ":", "match_list", "=", "[", "]", "jpg_header", "=", "re", ".", "compile", "(", "b'(?s)\\xff\\xd8\\xff\\xe0\\x00\\x10'", ")", "sos", "=", "re", ".", "compile", "(", "b'(?s)(\\xff\\xda.{50})'", ")", "for", "match", "in", "jpg_header", ".", "finditer", "(", "file_map", ")", ":", "end", "=", "match", ".", "end", "(", ")", "match_list", ".", "append", "(", "sos", ".", "search", "(", "file_map", ",", "end", ")", ".", "group", "(", ")", ")", "return", "match_list" ]
if the jpg_sos option has been set (JPG_SOS), we find the jpg header, and then find the SOS section.
[ "if", "the", "jpg_sos", "option", "has", "been", "set", "(", "JPG_SOS", ")", "we", "find", "the", "jpg", "header", "and", "then", "find", "the", "SOS", "section", "." ]
[ "\"\"\" if the jpg_sos option has been set (JPG_SOS), we find the jpg header,\n and then find the SOS section. Grab bytes from the sos section onwards.\n parameter: file_map - bytes of file\n returns: matching bytes. \"\"\"" ]
[ { "param": "file_map", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_map", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def jpg_sos(file_map): match_list = [] jpg_header = re.compile(b'(?s)\xff\xd8\xff\xe0\x00\x10') sos = re.compile(b'(?s)(\xff\xda.{50})') for match in jpg_header.finditer(file_map): end = match.end() match_list.append(sos.search(file_map, end).group()) return match_list
1,074
312
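A hedged usage sketch with a synthetic byte buffer (not a real JPEG): a JFIF header, some filler, then an SOS marker followed by exactly 50 bytes, which is the shape the two regexes above look for.

buf = (b"\xff\xd8\xff\xe0\x00\x10"   # JFIF header the first regex matches
       + b"\x00" * 8                 # filler bytes
       + b"\xff\xda" + bytes(50))    # SOS marker plus 50 payload bytes
print(jpg_sos(buf))  # one match: the SOS marker and the 50 bytes after it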
b1ceb012a646f7086115cb3056e72d5cff0a48e3
NewGuonx/DST_algorithm
Python/bfs.py
[ "Unlicense" ]
Python
BFS
null
def BFS(g, s, discovered): """Perform BFS of the undiscovered portion of Graph g starting at Vertex s. discovered is a dictionary mapping each vertex to the edge that was used to discover it during the BFS (s should be mapped to None prior to the call). Newly discovered vertices will be added to the dictionary as a result. """ level = [s] # first level includes only s while len(level) > 0: next_level = [] # prepare to gather newly found vertices for u in level: for e in g.incident_edges(u): # for every outgoing edge from u v = e.opposite(u) if v not in discovered: # v is an unvisited vertex discovered[v] = e # e is the tree edge that discovered v # v will be further considered in next pass next_level.append(v) level = next_level # relabel 'next' level to become current
Perform BFS of the undiscovered portion of Graph g starting at Vertex s. discovered is a dictionary mapping each vertex to the edge that was used to discover it during the BFS (s should be mapped to None prior to the call). Newly discovered vertices will be added to the dictionary as a result.
Perform BFS of the undiscovered portion of Graph g starting at Vertex s. discovered is a dictionary mapping each vertex to the edge that was used to discover it during the BFS (s should be mapped to None prior to the call). Newly discovered vertices will be added to the dictionary as a result.
[ "Perform", "BFS", "of", "the", "undiscovered", "portion", "of", "Graph", "g", "starting", "at", "Vertex", "s", ".", "discovered", "is", "a", "dictionary", "mapping", "each", "vertex", "to", "the", "edge", "that", "was", "used", "to", "discover", "it", "during", "the", "BFS", "(", "s", "should", "be", "mapped", "to", "None", "prior", "to", "the", "call", ")", ".", "Newly", "discovered", "vertices", "will", "be", "added", "to", "the", "dictionary", "as", "a", "result", "." ]
def BFS(g, s, discovered): level = [s] while len(level) > 0: next_level = [] for u in level: for e in g.incident_edges(u): v = e.opposite(u) if v not in discovered: discovered[v] = e next_level.append(v) level = next_level
[ "def", "BFS", "(", "g", ",", "s", ",", "discovered", ")", ":", "level", "=", "[", "s", "]", "while", "len", "(", "level", ")", ">", "0", ":", "next_level", "=", "[", "]", "for", "u", "in", "level", ":", "for", "e", "in", "g", ".", "incident_edges", "(", "u", ")", ":", "v", "=", "e", ".", "opposite", "(", "u", ")", "if", "v", "not", "in", "discovered", ":", "discovered", "[", "v", "]", "=", "e", "next_level", ".", "append", "(", "v", ")", "level", "=", "next_level" ]
Perform BFS of the undiscovered portion of Graph g starting at Vertex s. discovered is a dictionary mapping each vertex to the edge that was used to discover it during the BFS (s should be mapped to None prior to the call).
[ "Perform", "BFS", "of", "the", "undiscovered", "portion", "of", "Graph", "g", "starting", "at", "Vertex", "s", ".", "discovered", "is", "a", "dictionary", "mapping", "each", "vertex", "to", "the", "edge", "that", "was", "used", "to", "discover", "it", "during", "the", "BFS", "(", "s", "should", "be", "mapped", "to", "None", "prior", "to", "the", "call", ")", "." ]
[ "\"\"\"Perform BFS of the undiscovered portion of Graph g starting at Vertex s.\n discovered is a dictionary mapping each vertex to the edge that was used to\n discover it during the BFS (s should be mapped to None prior to the call).\n Newly discovered vertices will be added to the dictionary as a result.\n \"\"\"", "# first level includes only s", "# prepare to gather newly found vertices", "# for every outgoing edge from u", "# v is an unvisited vertex", "# e is the tree edge that discovered v", "# v will be further considered in next pass", "# relabel 'next' level to become current" ]
[ { "param": "g", "type": null }, { "param": "s", "type": null }, { "param": "discovered", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "g", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "discovered", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def BFS(g, s, discovered): level = [s] while len(level) > 0: next_level = [] for u in level: for e in g.incident_edges(u): v = e.opposite(u) if v not in discovered: discovered[v] = e next_level.append(v) level = next_level
1,075
155
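The record assumes a Graph API with incident_edges() and Edge.opposite(); the tiny stand-ins below are assumptions for illustration, not the classes from the source repo.

class Edge:
    def __init__(self, u, v):
        self.u, self.v = u, v
    def opposite(self, x):
        return self.v if x == self.u else self.u

class TinyGraph:
    def __init__(self, edges):
        self.adj = {}
        for e in edges:
            self.adj.setdefault(e.u, []).append(e)
            self.adj.setdefault(e.v, []).append(e)
    def incident_edges(self, u):
        return self.adj.get(u, [])

g = TinyGraph([Edge("a", "b"), Edge("b", "c"), Edge("a", "d")])
discovered = {"a": None}  # map the start vertex to None, per the docstring
BFS(g, "a", discovered)
print(sorted(discovered))  # ['a', 'b', 'c', 'd']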
3e174d55f2be69e698ca3dd237ab09833161eef6
nwunderly/RevBots
utils/juan_checks.py
[ "MIT" ]
Python
juan_perm_check
<not_specific>
async def juan_perm_check(ctx, permission): """ Checks purely the requested permission. """ if await ctx.bot.is_owner(ctx.author): # if it's me return True # function to make it easier def check(field): if isinstance(field, list): return permission in field or 'unrestricted' in field elif isinstance(field, str): return permission == field or 'unrestricted' == field return False # checks perms associated with user id perms = ctx.bot.properties.perms if ctx.author.id in perms.keys() and check(ctx.bot.properties.perms[ctx.author.id]): return True # checks perms associated with user's roles for role in ctx.author.roles: if role.id in perms.keys() and check(perms[role.id]): return True # if it's here, access will be denied. return False
Checks purely the requested permission.
Checks purely the requested permission.
[ "Checks", "purely", "the", "requested", "permission", "." ]
async def juan_perm_check(ctx, permission): if await ctx.bot.is_owner(ctx.author): return True def check(field): if isinstance(field, list): return permission in field or 'unrestricted' in field elif isinstance(field, str): return permission == field or 'unrestricted' == field return False perms = ctx.bot.properties.perms if ctx.author.id in perms.keys() and check(ctx.bot.properties.perms[ctx.author.id]): return True for role in ctx.author.roles: if role.id in perms.keys() and check(perms[role.id]): return True return False
[ "async", "def", "juan_perm_check", "(", "ctx", ",", "permission", ")", ":", "if", "await", "ctx", ".", "bot", ".", "is_owner", "(", "ctx", ".", "author", ")", ":", "return", "True", "def", "check", "(", "field", ")", ":", "if", "isinstance", "(", "field", ",", "list", ")", ":", "return", "permission", "in", "field", "or", "'unrestricted'", "in", "field", "elif", "isinstance", "(", "field", ",", "str", ")", ":", "return", "permission", "==", "field", "or", "'unrestricted'", "==", "field", "return", "False", "perms", "=", "ctx", ".", "bot", ".", "properties", ".", "perms", "if", "ctx", ".", "author", ".", "id", "in", "perms", ".", "keys", "(", ")", "and", "check", "(", "ctx", ".", "bot", ".", "properties", ".", "perms", "[", "ctx", ".", "author", ".", "id", "]", ")", ":", "return", "True", "for", "role", "in", "ctx", ".", "author", ".", "roles", ":", "if", "role", ".", "id", "in", "perms", ".", "keys", "(", ")", "and", "check", "(", "perms", "[", "role", ".", "id", "]", ")", ":", "return", "True", "return", "False" ]
Checks purely the requested permission.
[ "Checks", "purely", "the", "requested", "permission", "." ]
[ "\"\"\"\n Checks purely the requested permission.\n \"\"\"", "# if it's me", "# function to make it easier", "# checks perms associated with user id", "# checks perms associated with user's roles", "# if it's here, access will be denied." ]
[ { "param": "ctx", "type": null }, { "param": "permission", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ctx", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "permission", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def juan_perm_check(ctx, permission): if await ctx.bot.is_owner(ctx.author): return True def check(field): if isinstance(field, list): return permission in field or 'unrestricted' in field elif isinstance(field, str): return permission == field or 'unrestricted' == field return False perms = ctx.bot.properties.perms if ctx.author.id in perms.keys() and check(ctx.bot.properties.perms[ctx.author.id]): return True for role in ctx.author.roles: if role.id in perms.keys() and check(perms[role.id]): return True return False
1,076
930
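A rough stub-based exercise of the check; real use would pass a discord.py Context, so the SimpleNamespace objects below are assumptions covering only the attributes the function reads.

import asyncio
from types import SimpleNamespace

async def _not_owner(author):
    # Stand-in for bot.is_owner(); always deny the owner shortcut
    return False

role = SimpleNamespace(id=222)
ctx = SimpleNamespace(
    author=SimpleNamespace(id=111, roles=[role]),
    bot=SimpleNamespace(
        is_owner=_not_owner,
        properties=SimpleNamespace(perms={111: ["ban"], 222: "kick"}),
    ),
)
print(asyncio.run(juan_perm_check(ctx, "ban")))   # True  -- granted via user id
print(asyncio.run(juan_perm_check(ctx, "mute")))  # False -- no matching perm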
f751f381137e53e40be0138d866c942bd38b56c9
icycookies/cogdl
cogdl/models/nn/lightgcn.py
[ "MIT" ]
Python
add_args
null
def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dim', type=int, default=64, help='embedding size') parser.add_argument('--l2', type=float, default=1e-4, help='l2 regularization weight, 1e-5 for NGCF') parser.add_argument("--mess_dropout", type=bool, default=False, help="consider mess dropout or not") parser.add_argument("--mess_dropout_rate", type=float, default=0.1, help="ratio of mess dropout") parser.add_argument("--edge_dropout", type=bool, default=False, help="consider edge dropout or not") parser.add_argument("--edge_dropout_rate", type=float, default=0.1, help="ratio of edge sampling") parser.add_argument("--ns", type=str, default='mixgcf', help="rns,mixgcf") parser.add_argument("--K", type=int, default=1, help="number of negative in K-pair loss") parser.add_argument("--n_negs", type=int, default=64, help="number of candidate negative") parser.add_argument("--pool", type=str, default='mean', help="[concat, mean, sum, final]") parser.add_argument("--context_hops", type=int, default=3, help="hop") # fmt: on
Add model-specific arguments to the parser.
Add model-specific arguments to the parser.
[ "Add", "model", "-", "specific", "arguments", "to", "the", "parser", "." ]
def add_args(parser): parser.add_argument('--dim', type=int, default=64, help='embedding size') parser.add_argument('--l2', type=float, default=1e-4, help='l2 regularization weight, 1e-5 for NGCF') parser.add_argument("--mess_dropout", type=bool, default=False, help="consider mess dropout or not") parser.add_argument("--mess_dropout_rate", type=float, default=0.1, help="ratio of mess dropout") parser.add_argument("--edge_dropout", type=bool, default=False, help="consider edge dropout or not") parser.add_argument("--edge_dropout_rate", type=float, default=0.1, help="ratio of edge sampling") parser.add_argument("--ns", type=str, default='mixgcf', help="rns,mixgcf") parser.add_argument("--K", type=int, default=1, help="number of negative in K-pair loss") parser.add_argument("--n_negs", type=int, default=64, help="number of candidate negative") parser.add_argument("--pool", type=str, default='mean', help="[concat, mean, sum, final]") parser.add_argument("--context_hops", type=int, default=3, help="hop")
[ "def", "add_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--dim'", ",", "type", "=", "int", ",", "default", "=", "64", ",", "help", "=", "'embedding size'", ")", "parser", ".", "add_argument", "(", "'--l2'", ",", "type", "=", "float", ",", "default", "=", "1e-4", ",", "help", "=", "'l2 regularization weight, 1e-5 for NGCF'", ")", "parser", ".", "add_argument", "(", "\"--mess_dropout\"", ",", "type", "=", "bool", ",", "default", "=", "False", ",", "help", "=", "\"consider mess dropout or not\"", ")", "parser", ".", "add_argument", "(", "\"--mess_dropout_rate\"", ",", "type", "=", "float", ",", "default", "=", "0.1", ",", "help", "=", "\"ratio of mess dropout\"", ")", "parser", ".", "add_argument", "(", "\"--edge_dropout\"", ",", "type", "=", "bool", ",", "default", "=", "False", ",", "help", "=", "\"consider edge dropout or not\"", ")", "parser", ".", "add_argument", "(", "\"--edge_dropout_rate\"", ",", "type", "=", "float", ",", "default", "=", "0.1", ",", "help", "=", "\"ratio of edge sampling\"", ")", "parser", ".", "add_argument", "(", "\"--ns\"", ",", "type", "=", "str", ",", "default", "=", "'mixgcf'", ",", "help", "=", "\"rns,mixgcf\"", ")", "parser", ".", "add_argument", "(", "\"--K\"", ",", "type", "=", "int", ",", "default", "=", "1", ",", "help", "=", "\"number of negative in K-pair loss\"", ")", "parser", ".", "add_argument", "(", "\"--n_negs\"", ",", "type", "=", "int", ",", "default", "=", "64", ",", "help", "=", "\"number of candidate negative\"", ")", "parser", ".", "add_argument", "(", "\"--pool\"", ",", "type", "=", "str", ",", "default", "=", "'mean'", ",", "help", "=", "\"[concat, mean, sum, final]\"", ")", "parser", ".", "add_argument", "(", "\"--context_hops\"", ",", "type", "=", "int", ",", "default", "=", "3", ",", "help", "=", "\"hop\"", ")" ]
Add model-specific arguments to the parser.
[ "Add", "model", "-", "specific", "arguments", "to", "the", "parser", "." ]
[ "\"\"\"Add model-specific arguments to the parser.\"\"\"", "# fmt: off", "# fmt: on" ]
[ { "param": "parser", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_args(parser): parser.add_argument('--dim', type=int, default=64, help='embedding size') parser.add_argument('--l2', type=float, default=1e-4, help='l2 regularization weight, 1e-5 for NGCF') parser.add_argument("--mess_dropout", type=bool, default=False, help="consider mess dropout or not") parser.add_argument("--mess_dropout_rate", type=float, default=0.1, help="ratio of mess dropout") parser.add_argument("--edge_dropout", type=bool, default=False, help="consider edge dropout or not") parser.add_argument("--edge_dropout_rate", type=float, default=0.1, help="ratio of edge sampling") parser.add_argument("--ns", type=str, default='mixgcf', help="rns,mixgcf") parser.add_argument("--K", type=int, default=1, help="number of negative in K-pair loss") parser.add_argument("--n_negs", type=int, default=64, help="number of candidate negative") parser.add_argument("--pool", type=str, default='mean', help="[concat, mean, sum, final]") parser.add_argument("--context_hops", type=int, default=3, help="hop")
1,077
810
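Usage sketch: the function mutates whatever argparse parser is passed in, so wiring it up takes two lines.

import argparse

parser = argparse.ArgumentParser()
add_args(parser)
args = parser.parse_args(["--dim", "128", "--context_hops", "2"])
print(args.dim, args.context_hops, args.pool)  # 128 2 mean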
437c3fc5650f1c4c41d98f959851208c9d3bc0e8
lukewolcott/med-bell
mb_utils.py
[ "MIT" ]
Python
is_overlapping
<not_specific>
def is_overlapping(segment_time, previous_segments): """ Checks if the time of a segment overlaps with the times of existing segments. Arguments: segment_time -- a tuple of (segment_start, segment_end) for the new segment previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments Returns: True if the time segment overlaps with any of the existing segments, False otherwise """ segment_start, segment_end = segment_time overlap = False for previous_start, previous_end in previous_segments: if segment_start <= previous_end and segment_end >= previous_start: overlap = True return overlap
Checks if the time of a segment overlaps with the times of existing segments. Arguments: segment_time -- a tuple of (segment_start, segment_end) for the new segment previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments Returns: True if the time segment overlaps with any of the existing segments, False otherwise
Checks if the time of a segment overlaps with the times of existing segments. Arguments: segment_time -- a tuple of (segment_start, segment_end) for the new segment previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments True if the time segment overlaps with any of the existing segments, False otherwise
[ "Checks", "if", "the", "time", "of", "a", "segment", "overlaps", "with", "the", "times", "of", "existing", "segments", ".", "Arguments", ":", "segment_time", "--", "a", "tuple", "of", "(", "segment_start", "segment_end", ")", "for", "the", "new", "segment", "previous_segments", "--", "a", "list", "of", "tuples", "of", "(", "segment_start", "segment_end", ")", "for", "the", "existing", "segments", "True", "if", "the", "time", "segment", "overlaps", "with", "any", "of", "the", "existing", "segments", "False", "otherwise" ]
def is_overlapping(segment_time, previous_segments): segment_start, segment_end = segment_time overlap = False for previous_start, previous_end in previous_segments: if segment_start <= previous_end and segment_end >= previous_start: overlap = True return overlap
[ "def", "is_overlapping", "(", "segment_time", ",", "previous_segments", ")", ":", "segment_start", ",", "segment_end", "=", "segment_time", "overlap", "=", "False", "for", "previous_start", ",", "previous_end", "in", "previous_segments", ":", "if", "segment_start", "<=", "previous_end", "and", "segment_end", ">=", "previous_start", ":", "overlap", "=", "True", "return", "overlap" ]
Checks if the time of a segment overlaps with the times of existing segments.
[ "Checks", "if", "the", "time", "of", "a", "segment", "overlaps", "with", "the", "times", "of", "existing", "segments", "." ]
[ "\"\"\"\n Checks if the time of a segment overlaps with the times of existing segments.\n \n Arguments:\n segment_time -- a tuple of (segment_start, segment_end) for the new segment\n previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments\n \n Returns:\n True if the time segment overlaps with any of the existing segments, False otherwise\n \"\"\"" ]
[ { "param": "segment_time", "type": null }, { "param": "previous_segments", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "segment_time", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "previous_segments", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_overlapping(segment_time, previous_segments): segment_start, segment_end = segment_time overlap = False for previous_start, previous_end in previous_segments: if segment_start <= previous_end and segment_end >= previous_start: overlap = True return overlap
1,078
807
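A quick check of the overlap logic with made-up segment times (in seconds):

existing = [(0.0, 1.5), (4.0, 5.0)]
print(is_overlapping((1.0, 2.0), existing))  # True  -- clips the first segment
print(is_overlapping((2.0, 3.5), existing))  # False -- falls in the gap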
06d2acf107606e1dcccc7fb67bc22b0d9d31cf9b
njamalova/search_alchemy
constructor/input_reader.py
[ "MIT" ]
Python
compute_square_coordinates
list
def compute_square_coordinates(height: int = 10, width: int = 10) -> list: """Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10. This function will store the coordinate information of the bottom right corner of each square for subsequent use. Indices in the resultant lists are equal to respective Square IDs. :param height: height of the grid, defaults to 10 :type height: int :param width: width of the grid, defaults to 10 :type width: int :return: list of approximate square coordinates :rtype: list """ square_coordinates = [] # initialize location of the top left corner of the grid (square 0) loc_x, loc_y = 0, 0 # move down 10 times for i in range(height): loc_x = loc_x - 10 # move right 10 times for j in range(width): loc_y = loc_y + 10 square_coordinates.append((loc_x, loc_y)) return square_coordinates
Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10. This function will store the coordinate information of the bottom right corner of each square for subsequent use. Indices in the resultant lists are equal to respective Square IDs. :param height: height of the grid, defaults to 10 :type height: int :param width: width of the grid, defaults to 10 :type width: int :return: list of approximate square coordinates :rtype: list
Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10. This function will store the coordinate information of the bottom right corner of each square for subsequent use. Indices in the resultant lists are equal to respective Square IDs.
[ "Compute", "coordinates", "of", "the", "bottom", "right", "corner", "for", "each", "square", "on", "the", "10x10", "grid", "where", "each", "square", "is", "of", "size", "10x10", ".", "This", "function", "will", "store", "the", "coordinate", "information", "of", "the", "bottom", "right", "corner", "of", "each", "square", "for", "subsequent", "use", ".", "Indices", "in", "the", "resultant", "lists", "are", "equal", "to", "respective", "Square", "IDs", "." ]
def compute_square_coordinates(height: int = 10, width: int = 10) -> list: square_coordinates = [] loc_x, loc_y = 0, 0 for i in range(height): loc_x = loc_x - 10 for j in range(width): loc_y = loc_y + 10 square_coordinates.append((loc_x, loc_y)) return square_coordinates
[ "def", "compute_square_coordinates", "(", "height", ":", "int", "=", "10", ",", "width", ":", "int", "=", "10", ")", "->", "list", ":", "square_coordinates", "=", "[", "]", "loc_x", ",", "loc_y", "=", "0", ",", "0", "for", "i", "in", "range", "(", "height", ")", ":", "loc_x", "=", "loc_x", "-", "10", "for", "j", "in", "range", "(", "width", ")", ":", "loc_y", "=", "loc_y", "+", "10", "square_coordinates", ".", "append", "(", "(", "loc_x", ",", "loc_y", ")", ")", "return", "square_coordinates" ]
Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10.
[ "Compute", "coordinates", "of", "the", "bottom", "right", "corner", "for", "each", "square", "on", "the", "10x10", "grid", "where", "each", "square", "is", "of", "size", "10x10", "." ]
[ "\"\"\"Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10.\n\n This function will store the coordinate information of the bottom right corner of each square for subsequent use.\n Indices in the resultant lists are equal to respective Square IDs.\n\n :param height: height of the grid, defaults to 10\n :type height: int\n :param width: width of the grid, defaults to 10\n :type width: int\n :return: list of approximate square coordinates\n :rtype: list\n \"\"\"", "# initialize location of the top left corner of the grid (square 0)", "# move down 10 times", "# move right 10 times" ]
[ { "param": "height", "type": "int" }, { "param": "width", "type": "int" } ]
{ "returns": [ { "docstring": "list of approximate square coordinates", "docstring_tokens": [ "list", "of", "approximate", "square", "coordinates" ], "type": "list" } ], "raises": [], "params": [ { "identifier": "height", "type": "int", "docstring": "height of the grid, defaults to 10", "docstring_tokens": [ "height", "of", "the", "grid", "defaults", "to", "10" ], "default": "10", "is_optional": null }, { "identifier": "width", "type": "int", "docstring": "width of the grid, defaults to 10", "docstring_tokens": [ "width", "of", "the", "grid", "defaults", "to", "10" ], "default": "10", "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_square_coordinates(height: int = 10, width: int = 10) -> list: square_coordinates = [] loc_x, loc_y = 0, 0 for i in range(height): loc_x = loc_x - 10 for j in range(width): loc_y = loc_y + 10 square_coordinates.append((loc_x, loc_y)) return square_coordinates
1,079
237
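Sanity check on a 2x2 grid; note that, as written, loc_y keeps accumulating across rows rather than resetting, which the expected output below reflects.

coords = compute_square_coordinates(height=2, width=2)
print(coords)  # [(-10, 10), (-10, 20), (-20, 30), (-20, 40)]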
67ed8a3aa19235e3cbe9b340e12f118c87ec094d
Exclaminator/clusterfuzz
src/python/metrics/logs.py
[ "Apache-2.0" ]
Python
_is_running_on_app_engine
<not_specific>
def _is_running_on_app_engine(): """Return whether or not we're running on App Engine (production or development).""" return (os.getenv('SERVER_SOFTWARE') and (os.getenv('SERVER_SOFTWARE').startswith('Development/') or os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')))
Return whether or not we're running on App Engine (production or development).
Return whether or not we're running on App Engine (production or development).
[ "Return", "whether", "or", "not", "we", "'", "re", "running", "on", "App", "Engine", "(", "production", "or", "development", ")", "." ]
def _is_running_on_app_engine(): return (os.getenv('SERVER_SOFTWARE') and (os.getenv('SERVER_SOFTWARE').startswith('Development/') or os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')))
[ "def", "_is_running_on_app_engine", "(", ")", ":", "return", "(", "os", ".", "getenv", "(", "'SERVER_SOFTWARE'", ")", "and", "(", "os", ".", "getenv", "(", "'SERVER_SOFTWARE'", ")", ".", "startswith", "(", "'Development/'", ")", "or", "os", ".", "getenv", "(", "'SERVER_SOFTWARE'", ")", ".", "startswith", "(", "'Google App Engine/'", ")", ")", ")" ]
Return whether or not we're running on App Engine (production or development).
[ "Return", "whether", "or", "not", "we", "'", "re", "running", "on", "App", "Engine", "(", "production", "or", "development", ")", "." ]
[ "\"\"\"Return whether or not we're running on App Engine (production or\n development).\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import os def _is_running_on_app_engine(): return (os.getenv('SERVER_SOFTWARE') and (os.getenv('SERVER_SOFTWARE').startswith('Development/') or os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')))
1,080
718
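The check keys entirely off the SERVER_SOFTWARE environment variable, so it can be exercised directly (version strings below are illustrative):

import os

os.environ["SERVER_SOFTWARE"] = "Google App Engine/1.9.38"
print(bool(_is_running_on_app_engine()))  # True
os.environ["SERVER_SOFTWARE"] = "gunicorn/20.1.0"
print(bool(_is_running_on_app_engine()))  # False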
43cb6fa6c09525671571e3b3b45469c1d61f81be
jaidevd/monopoly
monopoly.py
[ "MIT" ]
Python
is_cg_developed
<not_specific>
def is_cg_developed(properties): """Check if a colorgroup is fully developed.""" # check validity of colorgroup assert len(set([c.color for c in properties])) == 1 return all([c.has_hotel for c in properties])
Check if a colorgroup is fully developed.
Check if a colorgroup is fully developed.
[ "Check", "if", "a", "colorgroup", "is", "fully", "developed", "." ]
def is_cg_developed(properties): assert len(set([c.color for c in properties])) == 1 return all([c.has_hotel for c in properties])
[ "def", "is_cg_developed", "(", "properties", ")", ":", "assert", "len", "(", "set", "(", "[", "c", ".", "color", "for", "c", "in", "properties", "]", ")", ")", "==", "1", "return", "all", "(", "[", "c", ".", "has_hotel", "for", "c", "in", "properties", "]", ")" ]
Check if a colorgroup is fully developed.
[ "Check", "if", "a", "colorgroup", "is", "fully", "developed", "." ]
[ "\"\"\"Check if a colorgroup is fully developed.\"\"\"", "# check validity of colorgroup" ]
[ { "param": "properties", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "properties", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_cg_developed(properties): assert len(set([c.color for c in properties])) == 1 return all([c.has_hotel for c in properties])
1,081
397
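Any objects exposing .color and .has_hotel will do; a namedtuple stands in for the repo's property class here.

from collections import namedtuple

Prop = namedtuple("Prop", ["color", "has_hotel"])
group = [Prop("red", True), Prop("red", True), Prop("red", False)]
print(is_cg_developed(group))  # False -- one property still lacks a hotel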
83a70023443864ed8f404879014a8a2f374c0f97
shnaizerk/gts
AppServer/google/appengine/tools/dev_appserver.py
[ "Apache-2.0" ]
Python
_StaticFilePathRe
<not_specific>
def _StaticFilePathRe(url_map): """Returns a regular expression string that matches static file paths. Args: url_map: A fully initialized static_files or static_dir appinfo.URLMap instance. Returns: The regular expression matches paths, relative to the application's root directory, of files that this static handler serves. re.compile should accept the returned string. Raises: AssertionError: The url_map argument was not an URLMap for a static handler. """ handler_type = url_map.GetHandlerType() if handler_type == 'static_files': return url_map.upload + '$' elif handler_type == 'static_dir': path = url_map.static_dir.rstrip(os.path.sep) return path + re.escape(os.path.sep) + r'(.*)' assert False, 'This property only applies to static handlers.'
Returns a regular expression string that matches static file paths. Args: url_map: A fully initialized static_files or static_dir appinfo.URLMap instance. Returns: The regular expression matches paths, relative to the application's root directory, of files that this static handler serves. re.compile should accept the returned string. Raises: AssertionError: The url_map argument was not an URLMap for a static handler.
Returns a regular expression string that matches static file paths.
[ "Returns", "a", "regular", "expression", "string", "that", "matches", "static", "file", "paths", "." ]
def _StaticFilePathRe(url_map): handler_type = url_map.GetHandlerType() if handler_type == 'static_files': return url_map.upload + '$' elif handler_type == 'static_dir': path = url_map.static_dir.rstrip(os.path.sep) return path + re.escape(os.path.sep) + r'(.*)' assert False, 'This property only applies to static handlers.'
[ "def", "_StaticFilePathRe", "(", "url_map", ")", ":", "handler_type", "=", "url_map", ".", "GetHandlerType", "(", ")", "if", "handler_type", "==", "'static_files'", ":", "return", "url_map", ".", "upload", "+", "'$'", "elif", "handler_type", "==", "'static_dir'", ":", "path", "=", "url_map", ".", "static_dir", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")", "return", "path", "+", "re", ".", "escape", "(", "os", ".", "path", ".", "sep", ")", "+", "r'(.*)'", "assert", "False", ",", "'This property only applies to static handlers.'" ]
Returns a regular expression string that matches static file paths.
[ "Returns", "a", "regular", "expression", "string", "that", "matches", "static", "file", "paths", "." ]
[ "\"\"\"Returns a regular expression string that matches static file paths.\n\n Args:\n url_map: A fully initialized static_files or static_dir appinfo.URLMap\n instance.\n\n Returns:\n The regular expression matches paths, relative to the application's root\n directory, of files that this static handler serves. re.compile should\n accept the returned string.\n\n Raises:\n AssertionError: The url_map argument was not an URLMap for a static handler.\n \"\"\"" ]
[ { "param": "url_map", "type": null } ]
{ "returns": [ { "docstring": "The regular expression matches paths, relative to the application's root\ndirectory, of files that this static handler serves. re.compile should\naccept the returned string.", "docstring_tokens": [ "The", "regular", "expression", "matches", "paths", "relative", "to", "the", "application", "'", "s", "root", "directory", "of", "files", "that", "this", "static", "handler", "serves", ".", "re", ".", "compile", "should", "accept", "the", "returned", "string", "." ], "type": null } ], "raises": [ { "docstring": "The url_map argument was not an URLMap for a static handler.", "docstring_tokens": [ "The", "url_map", "argument", "was", "not", "an", "URLMap", "for", "a", "static", "handler", "." ], "type": "AssertionError" } ], "params": [ { "identifier": "url_map", "type": null, "docstring": "A fully initialized static_files or static_dir appinfo.URLMap\ninstance.", "docstring_tokens": [ "A", "fully", "initialized", "static_files", "or", "static_dir", "appinfo", ".", "URLMap", "instance", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re import os def _StaticFilePathRe(url_map): handler_type = url_map.GetHandlerType() if handler_type == 'static_files': return url_map.upload + '$' elif handler_type == 'static_dir': path = url_map.static_dir.rstrip(os.path.sep) return path + re.escape(os.path.sep) + r'(.*)' assert False, 'This property only applies to static handlers.'
1,082
453
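A fake URLMap carrying only the three members the function reads; outputs shown assume a POSIX os.path.sep.

class FakeURLMap:
    def __init__(self, handler_type, upload=None, static_dir=None):
        self._type = handler_type
        self.upload = upload
        self.static_dir = static_dir
    def GetHandlerType(self):
        return self._type

print(_StaticFilePathRe(FakeURLMap("static_files", upload=r"static/.*\.css")))
# static/.*\.css$
print(_StaticFilePathRe(FakeURLMap("static_dir", static_dir="assets/")))
# assets/(.*)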
0a9f5151892c2a8bbf44783622766c8fdc6d7003
ambrosejcarr/pysmFISH
pysmFISH/stitching_package/inout.py
[ "MIT" ]
Python
save_to_file
null
def save_to_file(file_name, **kwargs): """Save pickled stitching info to file_name Parameters: ----------- file_name: str The name of the file where the data will be saved. Should be given without extension. **kwarg All keyword argument values will be gathered in a dictionary, with the variable name as their key in the dictionary. This dictionary will then be saved in the pickled file. """ # Add extension file_name = file_name + '.pkl' logger = logging.getLogger(__name__) logger.info("Saving data to: {}".format(file_name)) # Make a dictionary data = {} for key, val in kwargs.items(): data[key] = val logger.debug("Saving data: {}".format(data)) # Write to file with open(file_name, 'wb') as f: pickle.dump(data, f)
Save pickled stitching info to file_name Parameters: ----------- file_name: str The name of the file where the data will be saved. Should be given without extension. **kwarg All keyword argument values will be gathered in a dictionary, with the variable name as their key in the dictionary. This dictionary will then be saved in the pickled file.
Save pickled stitching info to file_name Parameters. str The name of the file where the data will be saved. Should be given without extension. kwarg All keyword argument values will be gathered in a dictionary, with the variable name as their key in the dictionary. This dictionary will then be saved in the pickled file.
[ "Save", "pickled", "stitching", "info", "to", "file_name", "Parameters", ".", "str", "The", "name", "of", "the", "file", "where", "the", "data", "will", "be", "saved", ".", "Should", "be", "given", "without", "extension", ".", "kwarg", "All", "keyword", "argument", "values", "will", "be", "gathered", "in", "a", "dictionary", "with", "the", "variable", "name", "as", "their", "key", "in", "the", "dictionary", ".", "This", "dictionary", "will", "then", "be", "saved", "in", "the", "pickled", "file", "." ]
def save_to_file(file_name, **kwargs): file_name = file_name + '.pkl' logger = logging.getLogger(__name__) logger.info("Saving data to: {}".format(file_name)) data = {} for key, val in kwargs.items(): data[key] = val logger.debug("Saving data: {}".format(data)) with open(file_name, 'wb') as f: pickle.dump(data, f)
[ "def", "save_to_file", "(", "file_name", ",", "**", "kwargs", ")", ":", "file_name", "=", "file_name", "+", "'.pkl'", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "info", "(", "\"Saving data to: {}\"", ".", "format", "(", "file_name", ")", ")", "data", "=", "{", "}", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "data", "[", "key", "]", "=", "val", "logger", ".", "debug", "(", "\"Saving data: {}\"", ".", "format", "(", "data", ")", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "data", ",", "f", ")" ]
Save pickled stitching info to file_name Parameters:
[ "Save", "pickled", "stitching", "info", "to", "file_name", "Parameters", ":" ]
[ "\"\"\"Save pickled stitching info to file_name\n\n Parameters:\n -----------\n\n file_name: str\n The name of the file where the data will be saved. Should be given without extension.\n \n **kwarg\n All keyword argument values will be\n gathered in a dictionary, with the variable name\n as their key in the dictionary. This dictionary will\n then be saved in the pickled file.\n\n \"\"\"", "# Add extension", "# Make a dictionary", "# Write to file" ]
[ { "param": "file_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging import pickle def save_to_file(file_name, **kwargs): file_name = file_name + '.pkl' logger = logging.getLogger(__name__) logger.info("Saving data to: {}".format(file_name)) data = {} for key, val in kwargs.items(): data[key] = val logger.debug("Saving data: {}".format(data)) with open(file_name, 'wb') as f: pickle.dump(data, f)
1,083
203
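Round-trip sketch: the kwargs become dictionary keys in the pickle, which loads cleanly with the standard library.

import pickle

save_to_file("stitching_info", coords=[(0, 0), (10, 0)], overlap=0.1)
with open("stitching_info.pkl", "rb") as f:
    print(pickle.load(f))  # {'coords': [(0, 0), (10, 0)], 'overlap': 0.1}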
dfe8a64f0db20766233f3c1755478f8249832327
philippesanio/proteomics
aminostructurdetection.py
[ "MIT" ]
Python
countStructuresForAA
<not_specific>
def countStructuresForAA(aa="ALA", elemName="all"): """ Detects for a single amino acid how :param aa: 3 letter code amino acid :param elemName: all :return: list of counts in [helices, betasheet, loops, total amount] """ structurList = [0, 0, 0] # [H,S,L] objList = cmd.get_names("objects") obj = objList[0] # elements (right in the protein browser in pymol) models = cmd.get_model(obj + " and resn " + aa + " and name CA") for i in models.atom: if i.ss == "H": structurList[0] += 1 # counting the Helix elif i.ss == "S": structurList[1] += 1 else: structurList[2] += 1 return structurList
Detects for a single amino acid how :param aa: 3 letter code amino acid :param elemName: all :return: list of counts in [helices, betasheet, loops, total amount]
Detects for a single amino acid how
[ "Detects", "for", "a", "single", "amino", "acid", "how" ]
def countStructuresForAA(aa="ALA", elemName="all"): structurList = [0, 0, 0] objList = cmd.get_names("objects") obj = objList[0] models = cmd.get_model(obj + " and resn " + aa + " and name CA") for i in models.atom: if i.ss == "H": structurList[0] += 1 elif i.ss == "S": structurList[1] += 1 else: structurList[2] += 1 return structurList
[ "def", "countStructuresForAA", "(", "aa", "=", "\"ALA\"", ",", "elemName", "=", "\"all\"", ")", ":", "structurList", "=", "[", "0", ",", "0", ",", "0", "]", "objList", "=", "cmd", ".", "get_names", "(", "\"objects\"", ")", "obj", "=", "objList", "[", "0", "]", "models", "=", "cmd", ".", "get_model", "(", "obj", "+", "\" and resn \"", "+", "aa", "+", "\" and name CA\"", ")", "for", "i", "in", "models", ".", "atom", ":", "if", "i", ".", "ss", "==", "\"H\"", ":", "structurList", "[", "0", "]", "+=", "1", "elif", "i", ".", "ss", "==", "\"S\"", ":", "structurList", "[", "1", "]", "+=", "1", "else", ":", "structurList", "[", "2", "]", "+=", "1", "return", "structurList" ]
Detects for a single amino acid how
[ "Detects", "for", "a", "single", "amino", "acid", "how" ]
[ "\"\"\"\n Detects for a single amino acid how\n :param aa: 3 letter code amino acid\n :param elemName: all\n :return: list of counts in [helices, betasheet, loops, total amount]\n \"\"\"", "# [H,S,L]", "# elements (right in the protein browser in pymol)", "# counting the Helix" ]
[ { "param": "aa", "type": null }, { "param": "elemName", "type": null } ]
{ "returns": [ { "docstring": "list of counts in [helices, betasheet, loops, total amount]", "docstring_tokens": [ "list", "of", "counts", "in", "[", "helices", "betasheet", "loops", "total", "amount", "]" ], "type": null } ], "raises": [], "params": [ { "identifier": "aa", "type": null, "docstring": "3 letter code amino acid", "docstring_tokens": [ "3", "letter", "code", "amino", "acid" ], "default": null, "is_optional": null }, { "identifier": "elemName", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from pymol import cmd  # the bare `import cmd` pulls in the stdlib cmd module, which lacks get_names/get_model; this function needs PyMOL's cmd

def countStructuresForAA(aa="ALA", elemName="all"):
    structurList = [0, 0, 0]
    objList = cmd.get_names("objects")
    obj = objList[0]
    models = cmd.get_model(obj + " and resn " + aa + " and name CA")
    for i in models.atom:
        if i.ss == "H":
            structurList[0] += 1
        elif i.ss == "S":
            structurList[1] += 1
        else:
            structurList[2] += 1
    return structurList
1,084
750
c04c87387fd180c24d3e2997e85b0c27170bd57a
mccruz07/tutorials
Workstation/cp_group_option_metadata_maker.py
[ "BSD-3-Clause" ]
Python
find_linenumbers_for_filename_metadata
<not_specific>
def find_linenumbers_for_filename_metadata(pipeline): """ Find the linenumbers that contain the filename metadata info within the Metadata module in a CellProfiler pipeline. * pipeline: a list where each item is a line from a CellProfiler `*.cppipe` file. """ metadata_filename_linenumber_list = [] for ind, line in enumerate(pipeline): if re.match(r'^\s*Metadata source:File name', line) is not None: metadata_filename_linenumber_list.append(ind) return metadata_filename_linenumber_list
Find the linenumbers that contain the filename metadata info within the Metadata module in a CellProfiler pipeline. * pipeline: a list where each item is a line from a CellProfiler `*.cppipe` file.
Find the linenumbers that contain the filename metadata info within the Metadata module in a CellProfiler pipeline. pipeline: a list where each item is a line from a CellProfiler `*.cppipe` file.
[ "Find", "the", "linenumbers", "that", "contain", "the", "filename", "metadata", "info", "within", "the", "Metadata", "module", "in", "a", "CellProfiler", "pipeline", ".", "pipeline", ":", "a", "list", "where", "each", "item", "is", "a", "line", "from", "a", "CellProfiler", "`", "*", ".", "cppipe", "`", "file", "." ]
def find_linenumbers_for_filename_metadata(pipeline): metadata_filename_linenumber_list = [] for ind, line in enumerate(pipeline): if re.match(r'^\s*Metadata source:File name', line) is not None: metadata_filename_linenumber_list.append(ind) return metadata_filename_linenumber_list
[ "def", "find_linenumbers_for_filename_metadata", "(", "pipeline", ")", ":", "metadata_filename_linenumber_list", "=", "[", "]", "for", "ind", ",", "line", "in", "enumerate", "(", "pipeline", ")", ":", "if", "re", ".", "match", "(", "r'^\\s*Metadata source:File name'", ",", "line", ")", "is", "not", "None", ":", "metadata_filename_linenumber_list", ".", "append", "(", "ind", ")", "return", "metadata_filename_linenumber_list" ]
Find the linenumbers that contain the filename metadata info within the Metadata module in a CellProfiler pipeline.
[ "Find", "the", "linenumbers", "that", "contain", "the", "filename", "metadata", "info", "within", "the", "Metadata", "module", "in", "a", "CellProfiler", "pipeline", "." ]
[ "\"\"\"\n Find the linenumbers that contain the filename metadata info within the Metadata module in a CellProfiler pipeline.\n \n * pipeline: a list where each item is a line from a CellProfiler `*.cppipe` file.\n \"\"\"" ]
[ { "param": "pipeline", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "pipeline", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def find_linenumbers_for_filename_metadata(pipeline): metadata_filename_linenumber_list = [] for ind, line in enumerate(pipeline): if re.match(r'^\s*Metadata source:File name', line) is not None: metadata_filename_linenumber_list.append(ind) return metadata_filename_linenumber_list
1,085
667
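A three-line in-memory stand-in for a *.cppipe file; only the indented "Metadata source:File name" line matches the pattern.

pipeline = [
    "Metadata:[module_num:2|svn_version:...]",
    "    Metadata source:File name",
    "    Regular expression:^(?P<Well>[A-P][0-9]{2})",
]
print(find_linenumbers_for_filename_metadata(pipeline))  # [1]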
562ae7fee7648c89d04e701aa3a8c61cb7769499
raymondEhlers/alice-jet-hadron
exploration_scripts/test_hist_ops.py
[ "BSD-3-Clause" ]
Python
print_hists
None
def print_hists(hist_1: Hist, hist_2: Hist) -> None: """ Helper to print the contents of histograms. """ for i in range(1, hist_1.GetXaxis().GetNbins()): print(f"i: {i}") print(f"counts hist_1: {hist_1.GetBinContent(i)}, error: {hist_1.GetBinError(i)}") print(f"counts hist_2: {hist_2.GetBinContent(i)}, error: {hist_2.GetBinError(i)}")
Helper to print the contents of histograms.
Helper to print the contents of histograms.
[ "Helper", "to", "print", "the", "contents", "of", "histograms", "." ]
def print_hists(hist_1: Hist, hist_2: Hist) -> None: for i in range(1, hist_1.GetXaxis().GetNbins()): print(f"i: {i}") print(f"counts hist_1: {hist_1.GetBinContent(i)}, error: {hist_1.GetBinError(i)}") print(f"counts hist_2: {hist_2.GetBinContent(i)}, error: {hist_2.GetBinError(i)}")
[ "def", "print_hists", "(", "hist_1", ":", "Hist", ",", "hist_2", ":", "Hist", ")", "->", "None", ":", "for", "i", "in", "range", "(", "1", ",", "hist_1", ".", "GetXaxis", "(", ")", ".", "GetNbins", "(", ")", ")", ":", "print", "(", "f\"i: {i}\"", ")", "print", "(", "f\"counts hist_1: {hist_1.GetBinContent(i)}, error: {hist_1.GetBinError(i)}\"", ")", "print", "(", "f\"counts hist_2: {hist_2.GetBinContent(i)}, error: {hist_2.GetBinError(i)}\"", ")" ]
Helper to print the contents of histograms.
[ "Helper", "to", "print", "the", "contents", "of", "histograms", "." ]
[ "\"\"\" Helper to print the contents of histograms. \"\"\"" ]
[ { "param": "hist_1", "type": "Hist" }, { "param": "hist_2", "type": "Hist" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "hist_1", "type": "Hist", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "hist_2", "type": "Hist", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from typing import Any

Hist = Any  # stand-in alias; the source repo defines its own Hist type for ROOT histograms

def print_hists(hist_1: Hist, hist_2: Hist) -> None: 
    for i in range(1, hist_1.GetXaxis().GetNbins()): 
        print(f"i: {i}") 
        print(f"counts hist_1: {hist_1.GetBinContent(i)}, error: {hist_1.GetBinError(i)}") 
        print(f"counts hist_2: {hist_2.GetBinContent(i)}, error: {hist_2.GetBinError(i)}")
1,086
507
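Assuming PyROOT is installed (Hist maps to ROOT's TH1 family in the source repo), a quick exercise:

import ROOT

h1 = ROOT.TH1F("h1", "first", 4, 0.0, 4.0)
h2 = ROOT.TH1F("h2", "second", 4, 0.0, 4.0)
h1.Fill(1.5)
h2.Fill(2.5)
print_hists(h1, h2)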
693a0d2d02d864b6440764be9ba59690417aa762
nitin-ebi/eva-accession
eva-accession-clustering-automation/clustering_automation/create_clustering_properties.py
[ "Apache-2.0" ]
Python
check_vcf_source_requirements
null
def check_vcf_source_requirements(source, vcf_file, project_accession): """ This method checks that if the source is VCF the VCF file and project accession are provided """ if source == 'VCF' and not (vcf_file and project_accession): raise ValueError('If the source is VCF the file path and project accession must be provided')
This method checks that if the source is VCF the VCF file and project accession are provided
This method checks that if the source is VCF the VCF file and project accession are provided
[ "This", "method", "checks", "that", "if", "the", "source", "is", "VCF", "the", "VCF", "file", "and", "project", "accession", "are", "provided" ]
def check_vcf_source_requirements(source, vcf_file, project_accession): if source == 'VCF' and not (vcf_file and project_accession): raise ValueError('If the source is VCF the file path and project accession must be provided')
[ "def", "check_vcf_source_requirements", "(", "source", ",", "vcf_file", ",", "project_accession", ")", ":", "if", "source", "==", "'VCF'", "and", "not", "(", "vcf_file", "and", "project_accession", ")", ":", "raise", "ValueError", "(", "'If the source is VCF the file path and project accession must be provided'", ")" ]
This method checks that if the source is VCF the VCF file and project accession are provided
[ "This", "method", "checks", "that", "if", "the", "source", "is", "VCF", "the", "VCF", "file", "and", "project", "accession", "are", "provided" ]
[ "\"\"\"\n This method checks that if the source is VCF the VCF file and project accession are provided\n \"\"\"" ]
[ { "param": "source", "type": null }, { "param": "vcf_file", "type": null }, { "param": "project_accession", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "source", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "vcf_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "project_accession", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_vcf_source_requirements(source, vcf_file, project_accession): if source == 'VCF' and not (vcf_file and project_accession): raise ValueError('If the source is VCF the file path and project accession must be provided')
1,087
813
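Illustrative calls ("MONGO" below is just a hypothetical non-VCF source name):

check_vcf_source_requirements("MONGO", None, None)  # no-op for non-VCF sources
try:
    check_vcf_source_requirements("VCF", "variants.vcf.gz", None)
except ValueError as e:
    print(e)  # If the source is VCF the file path and project accession must be provided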
898455fe2daca1143ad9f82fa0186df5fb7ec6e1
rtgibbons/cloudflare-ip-security-group-update
cf-security-group-update.py
[ "MIT" ]
Python
check_ipv6_rule_exists
<not_specific>
def check_ipv6_rule_exists(rules, address, port): """ Check if the rule currently exists """ for rule in rules: for ip_range in rule['Ipv6Ranges']: if ip_range['CidrIpv6'] == address and rule['FromPort'] == port: return True return False
Check if the rule currently exists
Check if the rule currently exists
[ "Check", "if", "the", "rule", "currently", "exists" ]
def check_ipv6_rule_exists(rules, address, port): for rule in rules: for ip_range in rule['Ipv6Ranges']: if ip_range['CidrIpv6'] == address and rule['FromPort'] == port: return True return False
[ "def", "check_ipv6_rule_exists", "(", "rules", ",", "address", ",", "port", ")", ":", "for", "rule", "in", "rules", ":", "for", "ip_range", "in", "rule", "[", "'Ipv6Ranges'", "]", ":", "if", "ip_range", "[", "'CidrIpv6'", "]", "==", "address", "and", "rule", "[", "'FromPort'", "]", "==", "port", ":", "return", "True", "return", "False" ]
Check if the rule currently exists
[ "Check", "if", "the", "rule", "currently", "exists" ]
[ "\"\"\" Check if the rule currently exists \"\"\"" ]
[ { "param": "rules", "type": null }, { "param": "address", "type": null }, { "param": "port", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "rules", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "address", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "port", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_ipv6_rule_exists(rules, address, port): for rule in rules: for ip_range in rule['Ipv6Ranges']: if ip_range['CidrIpv6'] == address and rule['FromPort'] == port: return True return False
1,088
751
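The rule shape mirrors EC2's describe_security_groups IpPermissions entries; the Cloudflare IPv6 range below is illustrative.

rules = [{"FromPort": 443, "ToPort": 443,
          "Ipv6Ranges": [{"CidrIpv6": "2400:cb00::/32"}]}]
print(check_ipv6_rule_exists(rules, "2400:cb00::/32", 443))  # True
print(check_ipv6_rule_exists(rules, "2400:cb00::/32", 80))   # False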
10cee8d395a8b4a3a3cc1f3b5625287e46d96880
FoIIon/google-ads-python
examples/error_handling/handle_keyword_policy_violations.py
[ "Apache-2.0" ]
Python
_request_exemption
null
def _request_exemption( customer_id, ad_group_criterion_service, ad_group_criterion_operation, exempt_policy_violation_keys, ): """Sends exemption requests for creating a keyword. Args: customer_id: The customer ID for which to add the expanded text ad. ad_group_criterion_service: The AdGroupCriterionService client instance. ad_group_criterion_operation: The AdGroupCriterionOperation for which to request exemption. exempt_policy_violation_keys: The exemptible policy violation keys. """ print( "Attempting to add a keyword again by requesting exemption for its " "policy violations." ) ad_group_criterion_operation.exempt_policy_violation_keys.extend( exempt_policy_violation_keys ) response = ad_group_criterion_service.mutate_ad_group_criteria( customer_id=customer_id, operations=[ad_group_criterion_operation] ) print( "Successfully added a keyword with resource name " f"'{response.results[0].resource_name}' by requesting a policy " "violation exemption." ) # [END handle_keyword_policy_violations_1]
Sends exemption requests for creating a keyword. Args: customer_id: The customer ID for which to add the expanded text ad. ad_group_criterion_service: The AdGroupCriterionService client instance. ad_group_criterion_operation: The AdGroupCriterionOperation for which to request exemption. exempt_policy_violation_keys: The exemptible policy violation keys.
Sends exemption requests for creating a keyword.
[ "Sends", "exemption", "requests", "for", "creating", "a", "keyword", "." ]
def _request_exemption( customer_id, ad_group_criterion_service, ad_group_criterion_operation, exempt_policy_violation_keys, ): print( "Attempting to add a keyword again by requesting exemption for its " "policy violations." ) ad_group_criterion_operation.exempt_policy_violation_keys.extend( exempt_policy_violation_keys ) response = ad_group_criterion_service.mutate_ad_group_criteria( customer_id=customer_id, operations=[ad_group_criterion_operation] ) print( "Successfully added a keyword with resource name " f"'{response.results[0].resource_name}' by requesting a policy " "violation exemption." )
[ "def", "_request_exemption", "(", "customer_id", ",", "ad_group_criterion_service", ",", "ad_group_criterion_operation", ",", "exempt_policy_violation_keys", ",", ")", ":", "print", "(", "\"Attempting to add a keyword again by requesting exemption for its \"", "\"policy violations.\"", ")", "ad_group_criterion_operation", ".", "exempt_policy_violation_keys", ".", "extend", "(", "exempt_policy_violation_keys", ")", "response", "=", "ad_group_criterion_service", ".", "mutate_ad_group_criteria", "(", "customer_id", "=", "customer_id", ",", "operations", "=", "[", "ad_group_criterion_operation", "]", ")", "print", "(", "\"Successfully added a keyword with resource name \"", "f\"'{response.results[0].resource_name}' by requesting a policy \"", "\"violation exemption.\"", ")" ]
Sends exemption requests for creating a keyword.
[ "Sends", "exemption", "requests", "for", "creating", "a", "keyword", "." ]
[ "\"\"\"Sends exemption requests for creating a keyword.\n\n Args:\n customer_id: The customer ID for which to add the expanded text ad.\n ad_group_criterion_service: The AdGroupCriterionService client instance.\n ad_group_criterion_operation: The AdGroupCriterionOperation for which\n to request exemption.\n exempt_policy_violation_keys: The exemptible policy violation keys.\n \"\"\"", "# [END handle_keyword_policy_violations_1]" ]
[ { "param": "customer_id", "type": null }, { "param": "ad_group_criterion_service", "type": null }, { "param": "ad_group_criterion_operation", "type": null }, { "param": "exempt_policy_violation_keys", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "customer_id", "type": null, "docstring": "The customer ID for which to add the expanded text ad.", "docstring_tokens": [ "The", "customer", "ID", "for", "which", "to", "add", "the", "expanded", "text", "ad", "." ], "default": null, "is_optional": null }, { "identifier": "ad_group_criterion_service", "type": null, "docstring": "The AdGroupCriterionService client instance.", "docstring_tokens": [ "The", "AdGroupCriterionService", "client", "instance", "." ], "default": null, "is_optional": null }, { "identifier": "ad_group_criterion_operation", "type": null, "docstring": "The AdGroupCriterionOperation for which\nto request exemption.", "docstring_tokens": [ "The", "AdGroupCriterionOperation", "for", "which", "to", "request", "exemption", "." ], "default": null, "is_optional": null }, { "identifier": "exempt_policy_violation_keys", "type": null, "docstring": "The exemptible policy violation keys.", "docstring_tokens": [ "The", "exemptible", "policy", "violation", "keys", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _request_exemption( customer_id, ad_group_criterion_service, ad_group_criterion_operation, exempt_policy_violation_keys, ): print( "Attempting to add a keyword again by requesting exemption for its " "policy violations." ) ad_group_criterion_operation.exempt_policy_violation_keys.extend( exempt_policy_violation_keys ) response = ad_group_criterion_service.mutate_ad_group_criteria( customer_id=customer_id, operations=[ad_group_criterion_operation] ) print( "Successfully added a keyword with resource name " f"'{response.results[0].resource_name}' by requesting a policy " "violation exemption." )
1,089
753
64642cfa88bdd975a6d448685e619aa5d43769b1
liangw89/faas_measure
code/python/tests.py
[ "BSD-2-Clause" ]
Python
ioload
<not_specific>
def ioload(size, cnt): """ One round of IO throughput test """ proc = subprocess.Popen(["dd", "if=/dev/urandom", "of=/tmp/ioload.log", "bs=%s" % size, "count=%s" % cnt, "conv=fdatasync", "oflag=dsync"], stderr=subprocess.PIPE) out, err = proc.communicate() buf = err.split("\n")[-2].split(",") t, s = buf[-2], buf[-1] t = t.split(" ")[1] # s = s.split(" ")[1] return "%s,%s" % (t, s)
One round of IO throughput test
One round of IO throughput test
[ "One", "round", "of", "IO", "throughput", "test" ]
def ioload(size, cnt): proc = subprocess.Popen(["dd", "if=/dev/urandom", "of=/tmp/ioload.log", "bs=%s" % size, "count=%s" % cnt, "conv=fdatasync", "oflag=dsync"], stderr=subprocess.PIPE) out, err = proc.communicate() buf = err.split("\n")[-2].split(",") t, s = buf[-2], buf[-1] t = t.split(" ")[1] return "%s,%s" % (t, s)
[ "def", "ioload", "(", "size", ",", "cnt", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"dd\"", ",", "\"if=/dev/urandom\"", ",", "\"of=/tmp/ioload.log\"", ",", "\"bs=%s\"", "%", "size", ",", "\"count=%s\"", "%", "cnt", ",", "\"conv=fdatasync\"", ",", "\"oflag=dsync\"", "]", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "proc", ".", "communicate", "(", ")", "buf", "=", "err", ".", "split", "(", "\"\\n\"", ")", "[", "-", "2", "]", ".", "split", "(", "\",\"", ")", "t", ",", "s", "=", "buf", "[", "-", "2", "]", ",", "buf", "[", "-", "1", "]", "t", "=", "t", ".", "split", "(", "\" \"", ")", "[", "1", "]", "return", "\"%s,%s\"", "%", "(", "t", ",", "s", ")" ]
One round of IO throughput test
[ "One", "round", "of", "IO", "throughput", "test" ]
[ "\"\"\" One round of IO throughput test \"\"\"", "# s = s.split(\" \")[1]" ]
[ { "param": "size", "type": null }, { "param": "cnt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cnt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess def ioload(size, cnt): proc = subprocess.Popen(["dd", "if=/dev/urandom", "of=/tmp/ioload.log", "bs=%s" % size, "count=%s" % cnt, "conv=fdatasync", "oflag=dsync"], stderr=subprocess.PIPE) out, err = proc.communicate() buf = err.split("\n")[-2].split(",") t, s = buf[-2], buf[-1] t = t.split(" ")[1] return "%s,%s" % (t, s)
1,090
1,002
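A minimal usage sketch for the ioload record above, assuming a Linux host with GNU dd on the PATH and write access to /tmp; the printed figures are illustrative, not real measurements:

elapsed_and_speed = ioload("1M", 16)  # write 16 blocks of 1 MB, fdatasync'd
print(elapsed_and_speed)              # e.g. "0.05, 336 MB/s" (seconds, throughput)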
e74a6c9b2520a8cbf05fb4519cde1b1e3c56d665
hyroai/wptools
wptools/wptool.py
[ "MIT" ]
Python
_image
<not_specific>
def _image(page): """ returns (preferred) image from wptools object """ pageimage = page.images(token='pageimage') if pageimage: return pageimage[0]['url']
returns (preferred) image from wptools object
returns (preferred) image from wptools object
[ "returns", "(", "preferred", ")", "image", "from", "wptools", "object" ]
def _image(page): pageimage = page.images(token='pageimage') if pageimage: return pageimage[0]['url']
[ "def", "_image", "(", "page", ")", ":", "pageimage", "=", "page", ".", "images", "(", "token", "=", "'pageimage'", ")", "if", "pageimage", ":", "return", "pageimage", "[", "0", "]", "[", "'url'", "]" ]
returns (preferred) image from wptools object
[ "returns", "(", "preferred", ")", "image", "from", "wptools", "object" ]
[ "\"\"\"\n returns (preferred) image from wptools object\n \"\"\"" ]
[ { "param": "page", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "page", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _image(page): pageimage = page.images(token='pageimage') if pageimage: return pageimage[0]['url']
1,091
787
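A usage sketch for _image, assuming network access and the third-party wptools package:

import wptools  # pip install wptools

page = wptools.page("Abraham Lincoln").get()  # fetch page data, including images
print(_image(page))  # URL of the preferred page image, or None if there is none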
a63b41687071b8dc98aa0df8bd3dd87a57c5c90a
Neurita/pypes
neuro_pypes/interfaces/nilearn/image.py
[ "Apache-2.0" ]
Python
_pick_an_input_file
<not_specific>
def _pick_an_input_file(*args, **kwargs): """Assume that either the first arg or the first kwarg is an input file.""" if args: return args[0] else: return list(kwargs.values())[0]
Assume that either the first arg or the first kwarg is an input file.
Assume that either the first arg or the first kwarg is an input file.
[ "Assume", "that", "either", "the", "first", "arg", "or", "the", "first", "kwarg", "is", "an", "input", "file", "." ]
def _pick_an_input_file(*args, **kwargs): if args: return args[0] else: return list(kwargs.values())[0]
[ "def", "_pick_an_input_file", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "args", ":", "return", "args", "[", "0", "]", "else", ":", "return", "list", "(", "kwargs", ".", "values", "(", ")", ")", "[", "0", "]" ]
Assume that either the first arg or the first kwarg is an input file.
[ "Assume", "that", "either", "the", "first", "arg", "or", "the", "first", "kwarg", "is", "an", "input", "file", "." ]
[ "\"\"\"Assume that either the first arg or the first kwarg is an input file.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def _pick_an_input_file(*args, **kwargs): if args: return args[0] else: return list(kwargs.values())[0]
1,092
216
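A quick illustration of _pick_an_input_file with hypothetical file paths:

_pick_an_input_file("/data/t1.nii.gz", mask="/data/mask.nii.gz")  # -> "/data/t1.nii.gz"
_pick_an_input_file(in_file="/data/bold.nii.gz")                  # -> "/data/bold.nii.gz"

Positional arguments win when both are present; with keywords only, the first inserted value is returned (dicts preserve insertion order on Python 3.7+).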
4f3fe69979332f4566d60fe6f3fbba9c9265c0ce
AnnikaCodes/ps-client
psclient/__init__.py
[ "MIT" ]
Python
toID
<not_specific>
def toID(string): """Converts a string into an ID Arguments: string (string): the string to be converted Returns: string: the ID """ return re.sub('[^0-9a-zA-Z]+', '', string).lower()
Converts a string into an ID Arguments: string (string): the string to be converted Returns: string: the ID
Converts a string into an ID
[ "Converts", "a", "string", "into", "an", "ID" ]
def toID(string): return re.sub('[^0-9a-zA-Z]+', '', string).lower()
[ "def", "toID", "(", "string", ")", ":", "return", "re", ".", "sub", "(", "'[^0-9a-zA-Z]+'", ",", "''", ",", "string", ")", ".", "lower", "(", ")" ]
Converts a string into an ID
[ "Converts", "a", "string", "into", "an", "ID" ]
[ "\"\"\"Converts a string into an ID\n\n Arguments:\n string (string): the string to be converted\n\n Returns:\n string: the ID\n \"\"\"" ]
[ { "param": "string", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "string" } ], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": "the string to be converted", "docstring_tokens": [ "the", "string", "to", "be", "converted" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import re def toID(string): return re.sub('[^0-9a-zA-Z]+', '', string).lower()
1,093
618
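toID is pure string manipulation, so a doctest-style sketch suffices (inputs made up):

toID("Pikachu's Z-Move!")  # -> "pikachuszmove"
toID("OU (Gen 8)")         # -> "ougen8"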
f1d91b9203f2b1ef375956739a164d90ca9efda3
katrinleinweber/lombscargle
lombscargle/implementations/utils.py
[ "BSD-3-Clause" ]
Python
bitceil
<not_specific>
def bitceil(N): """ Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N))) """ # Note: for Python 2.7 and 3.x, this is faster: # return 1 << int(N - 1).bit_length() N = int(N) - 1 for i in [1, 2, 4, 8, 16, 32]: N |= N >> i return N + 1
Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
Find the bit immediately greater than or equal to N Note: this works for numbers up to 2 ** 64.
[ "Find", "the", "bit", "immediately", "greater", "than", "or", "equal", "to", "N", "Note", ":", "this", "works", "for", "numbers", "up", "to", "2", "**", "64", "." ]
def bitceil(N): N = int(N) - 1 for i in [1, 2, 4, 8, 16, 32]: N |= N >> i return N + 1
[ "def", "bitceil", "(", "N", ")", ":", "N", "=", "int", "(", "N", ")", "-", "1", "for", "i", "in", "[", "1", ",", "2", ",", "4", ",", "8", ",", "16", ",", "32", "]", ":", "N", "|=", "N", ">>", "i", "return", "N", "+", "1" ]
Find the bit (i.e.
[ "Find", "the", "bit", "(", "i", ".", "e", "." ]
[ "\"\"\"\n Find the bit (i.e. power of 2) immediately greater than or equal to N\n Note: this works for numbers up to 2 ** 64.\n Roughly equivalent to int(2 ** np.ceil(np.log2(N)))\n \"\"\"", "# Note: for Python 2.7 and 3.x, this is faster:", "# return 1 << int(N - 1).bit_length()" ]
[ { "param": "N", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "N", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def bitceil(N): N = int(N) - 1 for i in [1, 2, 4, 8, 16, 32]: N |= N >> i return N + 1
1,094
727
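A few spot checks of bitceil (inputs are arbitrary):

bitceil(1)     # -> 1
bitceil(1000)  # -> 1024
bitceil(1024)  # -> 1024  (powers of two map to themselves)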
f96b050f6f08b128979161d0a0ebfaf3222aafc1
mcgreevy/chromium-infra
appengine/monorail/framework/framework_views.py
[ "BSD-3-Clause" ]
Python
ParseAndObscureAddress
<not_specific>
def ParseAndObscureAddress(email): """Break the given email into username and domain, and obscure. Args: email: string email address to process Returns: A 3-tuple (username, domain, obscured_username). The obscured_username is trucated the same way that Google Groups does it. """ if '@' in email: username, user_domain = email.split('@', 1) else: # don't fail if User table has unexpected email address format. username, user_domain = email, '' base_username = username.split('+')[0] cutoff_point = min(8, max(1, len(base_username) - 3)) obscured_username = base_username[:cutoff_point] return username, user_domain, obscured_username
Break the given email into username and domain, and obscure. Args: email: string email address to process Returns: A 3-tuple (username, domain, obscured_username). The obscured_username is truncated the same way that Google Groups does it.
Break the given email into username and domain, and obscure.
[ "Break", "the", "given", "email", "into", "username", "and", "domain", "and", "obscure", "." ]
def ParseAndObscureAddress(email): if '@' in email: username, user_domain = email.split('@', 1) else: username, user_domain = email, '' base_username = username.split('+')[0] cutoff_point = min(8, max(1, len(base_username) - 3)) obscured_username = base_username[:cutoff_point] return username, user_domain, obscured_username
[ "def", "ParseAndObscureAddress", "(", "email", ")", ":", "if", "'@'", "in", "email", ":", "username", ",", "user_domain", "=", "email", ".", "split", "(", "'@'", ",", "1", ")", "else", ":", "username", ",", "user_domain", "=", "email", ",", "''", "base_username", "=", "username", ".", "split", "(", "'+'", ")", "[", "0", "]", "cutoff_point", "=", "min", "(", "8", ",", "max", "(", "1", ",", "len", "(", "base_username", ")", "-", "3", ")", ")", "obscured_username", "=", "base_username", "[", ":", "cutoff_point", "]", "return", "username", ",", "user_domain", ",", "obscured_username" ]
Break the given email into username and domain, and obscure.
[ "Break", "the", "given", "email", "into", "username", "and", "domain", "and", "obscure", "." ]
[ "\"\"\"Break the given email into username and domain, and obscure.\n\n Args:\n email: string email address to process\n\n Returns:\n A 3-tuple (username, domain, obscured_username).\n The obscured_username is trucated the same way that Google Groups does it.\n \"\"\"", "# don't fail if User table has unexpected email address format." ]
[ { "param": "email", "type": null } ]
{ "returns": [ { "docstring": "A 3-tuple (username, domain, obscured_username).\nThe obscured_username is trucated the same way that Google Groups does it.", "docstring_tokens": [ "A", "3", "-", "tuple", "(", "username", "domain", "obscured_username", ")", ".", "The", "obscured_username", "is", "trucated", "the", "same", "way", "that", "Google", "Groups", "does", "it", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "email", "type": null, "docstring": "string email address to process", "docstring_tokens": [ "string", "email", "address", "to", "process" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ParseAndObscureAddress(email): if '@' in email: username, user_domain = email.split('@', 1) else: username, user_domain = email, '' base_username = username.split('+')[0] cutoff_point = min(8, max(1, len(base_username) - 3)) obscured_username = base_username[:cutoff_point] return username, user_domain, obscured_username
1,095
214
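A sketch with made-up addresses showing the truncation rule (the cutoff is min(8, max(1, len - 3)) characters of the base username):

ParseAndObscureAddress("jrobbins+alerts@example.com")
# -> ("jrobbins+alerts", "example.com", "jrobb")
ParseAndObscureAddress("bo@example.com")
# -> ("bo", "example.com", "b")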
bfaaf008f83f5d8a50cf7d80b6c1a551111d0bfa
ThiagoA20/web-scraper-for-dropshipping
venv/Lib/site-packages/cx_Freeze/darwintools.py
[ "MIT" ]
Python
_printFile
<not_specific>
def _printFile( darwinFile: DarwinFile, seenFiles: Set[DarwinFile], level: int, noRecurse=False, ): """Utility function to prints details about a DarwinFile and (optionally) recursively any other DarwinFiles that it references.""" print("{}{}".format(level * "| ", darwinFile.originalFilePath), end="") print(" (already seen)" if noRecurse else "") if noRecurse: return for ref in darwinFile.machOReferenceDict.values(): if not ref.isCopied: continue mf = ref.targetFile _printFile( mf, seenFiles=seenFiles, level=level + 1, noRecurse=(mf in seenFiles), ) seenFiles.add(mf) return
Utility function to print details about a DarwinFile and (optionally) recursively any other DarwinFiles that it references.
Utility function to print details about a DarwinFile and (optionally) recursively any other DarwinFiles that it references.
[ "Utility", "function", "to", "prints", "details", "about", "a", "DarwinFile", "and", "(", "optionally", ")", "recursively", "any", "other", "DarwinFiles", "that", "it", "references", "." ]
def _printFile( darwinFile: DarwinFile, seenFiles: Set[DarwinFile], level: int, noRecurse=False, ): print("{}{}".format(level * "| ", darwinFile.originalFilePath), end="") print(" (already seen)" if noRecurse else "") if noRecurse: return for ref in darwinFile.machOReferenceDict.values(): if not ref.isCopied: continue mf = ref.targetFile _printFile( mf, seenFiles=seenFiles, level=level + 1, noRecurse=(mf in seenFiles), ) seenFiles.add(mf) return
[ "def", "_printFile", "(", "darwinFile", ":", "DarwinFile", ",", "seenFiles", ":", "Set", "[", "DarwinFile", "]", ",", "level", ":", "int", ",", "noRecurse", "=", "False", ",", ")", ":", "print", "(", "\"{}{}\"", ".", "format", "(", "level", "*", "\"| \"", ",", "darwinFile", ".", "originalFilePath", ")", ",", "end", "=", "\"\"", ")", "print", "(", "\" (already seen)\"", "if", "noRecurse", "else", "\"\"", ")", "if", "noRecurse", ":", "return", "for", "ref", "in", "darwinFile", ".", "machOReferenceDict", ".", "values", "(", ")", ":", "if", "not", "ref", ".", "isCopied", ":", "continue", "mf", "=", "ref", ".", "targetFile", "_printFile", "(", "mf", ",", "seenFiles", "=", "seenFiles", ",", "level", "=", "level", "+", "1", ",", "noRecurse", "=", "(", "mf", "in", "seenFiles", ")", ",", ")", "seenFiles", ".", "add", "(", "mf", ")", "return" ]
Utility function to print details about a DarwinFile and (optionally) recursively any other DarwinFiles that it references.
[ "Utility", "function", "to", "prints", "details", "about", "a", "DarwinFile", "and", "(", "optionally", ")", "recursively", "any", "other", "DarwinFiles", "that", "it", "references", "." ]
[ "\"\"\"Utility function to prints details about a DarwinFile and (optionally) recursively\n any other DarwinFiles that it references.\"\"\"" ]
[ { "param": "darwinFile", "type": "DarwinFile" }, { "param": "seenFiles", "type": "Set[DarwinFile]" }, { "param": "level", "type": "int" }, { "param": "noRecurse", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "darwinFile", "type": "DarwinFile", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "seenFiles", "type": "Set[DarwinFile]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "level", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "noRecurse", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _printFile( darwinFile: DarwinFile, seenFiles: Set[DarwinFile], level: int, noRecurse=False, ): print("{}{}".format(level * "| ", darwinFile.originalFilePath), end="") print(" (already seen)" if noRecurse else "") if noRecurse: return for ref in darwinFile.machOReferenceDict.values(): if not ref.isCopied: continue mf = ref.targetFile _printFile( mf, seenFiles=seenFiles, level=level + 1, noRecurse=(mf in seenFiles), ) seenFiles.add(mf) return
1,096
943
3399f8530216f557d4114903611d17a0c21b46c2
gawseed/threat-feed-tools
gawseed/threatfeed/tools/pklcompare.py
[ "MIT" ]
Python
find_results
<not_specific>
def find_results(input_files, verbose=False): """Read in a (recursive) directory of pkl files and return a list of their contents """ results = [] for infile in input_files: if os.path.isfile(infile): results.append(pickle.load(open(infile, "rb"))) elif os.path.isdir(infile): for dirfile in os.listdir(infile): if dirfile[0] == '.': continue path = os.path.join(infile, dirfile) if os.path.isdir(path): results.extend(find_results([path])) elif dirfile[-4:] == '.pkl': results.append(pickle.load(open(path, "rb"))) elif verbose: sys.stderr.write(f'ignoring {path}') return results
Read in a (recursive) directory of pkl files and return a list of their contents
Read in a (recursive) directory of pkl files and return a list of their contents
[ "Read", "in", "a", "(", "recursive", ")", "directory", "of", "pkl", "files", "and", "return", "a", "list", "of", "their", "contents" ]
def find_results(input_files, verbose=False): results = [] for infile in input_files: if os.path.isfile(infile): results.append(pickle.load(open(infile, "rb"))) elif os.path.isdir(infile): for dirfile in os.listdir(infile): if dirfile[0] == '.': continue path = os.path.join(infile, dirfile) if os.path.isdir(path): results.extend(find_results([path])) elif dirfile[-4:] == '.pkl': results.append(pickle.load(open(path, "rb"))) elif verbose: sys.stderr.write(f'ignoring {path}') return results
[ "def", "find_results", "(", "input_files", ",", "verbose", "=", "False", ")", ":", "results", "=", "[", "]", "for", "infile", "in", "input_files", ":", "if", "os", ".", "path", ".", "isfile", "(", "infile", ")", ":", "results", ".", "append", "(", "pickle", ".", "load", "(", "open", "(", "infile", ",", "\"rb\"", ")", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "infile", ")", ":", "for", "dirfile", "in", "os", ".", "listdir", "(", "infile", ")", ":", "if", "dirfile", "[", "0", "]", "==", "'.'", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "infile", ",", "dirfile", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "results", ".", "extend", "(", "find_results", "(", "[", "path", "]", ")", ")", "elif", "dirfile", "[", "-", "4", ":", "]", "==", "'.pkl'", ":", "results", ".", "append", "(", "pickle", ".", "load", "(", "open", "(", "path", ",", "\"rb\"", ")", ")", ")", "elif", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "f'ignoring {path}'", ")", "return", "results" ]
Read in a (recursive) directory of pkl files and return a list of their contents
[ "Read", "in", "a", "(", "recursive", ")", "directory", "of", "pkl", "files", "and", "return", "a", "list", "of", "their", "contents" ]
[ "\"\"\"Read in a (recursive) directory of pkl files and return a list of\n their contents\n \"\"\"" ]
[ { "param": "input_files", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_files", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys import os import pickle def find_results(input_files, verbose=False): results = [] for infile in input_files: if os.path.isfile(infile): results.append(pickle.load(open(infile, "rb"))) elif os.path.isdir(infile): for dirfile in os.listdir(infile): if dirfile[0] == '.': continue path = os.path.join(infile, dirfile) if os.path.isdir(path): results.extend(find_results([path])) elif dirfile[-4:] == '.pkl': results.append(pickle.load(open(path, "rb"))) elif verbose: sys.stderr.write(f'ignoring {path}') return results
1,097
826
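A usage sketch for find_results, assuming a hypothetical results/ tree of *.pkl files written by earlier runs:

results = find_results(["results"], verbose=True)  # recurses into subdirectories
print(len(results), "pickle payloads loaded")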
bc46b97f868f8da4124963a2565fe6cff93a0ed1
SyanahWynn/2020-EEGMEMPIC
ExperimentalDesign/my.py
[ "CC0-1.0" ]
Python
openCSVFile
<not_specific>
def openCSVFile(ppn=0): """open a data file for output with a filename that nicely uses the current date and time""" directory= "data" if not os.path.isdir(directory): os.mkdir(directory) try: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H:%M:%S')) # ISO compliant datafileCSV = open(filename, 'wb') except Exception as e: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H.%M.%S')) #for MS Windows datafileCSV = open(filename, 'wb') return datafileCSV
open a data file for output with a filename that nicely uses the current date and time
open a data file for output with a filename that nicely uses the current date and time
[ "open", "a", "data", "file", "for", "output", "with", "a", "filename", "that", "nicely", "uses", "the", "current", "date", "and", "time" ]
def openCSVFile(ppn=0): directory= "data" if not os.path.isdir(directory): os.mkdir(directory) try: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H:%M:%S')) datafileCSV = open(filename, 'wb') except Exception as e: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H.%M.%S')) datafileCSV = open(filename, 'wb') return datafileCSV
[ "def", "openCSVFile", "(", "ppn", "=", "0", ")", ":", "directory", "=", "\"data\"", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "os", ".", "mkdir", "(", "directory", ")", "try", ":", "filename", "=", "\"{}/{}_{}.csv\"", ".", "format", "(", "directory", ",", "ppn", ",", "time", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", ")", "datafileCSV", "=", "open", "(", "filename", ",", "'wb'", ")", "except", "Exception", "as", "e", ":", "filename", "=", "\"{}/{}_{}.csv\"", ".", "format", "(", "directory", ",", "ppn", ",", "time", ".", "strftime", "(", "'%Y-%m-%dT%H.%M.%S'", ")", ")", "datafileCSV", "=", "open", "(", "filename", ",", "'wb'", ")", "return", "datafileCSV" ]
open a data file for output with a filename that nicely uses the current date and time
[ "open", "a", "data", "file", "for", "output", "with", "a", "filename", "that", "nicely", "uses", "the", "current", "date", "and", "time" ]
[ "\"\"\"open a data file for output with a filename that nicely uses the current date and time\"\"\"", "# ISO compliant", "#for MS Windows" ]
[ { "param": "ppn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ppn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time import os def openCSVFile(ppn=0): directory= "data" if not os.path.isdir(directory): os.mkdir(directory) try: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H:%M:%S')) datafileCSV = open(filename, 'wb') except Exception as e: filename="{}/{}_{}.csv".format(directory, ppn, time.strftime('%Y-%m-%dT%H.%M.%S')) datafileCSV = open(filename, 'wb') return datafileCSV
1,098
79
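A usage sketch for openCSVFile; note the file is opened in binary mode ('wb'), so writes must be bytes:

datafile = openCSVFile(ppn=7)   # creates data/7_<ISO timestamp>.csv
datafile.write(b"trial,rt\n")
datafile.close()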
cacb5af127a02abbd90d2e091a260242e5099ed4
lucashelfs/DOUTOR
src/parse_dou_article.py
[ "MIT" ]
Python
branch_text
<not_specific>
def branch_text(branch): """ Takes and lxml tree element 'branch' and returns its text, joined to its children's tails (i.e. the content that follows childrens of 'branch'). """ texts = list(filter(lambda s: s != None, [branch.text] + [child.tail for child in branch])) if len(texts) == 0: return None text = ' | '.join(texts) return text
Takes an lxml tree element 'branch' and returns its text, joined to its children's tails (i.e. the content that follows children of 'branch').
Takes an lxml tree element 'branch' and returns its text, joined to its children's tails .
[ "Takes", "and", "lxml", "tree", "element", "'", "branch", "'", "and", "returns", "its", "text", "joined", "to", "its", "children", "'", "s", "tails", "." ]
def branch_text(branch): texts = list(filter(lambda s: s != None, [branch.text] + [child.tail for child in branch])) if len(texts) == 0: return None text = ' | '.join(texts) return text
[ "def", "branch_text", "(", "branch", ")", ":", "texts", "=", "list", "(", "filter", "(", "lambda", "s", ":", "s", "!=", "None", ",", "[", "branch", ".", "text", "]", "+", "[", "child", ".", "tail", "for", "child", "in", "branch", "]", ")", ")", "if", "len", "(", "texts", ")", "==", "0", ":", "return", "None", "text", "=", "' | '", ".", "join", "(", "texts", ")", "return", "text" ]
Takes an lxml tree element 'branch' and returns its text, joined to its children's tails (i.e.
[ "Takes", "and", "lxml", "tree", "element", "'", "branch", "'", "and", "returns", "its", "text", "joined", "to", "its", "children", "'", "s", "tails", "(", "i", ".", "e", "." ]
[ "\"\"\"\n Takes and lxml tree element 'branch' and returns its text, \n joined to its children's tails (i.e. the content that follows \n childrens of 'branch').\n \"\"\"" ]
[ { "param": "branch", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "branch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def branch_text(branch): texts = list(filter(lambda s: s != None, [branch.text] + [child.tail for child in branch])) if len(texts) == 0: return None text = ' | '.join(texts) return text
1,099
993
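A sketch for branch_text, assuming the third-party lxml package:

from lxml import etree  # pip install lxml

branch = etree.fromstring("<p>lead <b>bold</b> tail <i>x</i> end</p>")
branch_text(branch)  # -> "lead  |  tail  |  end" (children's own text is skipped)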
c10c74411d296db5313808eb65bcd6cbfb65818d
daniele-athome/rhasspy-hermes
rhasspyhermes/asr.py
[ "MIT" ]
Python
topic
str
def topic(cls, **kwargs) -> str: """Get MQTT topic for this message type.""" site_id = kwargs.get("site_id", "+") session_id = kwargs.get("session_id", "+") return f"rhasspy/asr/{site_id}/{session_id}/audioCaptured"
Get MQTT topic for this message type.
Get MQTT topic for this message type.
[ "Get", "MQTT", "topic", "for", "this", "message", "type", "." ]
def topic(cls, **kwargs) -> str: site_id = kwargs.get("site_id", "+") session_id = kwargs.get("session_id", "+") return f"rhasspy/asr/{site_id}/{session_id}/audioCaptured"
[ "def", "topic", "(", "cls", ",", "**", "kwargs", ")", "->", "str", ":", "site_id", "=", "kwargs", ".", "get", "(", "\"site_id\"", ",", "\"+\"", ")", "session_id", "=", "kwargs", ".", "get", "(", "\"session_id\"", ",", "\"+\"", ")", "return", "f\"rhasspy/asr/{site_id}/{session_id}/audioCaptured\"" ]
Get MQTT topic for this message type.
[ "Get", "MQTT", "topic", "for", "this", "message", "type", "." ]
[ "\"\"\"Get MQTT topic for this message type.\"\"\"" ]
[ { "param": "cls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def topic(cls, **kwargs) -> str: site_id = kwargs.get("site_id", "+") session_id = kwargs.get("session_id", "+") return f"rhasspy/asr/{site_id}/{session_id}/audioCaptured"
1,100
133
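In the record above this is a classmethod of an ASR message class that is not shown; since cls is unused, the unbound function can be exercised directly for illustration:

topic(None, site_id="livingroom", session_id="abc123")
# -> "rhasspy/asr/livingroom/abc123/audioCaptured"
topic(None)  # omitted ids fall back to MQTT '+' wildcards
# -> "rhasspy/asr/+/+/audioCaptured"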
2a403f71acf83a60015e28f886cb55fea801f530
notpeter/legiscrape
legiscrape/captions.py
[ "MIT" ]
Python
timecode
<not_specific>
def timecode(seconds, seperator="."): """Takes seconds and returns SRT timecode format: e.g. 00:02:36,894""" # Leading & trailing zeros matter: 1.5 -> 00:00:01,500 # SRT uses ',' before fractional seconds seperator, WebVTT uses '.' seconds = float(seconds) timecode_fmt = "%(hour)02d:%(minute)02d:%(second)02d%(ms_seperator)s%(ms)03d" return timecode_fmt % {'hour': seconds // 3600, 'minute': seconds // 60 % 60, 'second': seconds % 60, 'ms': seconds % 1 * 1000, 'ms_seperator': seperator}
Takes seconds and returns SRT timecode format: e.g. 00:02:36,894
Takes seconds and returns SRT timecode format: e.g.
[ "Takes", "seconds", "and", "returns", "SRT", "timecode", "format", ":", "e", ".", "g", "." ]
def timecode(seconds, seperator="."): seconds = float(seconds) timecode_fmt = "%(hour)02d:%(minute)02d:%(second)02d%(ms_seperator)s%(ms)03d" return timecode_fmt % {'hour': seconds // 3600, 'minute': seconds // 60 % 60, 'second': seconds % 60, 'ms': seconds % 1 * 1000, 'ms_seperator': seperator}
[ "def", "timecode", "(", "seconds", ",", "seperator", "=", "\".\"", ")", ":", "seconds", "=", "float", "(", "seconds", ")", "timecode_fmt", "=", "\"%(hour)02d:%(minute)02d:%(second)02d%(ms_seperator)s%(ms)03d\"", "return", "timecode_fmt", "%", "{", "'hour'", ":", "seconds", "//", "3600", ",", "'minute'", ":", "seconds", "//", "60", "%", "60", ",", "'second'", ":", "seconds", "%", "60", ",", "'ms'", ":", "seconds", "%", "1", "*", "1000", ",", "'ms_seperator'", ":", "seperator", "}" ]
Takes seconds and returns SRT timecode format: e.g.
[ "Takes", "seconds", "and", "returns", "SRT", "timecode", "format", ":", "e", ".", "g", "." ]
[ "\"\"\"Takes seconds and returns SRT timecode format: e.g. 00:02:36,894\"\"\"", "# Leading & trailing zeros matter: 1.5 -> 00:00:01,500", "# SRT uses ',' before fractional seconds seperator, WebVTT uses '.'" ]
[ { "param": "seconds", "type": null }, { "param": "seperator", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "seconds", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "seperator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def timecode(seconds, seperator="."): seconds = float(seconds) timecode_fmt = "%(hour)02d:%(minute)02d:%(second)02d%(ms_seperator)s%(ms)03d" return timecode_fmt % {'hour': seconds // 3600, 'minute': seconds // 60 % 60, 'second': seconds % 60, 'ms': seconds % 1 * 1000, 'ms_seperator': seperator}
1,103
241
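Spot checks for timecode, using the exactly representable value 1.5 s to avoid floating-point rounding noise:

timecode(1.5)                 # -> "00:00:01.500"  (WebVTT-style '.')
timecode(1.5, seperator=",")  # -> "00:00:01,500"  (SRT-style ',')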
889067a3aadc69e45d148dd79cf1ed88b545267d
PythonMove/Mining-tools
mining-calculator/xmr/mining_math.py
[ "MIT" ]
Python
daily_profit
float
def daily_profit(daily_mined: float, xmr_price: float, kwh: float, kwh_cost: float, pool_fee: float, precision: int) -> float: """ Computes how much money you gain after paying the daily mining expense. Formula: (daily_income - daily_electricity_cost) * (100% - pool_fee) :param daily_mined: Float. Amount of XMR you can mine per day. :param xmr_price: Float. Current market price of XMR coin. :param kwh: Float. Electricity usage in kilowatts per hour. :param kwh_cost: Float. Electricity cost you have to pay for kwh. :param pool_fee: Float. Mining pool fee. :param precision: Int. Precision of computing. :return: Float. Money you gain after paying the daily mining expense. """ return round(((daily_mined * xmr_price) - (kwh * 24 * kwh_cost)) * (1 - pool_fee), precision)
Computes how much money you gain after paying the daily mining expense. Formula: (daily_income - daily_electricity_cost) * (100% - pool_fee) :param daily_mined: Float. Amount of XMR you can mine per day. :param xmr_price: Float. Current market price of XMR coin. :param kwh: Float. Electricity usage in kilowatts per hour. :param kwh_cost: Float. Electricity cost you have to pay for kwh. :param pool_fee: Float. Mining pool fee. :param precision: Int. Precision of computing. :return: Float. Money you gain after paying the daily mining expense.
Computes how much money you gain after paying the daily mining expense.
[ "Computes", "how", "much", "money", "you", "gain", "after", "paying", "the", "daily", "mining", "expense", "." ]
def daily_profit(daily_mined: float, xmr_price: float, kwh: float, kwh_cost: float, pool_fee: float, precision: int) -> float: return round(((daily_mined * xmr_price) - (kwh * 24 * kwh_cost)) * (1 - pool_fee), precision)
[ "def", "daily_profit", "(", "daily_mined", ":", "float", ",", "xmr_price", ":", "float", ",", "kwh", ":", "float", ",", "kwh_cost", ":", "float", ",", "pool_fee", ":", "float", ",", "precision", ":", "int", ")", "->", "float", ":", "return", "round", "(", "(", "(", "daily_mined", "*", "xmr_price", ")", "-", "(", "kwh", "*", "24", "*", "kwh_cost", ")", ")", "*", "(", "1", "-", "pool_fee", ")", ",", "precision", ")" ]
Computes how much money you gain after paying the daily mining expense.
[ "Computes", "how", "much", "money", "you", "gain", "after", "paying", "the", "daily", "mining", "expense", "." ]
[ "\"\"\"\n Computes how much money you gain after paying the daily mining expense. Formula:\n (daily_income - daily_electricity_cost) * (100% - pool_fee)\n\n :param daily_mined: Float. Amount of XMR you can mine per day.\n :param xmr_price: Float. Current market price of XMR coin.\n :param kwh: Float. Electricity usage in kilowatts per hour.\n :param kwh_cost: Float. Electricity cost you have to pay for kwh.\n :param pool_fee: Float. Mining pool fee.\n :param precision: Int. Precision of computing.\n :return: Float. Money you gain after paying the daily mining expense.\n \"\"\"" ]
[ { "param": "daily_mined", "type": "float" }, { "param": "xmr_price", "type": "float" }, { "param": "kwh", "type": "float" }, { "param": "kwh_cost", "type": "float" }, { "param": "pool_fee", "type": "float" }, { "param": "precision", "type": "int" } ]
{ "returns": [ { "docstring": "Float. Money you gain after paying the daily mining expense.", "docstring_tokens": [ "Float", ".", "Money", "you", "gain", "after", "paying", "the", "daily", "mining", "expense", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "daily_mined", "type": "float", "docstring": "Float. Amount of XMR you can mine per day.", "docstring_tokens": [ "Float", ".", "Amount", "of", "XMR", "you", "can", "mine", "per", "day", "." ], "default": null, "is_optional": null }, { "identifier": "xmr_price", "type": "float", "docstring": "Float. Current market price of XMR coin.", "docstring_tokens": [ "Float", ".", "Current", "market", "price", "of", "XMR", "coin", "." ], "default": null, "is_optional": null }, { "identifier": "kwh", "type": "float", "docstring": "Float. Electricity usage in kilowatts per hour.", "docstring_tokens": [ "Float", ".", "Electricity", "usage", "in", "kilowatts", "per", "hour", "." ], "default": null, "is_optional": null }, { "identifier": "kwh_cost", "type": "float", "docstring": "Float. Electricity cost you have to pay for kwh.", "docstring_tokens": [ "Float", ".", "Electricity", "cost", "you", "have", "to", "pay", "for", "kwh", "." ], "default": null, "is_optional": null }, { "identifier": "pool_fee", "type": "float", "docstring": "Float. Mining pool fee.", "docstring_tokens": [ "Float", ".", "Mining", "pool", "fee", "." ], "default": null, "is_optional": null }, { "identifier": "precision", "type": "int", "docstring": "Int. Precision of computing.", "docstring_tokens": [ "Int", ".", "Precision", "of", "computing", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def daily_profit(daily_mined: float, xmr_price: float, kwh: float, kwh_cost: float, pool_fee: float, precision: int) -> float: return round(((daily_mined * xmr_price) - (kwh * 24 * kwh_cost)) * (1 - pool_fee), precision)
1,104
805
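A worked example with hypothetical figures: 0.002 XMR/day at $150/XMR, a rig drawing 0.8 kW at $0.12/kWh, a 1% pool fee, rounded to cents:

daily_profit(0.002, 150.0, 0.8, 0.12, 0.01, 2)
# (0.002*150 - 0.8*24*0.12) * (1 - 0.01) = (0.30 - 2.304) * 0.99 = -1.98396 -> -1.98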
d731e392f8866b4277145908edeb563da89e15d3
broadinstitute/gene-hints
gene_hints/citations/enrich_citations.py
[ "BSD-3-Clause" ]
Python
sort_genes
<not_specific>
def sort_genes(gene_dict, key): """Return list of genes ordered by key, in descending order """ sorted_genes = sorted( gene_dict.items(), key=lambda x: x[1][key], reverse=True ) # print(f"Gene with highest {key}: {str(sorted_genes[0])}") return sorted_genes
Return list of genes ordered by key, in descending order
Return list of genes ordered by key, in descending order
[ "Return", "list", "of", "genes", "ordered", "by", "key", "in", "descending", "order" ]
def sort_genes(gene_dict, key): sorted_genes = sorted( gene_dict.items(), key=lambda x: x[1][key], reverse=True ) return sorted_genes
[ "def", "sort_genes", "(", "gene_dict", ",", "key", ")", ":", "sorted_genes", "=", "sorted", "(", "gene_dict", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "key", "]", ",", "reverse", "=", "True", ")", "return", "sorted_genes" ]
Return list of genes ordered by key, in descending order
[ "Return", "list", "of", "genes", "ordered", "by", "key", "in", "descending", "order" ]
[ "\"\"\"Return list of genes ordered by key, in descending order\n \"\"\"", "# print(f\"Gene with highest {key}: {str(sorted_genes[0])}\")" ]
[ { "param": "gene_dict", "type": null }, { "param": "key", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "gene_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sort_genes(gene_dict, key): sorted_genes = sorted( gene_dict.items(), key=lambda x: x[1][key], reverse=True ) return sorted_genes
1,105
411
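A sketch for sort_genes with a hypothetical two-gene dict:

genes = {
    "TP53":  {"citations": 12},
    "BRCA1": {"citations": 30},
}
sort_genes(genes, "citations")
# -> [("BRCA1", {"citations": 30}), ("TP53", {"citations": 12})]  (descending)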
c3cad02d3c70b072ad872d0a60232d8073885cd1
andrethemac/wipyDHTSIM800
wipyDHTSIM800.py
[ "MIT" ]
Python
decode
<not_specific>
def decode(inp): """ convert the data from the sensor to 4 values :param inp: the bits read from the sensor :return: 4 values containing the temp and humidity """ res = [0] * 5 bits = [] ix = 0 try: if inp[0] == 1: ix = inp.index(0, ix) # skip to first 0 ix = inp.index(1, ix) # skip first 0's to next 1 ix = inp.index(0, ix) # skip first 1's to next 0 while len(bits) < len(res) * 8: # need 5 * 8 bits : ix = inp.index(1, ix) # index of next 1 ie = inp.index(0, ix) # nr of 1's = ie-ix bits.append(ie - ix) ix = ie except: return ([0xff, 0xff, 0xff, 0xff]) for i in range(len(res)): for v in bits[i * 8: (i + 1) * 8]: # process next 8 bit res[i] <<= 1 # shift byte one place to left if v > 2: res[i] += 1 # and add 1 if lsb is 1 if (res[0] + res[1] + res[2] + res[3]) & 0xff != res[4]: # parity error! print("Checksum Error") res = [0xff, 0xff, 0xff, 0xff] return (res[0:4])
convert the data from the sensor to 4 values :param inp: the bits read from the sensor :return: 4 values containing the temp and humidity
convert the data from the sensor to 4 values
[ "convert", "the", "data", "from", "the", "sensor", "to", "4", "values" ]
def decode(inp): res = [0] * 5 bits = [] ix = 0 try: if inp[0] == 1: ix = inp.index(0, ix) ix = inp.index(1, ix) ix = inp.index(0, ix) while len(bits) < len(res) * 8: ix = inp.index(1, ix) ie = inp.index(0, ix) bits.append(ie - ix) ix = ie except: return ([0xff, 0xff, 0xff, 0xff]) for i in range(len(res)): for v in bits[i * 8: (i + 1) * 8]: res[i] <<= 1 if v > 2: res[i] += 1 if (res[0] + res[1] + res[2] + res[3]) & 0xff != res[4]: print("Checksum Error") res = [0xff, 0xff, 0xff, 0xff] return (res[0:4])
[ "def", "decode", "(", "inp", ")", ":", "res", "=", "[", "0", "]", "*", "5", "bits", "=", "[", "]", "ix", "=", "0", "try", ":", "if", "inp", "[", "0", "]", "==", "1", ":", "ix", "=", "inp", ".", "index", "(", "0", ",", "ix", ")", "ix", "=", "inp", ".", "index", "(", "1", ",", "ix", ")", "ix", "=", "inp", ".", "index", "(", "0", ",", "ix", ")", "while", "len", "(", "bits", ")", "<", "len", "(", "res", ")", "*", "8", ":", "ix", "=", "inp", ".", "index", "(", "1", ",", "ix", ")", "ie", "=", "inp", ".", "index", "(", "0", ",", "ix", ")", "bits", ".", "append", "(", "ie", "-", "ix", ")", "ix", "=", "ie", "except", ":", "return", "(", "[", "0xff", ",", "0xff", ",", "0xff", ",", "0xff", "]", ")", "for", "i", "in", "range", "(", "len", "(", "res", ")", ")", ":", "for", "v", "in", "bits", "[", "i", "*", "8", ":", "(", "i", "+", "1", ")", "*", "8", "]", ":", "res", "[", "i", "]", "<<=", "1", "if", "v", ">", "2", ":", "res", "[", "i", "]", "+=", "1", "if", "(", "res", "[", "0", "]", "+", "res", "[", "1", "]", "+", "res", "[", "2", "]", "+", "res", "[", "3", "]", ")", "&", "0xff", "!=", "res", "[", "4", "]", ":", "print", "(", "\"Checksum Error\"", ")", "res", "=", "[", "0xff", ",", "0xff", ",", "0xff", ",", "0xff", "]", "return", "(", "res", "[", "0", ":", "4", "]", ")" ]
convert the data from the sensor to 4 values
[ "convert", "the", "data", "from", "the", "sensor", "to", "4", "values" ]
[ "\"\"\"\n convert the data from the sensor to 4 values\n :param inp: the bits read from the sensor\n :return: 4 values containing the temp and humidity\n \"\"\"", "# skip to first 0", "# skip first 0's to next 1", "# skip first 1's to next 0", "# need 5 * 8 bits :", "# index of next 1", "# nr of 1's = ie-ix", "# process next 8 bit", "# shift byte one place to left", "# and add 1 if lsb is 1", "# parity error!" ]
[ { "param": "inp", "type": null } ]
{ "returns": [ { "docstring": "4 values containing the temp and humidity", "docstring_tokens": [ "4", "values", "containing", "the", "temp", "and", "humidity" ], "type": null } ], "raises": [], "params": [ { "identifier": "inp", "type": null, "docstring": "the bits read from the sensor", "docstring_tokens": [ "the", "bits", "read", "from", "the", "sensor" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def decode(inp): res = [0] * 5 bits = [] ix = 0 try: if inp[0] == 1: ix = inp.index(0, ix) ix = inp.index(1, ix) ix = inp.index(0, ix) while len(bits) < len(res) * 8: ix = inp.index(1, ix) ie = inp.index(0, ix) bits.append(ie - ix) ix = ie except: return ([0xff, 0xff, 0xff, 0xff]) for i in range(len(res)): for v in bits[i * 8: (i + 1) * 8]: res[i] <<= 1 if v > 2: res[i] += 1 if (res[0] + res[1] + res[2] + res[3]) & 0xff != res[4]: print("Checksum Error") res = [0xff, 0xff, 0xff, 0xff] return (res[0:4])
1,106
489
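Two illustrative notes for decode: the error path below is exact, while real readings depend on the sensor's pulse train:

decode([0] * 100)  # no start pulse -> index() raises -> [255, 255, 255, 255]
# A successful DHT11 read returns [rh_int, rh_dec, temp_int, temp_dec],
# e.g. [45, 0, 21, 0] for 45 % RH and 21 C (illustrative values only).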
2527bfc0faa6c36073521e683918927472462b94
NelsonDaniel/SiamDW
lib/models/modules.py
[ "MIT" ]
Python
center_crop7
<not_specific>
def center_crop7(x): """ Center crop layer for stage1 of resnet. (7*7) input x can be a Variable or Tensor """ return x[:, :, 2:-2, 2:-2].contiguous()
Center crop layer for stage1 of resnet. (7*7) input x can be a Variable or Tensor
Center crop layer for stage1 of resnet. (7*7) input x can be a Variable or Tensor
[ "Center", "crop", "layer", "for", "stage1", "of", "resnet", ".", "(", "7", "*", "7", ")", "input", "x", "can", "be", "a", "Variable", "or", "Tensor" ]
def center_crop7(x): return x[:, :, 2:-2, 2:-2].contiguous()
[ "def", "center_crop7", "(", "x", ")", ":", "return", "x", "[", ":", ",", ":", ",", "2", ":", "-", "2", ",", "2", ":", "-", "2", "]", ".", "contiguous", "(", ")" ]
Center crop layer for stage1 of resnet.
[ "Center", "crop", "layer", "for", "stage1", "of", "resnet", "." ]
[ "\"\"\"\r\n Center crop layer for stage1 of resnet. (7*7)\r\n input x can be a Variable or Tensor\r\n \"\"\"" ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def center_crop7(x): return x[:, :, 2:-2, 2:-2].contiguous()
1,107
58
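A shape check for center_crop7, assuming PyTorch (the docstring says the input is a Variable or Tensor):

import torch

x = torch.randn(1, 64, 7, 7)
center_crop7(x).shape  # -> torch.Size([1, 64, 3, 3]); 2 px trimmed from each edge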
b872034a00bbcc2536b12b84e36fd48cf849f1cf
shanemmattner/skidl
skidl/part_query.py
[ "MIT" ]
Python
parse_search_terms
<not_specific>
def parse_search_terms(terms): """ Return a regular expression for a sequence of search terms. Substitute a zero-width lookahead assertion (?= ) for each term. Thus, the "abc def" would become "(?=.*(abc))(?=.*(def))" and would match any string containing both "abc" and "def". Or "abc (def|ghi)" would become "(?=.*(abc))((?=.*(def|ghi))" and would match any string containing "abc" and "def" or "ghi". Quoted terms can be used for phrases containing whitespace. """ # Place the quote-delimited REs before the RE for sequences of # non-white chars to prevent the initial portion of a quoted string from being # gathered up as a non-white character sequence. terms = terms.strip().rstrip() # Remove leading/trailing spaces. terms = re.sub(r"\s*\|\s*", r"|", terms) # Remove spaces around OR operator. terms = re.sub(r"((\".*?\")|(\'.*?\')|(\S+))\s*", r"(?=.*(\1))", terms) terms = re.sub(r"[\'\"]", "", terms) # Remove quotes. terms = terms + '.*' return terms
Return a regular expression for a sequence of search terms. Substitute a zero-width lookahead assertion (?= ) for each term. Thus, "abc def" would become "(?=.*(abc))(?=.*(def))" and would match any string containing both "abc" and "def". Or "abc (def|ghi)" would become "(?=.*(abc))(?=.*(def|ghi))" and would match any string containing "abc" and "def" or "ghi". Quoted terms can be used for phrases containing whitespace.
Return a regular expression for a sequence of search terms. Substitute a zero-width lookahead assertion (?= ) for each term.
[ "Return", "a", "regular", "expression", "for", "a", "sequence", "of", "search", "terms", ".", "Substitute", "a", "zero", "-", "width", "lookahead", "assertion", "(", "?", "=", ")", "for", "each", "term", "." ]
def parse_search_terms(terms): terms = terms.strip().rstrip() terms = re.sub(r"\s*\|\s*", r"|", terms) terms = re.sub(r"((\".*?\")|(\'.*?\')|(\S+))\s*", r"(?=.*(\1))", terms) terms = re.sub(r"[\'\"]", "", terms) terms = terms + '.*' return terms
[ "def", "parse_search_terms", "(", "terms", ")", ":", "terms", "=", "terms", ".", "strip", "(", ")", ".", "rstrip", "(", ")", "terms", "=", "re", ".", "sub", "(", "r\"\\s*\\|\\s*\"", ",", "r\"|\"", ",", "terms", ")", "terms", "=", "re", ".", "sub", "(", "r\"((\\\".*?\\\")|(\\'.*?\\')|(\\S+))\\s*\"", ",", "r\"(?=.*(\\1))\"", ",", "terms", ")", "terms", "=", "re", ".", "sub", "(", "r\"[\\'\\\"]\"", ",", "\"\"", ",", "terms", ")", "terms", "=", "terms", "+", "'.*'", "return", "terms" ]
Return a regular expression for a sequence of search terms.
[ "Return", "a", "regular", "expression", "for", "a", "sequence", "of", "search", "terms", "." ]
[ "\"\"\"\n Return a regular expression for a sequence of search terms.\n\n Substitute a zero-width lookahead assertion (?= ) for each term. Thus,\n the \"abc def\" would become \"(?=.*(abc))(?=.*(def))\" and would match any string\n containing both \"abc\" and \"def\". Or \"abc (def|ghi)\" would become\n \"(?=.*(abc))((?=.*(def|ghi))\" and would match any string containing\n \"abc\" and \"def\" or \"ghi\". Quoted terms can be used for phrases containing\n whitespace.\n \"\"\"", "# Place the quote-delimited REs before the RE for sequences of", "# non-white chars to prevent the initial portion of a quoted string from being", "# gathered up as a non-white character sequence.", "# Remove leading/trailing spaces.", "# Remove spaces around OR operator.", "# Remove quotes." ]
[ { "param": "terms", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "terms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def parse_search_terms(terms): terms = terms.strip().rstrip() terms = re.sub(r"\s*\|\s*", r"|", terms) terms = re.sub(r"((\".*?\")|(\'.*?\')|(\S+))\s*", r"(?=.*(\1))", terms) terms = re.sub(r"[\'\"]", "", terms) terms = terms + '.*' return terms
1,108
312
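A sketch for parse_search_terms showing the generated lookahead pattern and a match; the query and haystack strings are made up:

import re

pattern = parse_search_terms('resistor "0805 smd"')
# pattern == '(?=.*(resistor))(?=.*(0805 smd)).*'
bool(re.search(pattern, "0805 smd chip resistor, 10k"))  # -> True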
43a63374d974d972e7b4e4741d0e7d3cb2e70f5b
hacknaman/glStream
spu/print/enums.py
[ "MIT" ]
Python
GenerateBlock
<not_specific>
def GenerateBlock(group): """group is a list of (name, value) pairs in which all values have the same Most Significant Byte. Prefix the list with, and insert dummy (name, value) pairs so that value % 256 = position in the list for all values. Example input: [ ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("GL_BAZ", 0xcc05) ] And the result: [ ("", 0xcc00), ("", 0xcc01), ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("", 0xcc004), ("GL_BAZ", 0xcc05) ] """ i = 0 block = [] for (name, value) in group: while i < value % 256: block.append(("", value)) i += 1 assert i == value % 256 block.append((name, value)) i += 1 return block
group is a list of (name, value) pairs in which all values have the same Most Significant Byte. Prefix the list with, and insert dummy (name, value) pairs so that value % 256 = position in the list for all values. Example input: [ ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("GL_BAZ", 0xcc05) ] And the result: [ ("", 0xcc00), ("", 0xcc01), ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("", 0xcc004), ("GL_BAZ", 0xcc05) ]
group is a list of (name, value) pairs in which all values have the same Most Significant Byte. Prefix the list with, and insert dummy (name, value) pairs so that value % 256 = position in the list for all values.
[ "group", "is", "a", "list", "of", "(", "name", "value", ")", "pairs", "in", "which", "all", "values", "have", "the", "same", "Most", "Significant", "Byte", ".", "Prefix", "the", "list", "with", "and", "insert", "dummy", "(", "name", "value", ")", "pairs", "so", "that", "value", "%", "256", "=", "position", "in", "the", "list", "for", "all", "values", "." ]
def GenerateBlock(group): i = 0 block = [] for (name, value) in group: while i < value % 256: block.append(("", value)) i += 1 assert i == value % 256 block.append((name, value)) i += 1 return block
[ "def", "GenerateBlock", "(", "group", ")", ":", "i", "=", "0", "block", "=", "[", "]", "for", "(", "name", ",", "value", ")", "in", "group", ":", "while", "i", "<", "value", "%", "256", ":", "block", ".", "append", "(", "(", "\"\"", ",", "value", ")", ")", "i", "+=", "1", "assert", "i", "==", "value", "%", "256", "block", ".", "append", "(", "(", "name", ",", "value", ")", ")", "i", "+=", "1", "return", "block" ]
group is a list of (name, value) pairs in which all values have the same Most Significant Byte.
[ "group", "is", "a", "list", "of", "(", "name", "value", ")", "pairs", "in", "which", "all", "values", "have", "the", "same", "Most", "Significant", "Byte", "." ]
[ "\"\"\"group is a list of (name, value) pairs in which all values have the\n\tsame Most Significant Byte.\n\tPrefix the list with, and insert dummy (name, value) pairs so that\n\tvalue % 256 = position in the list for all values.\n\tExample input:\n\t [\n\t\t(\"GL_FOO\", 0xcc02),\n\t\t(\"GL_BAR\", 0xcc03),\n\t\t(\"GL_BAZ\", 0xcc05)\n\t ]\n\tAnd the result:\n\t [ (\"\", 0xcc00),\n\t\t(\"\", 0xcc01),\n\t\t(\"GL_FOO\", 0xcc02),\n\t\t(\"GL_BAR\", 0xcc03),\n\t\t(\"\", 0xcc004),\n\t\t(\"GL_BAZ\", 0xcc05)\n\t ]\n\t\"\"\"" ]
[ { "param": "group", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "group", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def GenerateBlock(group): i = 0 block = [] for (name, value) in group: while i < value % 256: block.append(("", value)) i += 1 assert i == value % 256 block.append((name, value)) i += 1 return block
1,109
479
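Running the docstring's own input through GenerateBlock (values shown in hex for readability):

GenerateBlock([("GL_FOO", 0xCC02), ("GL_BAR", 0xCC03), ("GL_BAZ", 0xCC05)])
# -> [("", 0xCC02), ("", 0xCC02), ("GL_FOO", 0xCC02),
#     ("GL_BAR", 0xCC03), ("", 0xCC05), ("GL_BAZ", 0xCC05)]
# Note the padding pairs carry the value of the next named entry, not their
# own slot index as the docstring's illustration suggests.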
78d9e167b3c0e1ccef68860ca80653029b981f61
iplo/Chain
PRESUBMIT.py
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
Python
_CheckFilePermissions
<not_specific>
def _CheckFilePermissions(input_api, output_api): """Check that all files have their permissions properly set.""" if input_api.platform == 'win32': return [] args = [sys.executable, 'tools/checkperms/checkperms.py', '--root', input_api.change.RepositoryRoot()] for f in input_api.AffectedFiles(): args += ['--file', f.LocalPath()] checkperms = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE) errors = checkperms.communicate()[0].strip() if errors: return [output_api.PresubmitError('checkperms.py failed.', errors.splitlines())] return []
Check that all files have their permissions properly set.
Check that all files have their permissions properly set.
[ "Check", "that", "all", "files", "have", "their", "permissions", "properly", "set", "." ]
def _CheckFilePermissions(input_api, output_api): if input_api.platform == 'win32': return [] args = [sys.executable, 'tools/checkperms/checkperms.py', '--root', input_api.change.RepositoryRoot()] for f in input_api.AffectedFiles(): args += ['--file', f.LocalPath()] checkperms = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE) errors = checkperms.communicate()[0].strip() if errors: return [output_api.PresubmitError('checkperms.py failed.', errors.splitlines())] return []
[ "def", "_CheckFilePermissions", "(", "input_api", ",", "output_api", ")", ":", "if", "input_api", ".", "platform", "==", "'win32'", ":", "return", "[", "]", "args", "=", "[", "sys", ".", "executable", ",", "'tools/checkperms/checkperms.py'", ",", "'--root'", ",", "input_api", ".", "change", ".", "RepositoryRoot", "(", ")", "]", "for", "f", "in", "input_api", ".", "AffectedFiles", "(", ")", ":", "args", "+=", "[", "'--file'", ",", "f", ".", "LocalPath", "(", ")", "]", "checkperms", "=", "input_api", ".", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "input_api", ".", "subprocess", ".", "PIPE", ")", "errors", "=", "checkperms", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "errors", ":", "return", "[", "output_api", ".", "PresubmitError", "(", "'checkperms.py failed.'", ",", "errors", ".", "splitlines", "(", ")", ")", "]", "return", "[", "]" ]
Check that all files have their permissions properly set.
[ "Check", "that", "all", "files", "have", "their", "permissions", "properly", "set", "." ]
[ "\"\"\"Check that all files have their permissions properly set.\"\"\"" ]
[ { "param": "input_api", "type": null }, { "param": "output_api", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_api", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output_api", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys def _CheckFilePermissions(input_api, output_api): if input_api.platform == 'win32': return [] args = [sys.executable, 'tools/checkperms/checkperms.py', '--root', input_api.change.RepositoryRoot()] for f in input_api.AffectedFiles(): args += ['--file', f.LocalPath()] checkperms = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE) errors = checkperms.communicate()[0].strip() if errors: return [output_api.PresubmitError('checkperms.py failed.', errors.splitlines())] return []
1,110
831
9345546eac3f2e1c1bd29343c0cd89250040a34d
kpagacz/NMDownloader
utils/mod_import.py
[ "MIT" ]
Python
from_csv
<not_specific>
def from_csv(cls, file_name: str, modid_column: int = 0): """Imports a list of mod ids from a .csv file. Mod ids defined by Nexus Mods site. Args: file_name (str): path to .csv file containing the mod ids modid_column (:obj:'int', optional): defines the number of the column with mod ids. Defaults to the 1st column in the file = 0. Returns: mod_import.ModListImporter """ modid_array = [] with open(file_name, newline="") as csvfile: # Recognizes the dialect of the file and whether it has headers dialect = csv.Sniffer().sniff(csvfile.readline()) csvfile.seek(0) has_headers = csv.Sniffer().has_header(csvfile.read(1024)) csvfile.seek(0) # Check for column names if has_headers: fieldnames = csvfile.readline() # Reads the modids and adds to modid_array csv_reader = csv.reader(csvfile, dialect=dialect) for row in csv_reader: modid_array.append(int(row[modid_column])) modid_array = list(modid_array) assert isinstance(modid_array, list), "modid_array is not a list. Expected a list." return cls(modid_array)
Imports a list of mod ids from a .csv file. Mod ids defined by Nexus Mods site. Args: file_name (str): path to .csv file containing the mod ids modid_column (:obj:'int', optional): defines the number of the column with mod ids. Defaults to the 1st column in the file = 0. Returns: mod_import.ModListImporter
Imports a list of mod ids from a .csv file. Mod ids defined by Nexus Mods site.
[ "Imports", "a", "list", "of", "mod", "ids", "from", "a", ".", "csv", "file", ".", "Mod", "ids", "defined", "by", "Nexus", "Mods", "site", "." ]
def from_csv(cls, file_name: str, modid_column: int = 0): modid_array = [] with open(file_name, newline="") as csvfile: dialect = csv.Sniffer().sniff(csvfile.readline()) csvfile.seek(0) has_headers = csv.Sniffer().has_header(csvfile.read(1024)) csvfile.seek(0) if has_headers: fieldnames = csvfile.readline() csv_reader = csv.reader(csvfile, dialect=dialect) for row in csv_reader: modid_array.append(int(row[modid_column])) modid_array = list(modid_array) assert isinstance(modid_array, list), "modid_array is not a list. Expected a list." return cls(modid_array)
[ "def", "from_csv", "(", "cls", ",", "file_name", ":", "str", ",", "modid_column", ":", "int", "=", "0", ")", ":", "modid_array", "=", "[", "]", "with", "open", "(", "file_name", ",", "newline", "=", "\"\"", ")", "as", "csvfile", ":", "dialect", "=", "csv", ".", "Sniffer", "(", ")", ".", "sniff", "(", "csvfile", ".", "readline", "(", ")", ")", "csvfile", ".", "seek", "(", "0", ")", "has_headers", "=", "csv", ".", "Sniffer", "(", ")", ".", "has_header", "(", "csvfile", ".", "read", "(", "1024", ")", ")", "csvfile", ".", "seek", "(", "0", ")", "if", "has_headers", ":", "fieldnames", "=", "csvfile", ".", "readline", "(", ")", "csv_reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "dialect", "=", "dialect", ")", "for", "row", "in", "csv_reader", ":", "modid_array", ".", "append", "(", "int", "(", "row", "[", "modid_column", "]", ")", ")", "modid_array", "=", "list", "(", "modid_array", ")", "assert", "isinstance", "(", "modid_array", ",", "list", ")", ",", "\"modid_array is not a list. Expected a list.\"", "return", "cls", "(", "modid_array", ")" ]
Imports a list of mod ids from a .csv file.
[ "Imports", "a", "list", "of", "mod", "ids", "from", "a", ".", "csv", "file", "." ]
[ "\"\"\"Imports a list of mod ids from a .csv file.\n Mod ids defined by Nexus Mods site.\n\n Args:\n file_name (str): path to .csv file containing the mod ids\n modid_column (:obj:'int', optional): defines the number of the column with mod ids.\n Defaults to the 1st column in the file = 0.\n\n Returns:\n mod_import.ModListImporter\n \"\"\"", "# Recognizes the dialect of the file and whether it has headers", "# Check for column names", "# Reads the modids and adds to modid_array" ]
[ { "param": "cls", "type": null }, { "param": "file_name", "type": "str" }, { "param": "modid_column", "type": "int" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file_name", "type": "str", "docstring": "path to .csv file containing the mod ids", "docstring_tokens": [ "path", "to", ".", "csv", "file", "containing", "the", "mod", "ids" ], "default": null, "is_optional": false }, { "identifier": "modid_column", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "modid_column (", "type": null, "docstring": "'int', optional): defines the number of the column with mod ids.\nDefaults to the 1st column in the file = 0.", "docstring_tokens": [ "'", "int", "'", "optional", ")", ":", "defines", "the", "number", "of", "the", "column", "with", "mod", "ids", ".", "Defaults", "to", "the", "1st", "column", "in", "the", "file", "=", "0", "." ], "default": null, "is_optional": null } ], "others": [] }
import csv def from_csv(cls, file_name: str, modid_column: int = 0): modid_array = [] with open(file_name, newline="") as csvfile: dialect = csv.Sniffer().sniff(csvfile.readline()) csvfile.seek(0) has_headers = csv.Sniffer().has_header(csvfile.read(1024)) csvfile.seek(0) if has_headers: fieldnames = csvfile.readline() csv_reader = csv.reader(csvfile, dialect=dialect) for row in csv_reader: modid_array.append(int(row[modid_column])) modid_array = list(modid_array) assert isinstance(modid_array, list), "modid_array is not a list. Expected a list." return cls(modid_array)
1,111
256
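A runnable usage sketch for the from_csv record above. The ModListImporter class body, the @classmethod decorator, and the CSV contents are assumptions for illustration; only the parsing logic comes from the record.

import csv
import os
import tempfile

# Hypothetical stand-in for mod_import.ModListImporter; the real class is not
# part of this record, so this sketch only stores the parsed id list.
class ModListImporter:
    def __init__(self, modid_array):
        self.modid_array = modid_array

    @classmethod
    def from_csv(cls, file_name: str, modid_column: int = 0):
        modid_array = []
        with open(file_name, newline="") as csvfile:
            # Same sniffing steps as the record: detect dialect and headers.
            dialect = csv.Sniffer().sniff(csvfile.readline())
            csvfile.seek(0)
            has_headers = csv.Sniffer().has_header(csvfile.read(1024))
            csvfile.seek(0)
            if has_headers:
                csvfile.readline()  # skip the header row
            for row in csv.reader(csvfile, dialect=dialect):
                modid_array.append(int(row[modid_column]))
        return cls(modid_array)

# Two columns are used because csv.Sniffer cannot infer a dialect from a
# single-column sample; the mod ids are made up.
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as tmp:
    tmp.write("mod_id,name\n1234,ExampleModA\n5678,ExampleModB\n")
    path = tmp.name
print(ModListImporter.from_csv(path).modid_array)  # -> [1234, 5678]
os.remove(path)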
2f3276d7dc341e0a5e0706cca8685d9afd8e7dba
davidkeddydb/oref0
bin/get_profile.py
[ "MIT" ]
Python
normalize_entry
<not_specific>
def normalize_entry(entry): """ Clean up an entry before further processing """ try: if entry["timeAsSeconds"]: pass except KeyError: entry_time = datetime.strptime(entry["time"], "%H:%M") entry[ "timeAsSeconds"] = 3600 * entry_time.hour + 60 * entry_time.minute try: if entry["time"]: pass except KeyError: entry_hour = int(entry['timeAsSeconds'] / 3600) entry_minute = int(entry['timeAsSeconds'] % 60) entry["time"] = str(entry_hour).rjust( 2, '0') + ":" + str(entry_minute).rjust(2, '0') entry["start"] = entry["time"] + ":00" entry["minutes"] = int(entry["timeAsSeconds"]) / 60 return entry
Clean up an entry before further processing
Clean up an entry before further processing
[ "Clean", "up", "an", "entry", "before", "further", "processing" ]
def normalize_entry(entry): try: if entry["timeAsSeconds"]: pass except KeyError: entry_time = datetime.strptime(entry["time"], "%H:%M") entry[ "timeAsSeconds"] = 3600 * entry_time.hour + 60 * entry_time.minute try: if entry["time"]: pass except KeyError: entry_hour = int(entry['timeAsSeconds'] / 3600) entry_minute = int(entry['timeAsSeconds'] % 60) entry["time"] = str(entry_hour).rjust( 2, '0') + ":" + str(entry_minute).rjust(2, '0') entry["start"] = entry["time"] + ":00" entry["minutes"] = int(entry["timeAsSeconds"]) / 60 return entry
[ "def", "normalize_entry", "(", "entry", ")", ":", "try", ":", "if", "entry", "[", "\"timeAsSeconds\"", "]", ":", "pass", "except", "KeyError", ":", "entry_time", "=", "datetime", ".", "strptime", "(", "entry", "[", "\"time\"", "]", ",", "\"%H:%M\"", ")", "entry", "[", "\"timeAsSeconds\"", "]", "=", "3600", "*", "entry_time", ".", "hour", "+", "60", "*", "entry_time", ".", "minute", "try", ":", "if", "entry", "[", "\"time\"", "]", ":", "pass", "except", "KeyError", ":", "entry_hour", "=", "int", "(", "entry", "[", "'timeAsSeconds'", "]", "/", "3600", ")", "entry_minute", "=", "int", "(", "entry", "[", "'timeAsSeconds'", "]", "%", "60", ")", "entry", "[", "\"time\"", "]", "=", "str", "(", "entry_hour", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "+", "\":\"", "+", "str", "(", "entry_minute", ")", ".", "rjust", "(", "2", ",", "'0'", ")", "entry", "[", "\"start\"", "]", "=", "entry", "[", "\"time\"", "]", "+", "\":00\"", "entry", "[", "\"minutes\"", "]", "=", "int", "(", "entry", "[", "\"timeAsSeconds\"", "]", ")", "/", "60", "return", "entry" ]
Clean up an entry before further processing
[ "Clean", "up", "an", "entry", "before", "further", "processing" ]
[ "\"\"\"\n Clean up an entry before further processing\n \"\"\"" ]
[ { "param": "entry", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "entry", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from datetime import datetime

def normalize_entry(entry):
    try:
        if entry["timeAsSeconds"]:
            pass
    except KeyError:
        entry_time = datetime.strptime(entry["time"], "%H:%M")
        entry[
            "timeAsSeconds"] = 3600 * entry_time.hour + 60 * entry_time.minute
    try:
        if entry["time"]:
            pass
    except KeyError:
        entry_hour = int(entry['timeAsSeconds'] / 3600)
        entry_minute = int(entry['timeAsSeconds'] % 60)
        entry["time"] = str(entry_hour).rjust(
            2, '0') + ":" + str(entry_minute).rjust(2, '0')
    entry["start"] = entry["time"] + ":00"
    entry["minutes"] = int(entry["timeAsSeconds"]) / 60
    return entry
1,112
945
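A hedged sketch of normalize_entry with sample input. Two caveats: the function needs `from datetime import datetime` (a plain `import datetime` would make the `datetime.strptime` call fail), and the record computes entry_minute as timeAsSeconds % 60, which yields leftover seconds rather than minutes past the hour. The sketch below uses membership tests instead of try/except and the likely intended minute arithmetic, and flags both differences.

from datetime import datetime

def normalize_entry(entry):
    # Rewritten with membership tests; behaviour otherwise follows the record.
    if "timeAsSeconds" not in entry:
        entry_time = datetime.strptime(entry["time"], "%H:%M")
        entry["timeAsSeconds"] = 3600 * entry_time.hour + 60 * entry_time.minute
    if "time" not in entry:
        entry_hour = int(entry["timeAsSeconds"] / 3600)
        # The record uses timeAsSeconds % 60 here, which is the seconds
        # remainder; % 3600 // 60 is the likely intent (minutes past the hour).
        entry_minute = int(entry["timeAsSeconds"] % 3600 // 60)
        entry["time"] = str(entry_hour).rjust(2, "0") + ":" + str(entry_minute).rjust(2, "0")
    entry["start"] = entry["time"] + ":00"
    entry["minutes"] = int(entry["timeAsSeconds"]) / 60
    return entry

print(normalize_entry({"time": "07:30"}))
# -> {'time': '07:30', 'timeAsSeconds': 27000, 'start': '07:30:00', 'minutes': 450.0}
print(normalize_entry({"timeAsSeconds": 5400}))
# -> time '01:30', start '01:30:00', minutes 90.0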
f8d1a9d85fd849929b5241b647d2b332fb57871e
Thaneshwor/python-data-profiler
src/utils/types.py
[ "MIT" ]
Python
is_date
<not_specific>
def is_date(date): ''' Return true if input parameter is date. ''' formats = [ "%Y%m%d", "%Y/%m/%d", "%m/%d/%Y", "%m/%d/%y", "%Y-%m-%d", ] for strptime_format in formats: try: datetime.datetime.strptime(date, strptime_format) return True except: pass return False
Return true if input parameter is date.
Return true if input parameter is date.
[ "Return", "true", "if", "input", "parameter", "is", "date", "." ]
def is_date(date): formats = [ "%Y%m%d", "%Y/%m/%d", "%m/%d/%Y", "%m/%d/%y", "%Y-%m-%d", ] for strptime_format in formats: try: datetime.datetime.strptime(date, strptime_format) return True except: pass return False
[ "def", "is_date", "(", "date", ")", ":", "formats", "=", "[", "\"%Y%m%d\"", ",", "\"%Y/%m/%d\"", ",", "\"%m/%d/%Y\"", ",", "\"%m/%d/%y\"", ",", "\"%Y-%m-%d\"", ",", "]", "for", "strptime_format", "in", "formats", ":", "try", ":", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "strptime_format", ")", "return", "True", "except", ":", "pass", "return", "False" ]
Return true if input parameter is date.
[ "Return", "true", "if", "input", "parameter", "is", "date", "." ]
[ "''' Return true if input parameter is date. '''" ]
[ { "param": "date", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "date", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def is_date(date): formats = [ "%Y%m%d", "%Y/%m/%d", "%m/%d/%Y", "%m/%d/%y", "%Y-%m-%d", ] for strptime_format in formats: try: datetime.datetime.strptime(date, strptime_format) return True except: pass return False
1,113
371
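A quick, self-contained check of the is_date record above; the candidate strings are made up. The record uses a bare except, which also swallows non-ValueError failures; the copy below narrows it to ValueError, which is what strptime raises on a mismatch.

import datetime

def is_date(date):
    formats = ["%Y%m%d", "%Y/%m/%d", "%m/%d/%Y", "%m/%d/%y", "%Y-%m-%d"]
    for strptime_format in formats:
        try:
            datetime.datetime.strptime(date, strptime_format)
            return True
        except ValueError:  # the record catches everything here
            pass
    return False

for candidate in ["20210131", "2021/01/31", "01/31/2021", "2021-01-31", "31-01-2021", "hello"]:
    print(candidate, is_date(candidate))
# The first four match a listed format; "31-01-2021" and "hello" do not.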
0876136eb46ef1d30f09dbd0eff572dd1e4a0144
jimstorch/DGGen
generator.py
[ "Apache-2.0" ]
Python
init_logger
null
def init_logger(verbosity, stream=sys.stdout): """Initialize logger and warnings according to verbosity argument. Verbosity levels of 0-3 supported.""" is_not_debug = verbosity <= 2 level = ( [logging.ERROR, logging.WARNING, logging.INFO][verbosity] if is_not_debug else logging.DEBUG ) log_format = ( "%(message)s" if is_not_debug else "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s" ) logging.basicConfig(level=level, format=log_format, stream=stream) if is_not_debug: warnings.filterwarnings("ignore")
Initialize logger and warnings according to verbosity argument. Verbosity levels of 0-3 supported.
Initialize logger and warnings according to verbosity argument. Verbosity levels of 0-3 supported.
[ "Initialize", "logger", "and", "warnings", "according", "to", "verbosity", "argument", ".", "Verbosity", "levels", "of", "0", "-", "3", "supported", "." ]
def init_logger(verbosity, stream=sys.stdout): is_not_debug = verbosity <= 2 level = ( [logging.ERROR, logging.WARNING, logging.INFO][verbosity] if is_not_debug else logging.DEBUG ) log_format = ( "%(message)s" if is_not_debug else "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s" ) logging.basicConfig(level=level, format=log_format, stream=stream) if is_not_debug: warnings.filterwarnings("ignore")
[ "def", "init_logger", "(", "verbosity", ",", "stream", "=", "sys", ".", "stdout", ")", ":", "is_not_debug", "=", "verbosity", "<=", "2", "level", "=", "(", "[", "logging", ".", "ERROR", ",", "logging", ".", "WARNING", ",", "logging", ".", "INFO", "]", "[", "verbosity", "]", "if", "is_not_debug", "else", "logging", ".", "DEBUG", ")", "log_format", "=", "(", "\"%(message)s\"", "if", "is_not_debug", "else", "\"%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s\"", ")", "logging", ".", "basicConfig", "(", "level", "=", "level", ",", "format", "=", "log_format", ",", "stream", "=", "stream", ")", "if", "is_not_debug", ":", "warnings", ".", "filterwarnings", "(", "\"ignore\"", ")" ]
Initialize logger and warnings according to verbosity argument.
[ "Initialize", "logger", "and", "warnings", "according", "to", "verbosity", "argument", "." ]
[ "\"\"\"Initialize logger and warnings according to verbosity argument.\n Verbosity levels of 0-3 supported.\"\"\"" ]
[ { "param": "verbosity", "type": null }, { "param": "stream", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "verbosity", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stream", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging import warnings import sys def init_logger(verbosity, stream=sys.stdout): is_not_debug = verbosity <= 2 level = ( [logging.ERROR, logging.WARNING, logging.INFO][verbosity] if is_not_debug else logging.DEBUG ) log_format = ( "%(message)s" if is_not_debug else "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s" ) logging.basicConfig(level=level, format=log_format, stream=stream) if is_not_debug: warnings.filterwarnings("ignore")
1,114
962
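A short demonstration of the init_logger record above. Verbosity 0/1/2 indexes the level list (ERROR/WARNING/INFO) with a plain message format; 3 or more falls through to DEBUG with the long format, and also leaves warnings enabled.

import logging
import sys
import warnings

def init_logger(verbosity, stream=sys.stdout):
    is_not_debug = verbosity <= 2
    level = ([logging.ERROR, logging.WARNING, logging.INFO][verbosity]
             if is_not_debug else logging.DEBUG)
    log_format = ("%(message)s" if is_not_debug else
                  "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s")
    logging.basicConfig(level=level, format=log_format, stream=stream)
    if is_not_debug:
        warnings.filterwarnings("ignore")

init_logger(2)  # INFO level, "%(message)s" format
logging.getLogger("demo").info("shown at verbosity 2")
logging.getLogger("demo").debug("hidden unless init_logger(3) was used")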
8ef448e53b93744416464a041aae330f7f42d7c8
zindy/Imaris
BridgeLib.py
[ "Apache-2.0" ]
Python
GetExtent
<not_specific>
def GetExtent(vDataSet): """Get the X,Y,Z extents of a dataset""" return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(), vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(), vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]
Get the X,Y,Z extents of a dataset
Get the X,Y,Z extents of a dataset
[ "Get", "the", "X", "Y", "Z", "extents", "of", "a", "dataset" ]
def GetExtent(vDataSet): return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(), vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(), vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]
[ "def", "GetExtent", "(", "vDataSet", ")", ":", "return", "[", "vDataSet", ".", "GetExtendMinX", "(", ")", ",", "vDataSet", ".", "GetExtendMaxX", "(", ")", ",", "vDataSet", ".", "GetExtendMinY", "(", ")", ",", "vDataSet", ".", "GetExtendMaxY", "(", ")", ",", "vDataSet", ".", "GetExtendMinZ", "(", ")", ",", "vDataSet", ".", "GetExtendMaxZ", "(", ")", "]" ]
Get the X,Y,Z extents of a dataset
[ "Get", "the", "X", "Y", "Z", "extents", "of", "a", "dataset" ]
[ "\"\"\"Get the X,Y,Z extents of a dataset\"\"\"" ]
[ { "param": "vDataSet", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "vDataSet", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def GetExtent(vDataSet): return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(), vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(), vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]
1,115
873
24af37bb23b0e8be6e9e9c6be405307c6d83b48e
LCageman/nmrglue
nmrglue/fileio/varian.py
[ "BSD-3-Clause" ]
Python
put_hyperheader
<not_specific>
def put_hyperheader(f, hh): """ Write hyperheader list to file (28-bytes written) Parameters ---------- f : file object Open file object to write to. hh : list with 9 elements Hyperheader list. """ f.write(struct.pack('>4hl4f', *hh)) return
Write hyperheader list to file (28-bytes written) Parameters ---------- f : file object Open file object to write to. hh : list with 9 elements Hyperheader list.
Write hyperheader list to file (28-bytes written) Parameters f : file object Open file object to write to. hh : list with 9 elements Hyperheader list.
[ "Write", "hyperheader", "list", "to", "file", "(", "28", "-", "bytes", "written", ")", "Parameters", "f", ":", "file", "object", "Open", "file", "object", "to", "write", "to", ".", "hh", ":", "list", "with", "9", "elements", "Hyperheader", "list", "." ]
def put_hyperheader(f, hh): f.write(struct.pack('>4hl4f', *hh)) return
[ "def", "put_hyperheader", "(", "f", ",", "hh", ")", ":", "f", ".", "write", "(", "struct", ".", "pack", "(", "'>4hl4f'", ",", "*", "hh", ")", ")", "return" ]
Write hyperheader list to file (28-bytes written) Parameters
[ "Write", "hyperheader", "list", "to", "file", "(", "28", "-", "bytes", "written", ")", "Parameters" ]
[ "\"\"\"\n Write hyperheader list to file (28-bytes written)\n\n Parameters\n ----------\n f : file object\n Open file object to write to.\n hh : list with 9 elements\n Hyperheader list.\n\n \"\"\"" ]
[ { "param": "f", "type": null }, { "param": "hh", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "hh", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import struct def put_hyperheader(f, hh): f.write(struct.pack('>4hl4f', *hh)) return
1,116
136
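The format string '>4hl4f' in the record above packs four big-endian shorts, one 4-byte integer and four floats, which is where the documented 28 bytes and the 9-element list come from. A sketch against an in-memory buffer; the header values are made up.

import io
import struct

def put_hyperheader(f, hh):
    f.write(struct.pack('>4hl4f', *hh))

buf = io.BytesIO()
put_hyperheader(buf, [1, 2, 3, 4, 1000, 0.5, 1.5, 2.5, 3.5])
print(len(buf.getvalue()))  # -> 28 (4*2 + 1*4 + 4*4 bytes)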
664967e9a28f4fc8409a6c0b9d7b06103b7a155e
nolanwrightdev/blind-75-python
problems/problem36.py
[ "MIT" ]
Python
solution
<not_specific>
def solution(head): ''' Maintain two pointers, one of which advances at twice the rate of the other. If at any time the two pointers are pointing to the same node, then there is a loop. If, on the other hand, the faster pointer reaches the end of the list, then there is not a loop. ''' p1 = p2 = head while p2: p1 = p1.next p2 = p2.next if p2: p2 = p2.next else: break if p1 is p2: return True return False
Maintain two pointers, one of which advances at twice the rate of the other. If at any time the two pointers are pointing to the same node, then there is a loop. If, on the other hand, the faster pointer reaches the end of the list, then there is not a loop.
Maintain two pointers, one of which advances at twice the rate of the other. If at any time the two pointers are pointing to the same node, then there is a loop. If, on the other hand, the faster pointer reaches the end of the list, then there is not a loop.
[ "Maintain", "two", "pointers", "one", "of", "which", "advances", "at", "twice", "the", "rate", "of", "the", "other", ".", "If", "at", "any", "time", "the", "two", "pointers", "are", "pointing", "to", "the", "same", "node", "then", "there", "is", "a", "loop", ".", "If", "on", "the", "other", "hand", "the", "faster", "pointer", "reaches", "the", "end", "of", "the", "list", "then", "there", "is", "not", "a", "loop", "." ]
def solution(head): p1 = p2 = head while p2: p1 = p1.next p2 = p2.next if p2: p2 = p2.next else: break if p1 is p2: return True return False
[ "def", "solution", "(", "head", ")", ":", "p1", "=", "p2", "=", "head", "while", "p2", ":", "p1", "=", "p1", ".", "next", "p2", "=", "p2", ".", "next", "if", "p2", ":", "p2", "=", "p2", ".", "next", "else", ":", "break", "if", "p1", "is", "p2", ":", "return", "True", "return", "False" ]
Maintain two pointers, one of which advances at twice the rate of the other.
[ "Maintain", "two", "pointers", "one", "of", "which", "advances", "at", "twice", "the", "rate", "of", "the", "other", "." ]
[ "'''\n\tMaintain two pointers, one of which advances at twice the rate of the other.\n\tIf at any time the two pointers are pointing to the same node, then there is a\n\ta loop. If, on the other hand, the faster pointer reaches the end of the list,\n\tthen there is not a loop.\n\t'''" ]
[ { "param": "head", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "head", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def solution(head): p1 = p2 = head while p2: p1 = p1.next p2 = p2.next if p2: p2 = p2.next else: break if p1 is p2: return True return False
1,117
46
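The record above is Floyd's tortoise-and-hare cycle check. A self-contained run; the Node class is a minimal stand-in, since the list node type is not part of the record.

class Node:
    # Minimal stand-in for the linked-list nodes the record assumes.
    def __init__(self, val):
        self.val = val
        self.next = None

def solution(head):
    p1 = p2 = head
    while p2:
        p1 = p1.next
        p2 = p2.next
        if p2:
            p2 = p2.next
        else:
            break
        if p1 is p2:
            return True
    return False

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next = b, c
print(solution(a))  # -> False (no loop)
c.next = a          # close the loop: 1 -> 2 -> 3 -> 1
print(solution(a))  # -> True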
ccc556c1fdb3557b5bb738e57ff274b2b4c396b9
Luc-Veldhuis/MasterThesisCS2020
fuzz_checker/helpers/utils.py
[ "MIT" ]
Python
delete_random_character
<not_specific>
def delete_random_character(s): """Returns s with a random character deleted""" if s == "": return s pos = random.randint(0, len(s) - 1) # print("Deleting", repr(s[pos]), "at", pos) return s[:pos] + s[pos + 1:]
Returns s with a random character deleted
Returns s with a random character deleted
[ "Returns", "s", "with", "a", "random", "character", "deleted" ]
def delete_random_character(s): if s == "": return s pos = random.randint(0, len(s) - 1) return s[:pos] + s[pos + 1:]
[ "def", "delete_random_character", "(", "s", ")", ":", "if", "s", "==", "\"\"", ":", "return", "s", "pos", "=", "random", ".", "randint", "(", "0", ",", "len", "(", "s", ")", "-", "1", ")", "return", "s", "[", ":", "pos", "]", "+", "s", "[", "pos", "+", "1", ":", "]" ]
Returns s with a random character deleted
[ "Returns", "s", "with", "a", "random", "character", "deleted" ]
[ "\"\"\"Returns s with a random character deleted\"\"\"", "# print(\"Deleting\", repr(s[pos]), \"at\", pos)" ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import random def delete_random_character(s): if s == "": return s pos = random.randint(0, len(s) - 1) return s[:pos] + s[pos + 1:]
1,119
165
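A deterministic run of the delete_random_character record above; the seed is only there to make the sketch reproducible.

import random

def delete_random_character(s):
    if s == "":
        return s
    pos = random.randint(0, len(s) - 1)
    return s[:pos] + s[pos + 1:]

random.seed(0)
mutated = delete_random_character("fuzzing")
print(mutated, len(mutated))              # one character shorter than "fuzzing"
print(repr(delete_random_character("")))  # '' passes through unchanged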
5aa90e802149732179c024120438aaeabc6fa5d5
bravokid47/goatools
goatools/grouper/wr_sections.py
[ "BSD-2-Clause" ]
Python
_init_fncsortnt
<not_specific>
def _init_fncsortnt(flds): """Return a sort function for sorting header GO IDs found in sections.""" if 'tinfo' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt] if 'dcnt' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]
Return a sort function for sorting header GO IDs found in sections.
Return a sort function for sorting header GO IDs found in sections.
[ "Return", "a", "sort", "function", "for", "sorting", "header", "GO", "IDs", "found", "in", "sections", "." ]
def _init_fncsortnt(flds): if 'tinfo' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt] if 'dcnt' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]
[ "def", "_init_fncsortnt", "(", "flds", ")", ":", "if", "'tinfo'", "in", "flds", ":", "if", "'D1'", "in", "flds", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "tinfo", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "D1", ",", "ntgo", ".", "alt", "]", "else", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "tinfo", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]", "if", "'dcnt'", "in", "flds", ":", "if", "'D1'", "in", "flds", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "dcnt", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "D1", ",", "ntgo", ".", "alt", "]", "else", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "dcnt", ",", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]", "else", ":", "return", "lambda", "ntgo", ":", "[", "ntgo", ".", "NS", ",", "-", "1", "*", "ntgo", ".", "depth", ",", "ntgo", ".", "alt", "]" ]
Return a sort function for sorting header GO IDs found in sections.
[ "Return", "a", "sort", "function", "for", "sorting", "header", "GO", "IDs", "found", "in", "sections", "." ]
[ "\"\"\"Return a sort function for sorting header GO IDs found in sections.\"\"\"" ]
[ { "param": "flds", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "flds", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _init_fncsortnt(flds): if 'tinfo' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt] if 'dcnt' in flds: if 'D1' in flds: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.D1, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt] else: return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]
1,120
586
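The record above returns a sort key, so a namedtuple with the expected attributes is enough to exercise it. NtGo and its values are invented for illustration; they are not real GO-DAG data.

from collections import namedtuple

def _init_fncsortnt(flds):
    if 'tinfo' in flds:
        if 'D1' in flds:
            return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.D1, ntgo.alt]
        else:
            return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt]
    if 'dcnt' in flds:
        if 'D1' in flds:
            return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.D1, ntgo.alt]
        else:
            return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt]
    else:
        return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]

NtGo = namedtuple("NtGo", "NS dcnt depth alt")  # hypothetical GO-header record
nts = [NtGo("BP", 5, 2, "GO:2"), NtGo("BP", 9, 1, "GO:1"), NtGo("MF", 3, 1, "GO:3")]
key = _init_fncsortnt(["NS", "dcnt", "depth", "alt"])
# Namespace ascending, then descendant count descending, depth, alt id:
print([nt.alt for nt in sorted(nts, key=key)])  # -> ['GO:1', 'GO:2', 'GO:3']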
bf99cfaac2ae193d69731071279d4ce4dfc194c3
akuhnregnier/wildfires
src/wildfires/utils.py
[ "MIT" ]
Python
traverse_nested_dict
null
def traverse_nested_dict(d, max_recursion=100, _initial_keys=(), _current_recursion=0): """Traverse a nested dictionary, yielding flattened keys and corresponding values. Args: d (dict): (Nested) dict. max_recursion (int): Maximum recursion level before a RuntimeError is raised. Examples: >>> nested_dict = {'a': 1, 'b': {'c': 2, 'd': {'e': 4}}} >>> list(traverse_nested_dict(nested_dict)) [(('a',), 1), (('b', 'c'), 2), (('b', 'd', 'e'), 4)] """ if _current_recursion > max_recursion: raise RuntimeError("Maximum recursion exceeded") for key, val in d.items(): if isinstance(val, dict): yield from traverse_nested_dict( val, max_recursion=max_recursion, _initial_keys=_initial_keys + (key,), _current_recursion=_current_recursion + 1, ) else: yield (_initial_keys + (key,), val)
Traverse a nested dictionary, yielding flattened keys and corresponding values. Args: d (dict): (Nested) dict. max_recursion (int): Maximum recursion level before a RuntimeError is raised. Examples: >>> nested_dict = {'a': 1, 'b': {'c': 2, 'd': {'e': 4}}} >>> list(traverse_nested_dict(nested_dict)) [(('a',), 1), (('b', 'c'), 2), (('b', 'd', 'e'), 4)]
Traverse a nested dictionary, yielding flattened keys and corresponding values.
[ "Traverse", "a", "nested", "dictionary", "yielding", "flattened", "keys", "and", "corresponding", "values", "." ]
def traverse_nested_dict(d, max_recursion=100, _initial_keys=(), _current_recursion=0): if _current_recursion > max_recursion: raise RuntimeError("Maximum recursion exceeded") for key, val in d.items(): if isinstance(val, dict): yield from traverse_nested_dict( val, max_recursion=max_recursion, _initial_keys=_initial_keys + (key,), _current_recursion=_current_recursion + 1, ) else: yield (_initial_keys + (key,), val)
[ "def", "traverse_nested_dict", "(", "d", ",", "max_recursion", "=", "100", ",", "_initial_keys", "=", "(", ")", ",", "_current_recursion", "=", "0", ")", ":", "if", "_current_recursion", ">", "max_recursion", ":", "raise", "RuntimeError", "(", "\"Maximum recursion exceeded\"", ")", "for", "key", ",", "val", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "yield", "from", "traverse_nested_dict", "(", "val", ",", "max_recursion", "=", "max_recursion", ",", "_initial_keys", "=", "_initial_keys", "+", "(", "key", ",", ")", ",", "_current_recursion", "=", "_current_recursion", "+", "1", ",", ")", "else", ":", "yield", "(", "_initial_keys", "+", "(", "key", ",", ")", ",", "val", ")" ]
Traverse a nested dictionary, yielding flattened keys and corresponding values.
[ "Traverse", "a", "nested", "dictionary", "yielding", "flattened", "keys", "and", "corresponding", "values", "." ]
[ "\"\"\"Traverse a nested dictionary, yielding flattened keys and corresponding values.\n\n Args:\n d (dict): (Nested) dict.\n max_recursion (int): Maximum recursion level before a RuntimeError is raised.\n\n Examples:\n >>> nested_dict = {'a': 1, 'b': {'c': 2, 'd': {'e': 4}}}\n >>> list(traverse_nested_dict(nested_dict))\n [(('a',), 1), (('b', 'c'), 2), (('b', 'd', 'e'), 4)]\n\n \"\"\"" ]
[ { "param": "d", "type": null }, { "param": "max_recursion", "type": null }, { "param": "_initial_keys", "type": null }, { "param": "_current_recursion", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "d", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "max_recursion", "type": null, "docstring": "Maximum recursion level before a RuntimeError is raised.", "docstring_tokens": [ "Maximum", "recursion", "level", "before", "a", "RuntimeError", "is", "raised", "." ], "default": null, "is_optional": false }, { "identifier": "_initial_keys", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_current_recursion", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def traverse_nested_dict(d, max_recursion=100, _initial_keys=(), _current_recursion=0): if _current_recursion > max_recursion: raise RuntimeError("Maximum recursion exceeded") for key, val in d.items(): if isinstance(val, dict): yield from traverse_nested_dict( val, max_recursion=max_recursion, _initial_keys=_initial_keys + (key,), _current_recursion=_current_recursion + 1, ) else: yield (_initial_keys + (key,), val)
1,121
696
e33e587e1b59c0c92b960c7d991328e657368fb6
ipelupessy/ESMValTool
tests/integration/test_recipes_loading.py
[ "Apache-2.0" ]
Python
_get_dummy_filenames
<not_specific>
def _get_dummy_filenames(drs): """Generate list of realistic dummy filename(s) according to drs. drs is the directory structure used to find input files in ESMValTool """ dummy_filenames = [] # Time-invariant (fx) variables don't have years in their filename if 'fx' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs.replace('[_.]*', '.') elif drs.endswith('*.nc'): dummy_filename = drs.replace('*', '') dummy_filenames.append(dummy_filename) # For other variables, add custom (large) intervals in dummy filename elif '*' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs[:-len('[_.]*nc')] elif drs.endswith('*.nc'): dummy_filename = drs[:-len('*.nc')] # Spread dummy data over multiple files for realistic test # Note: adding too many intervals here makes the tests really slow! for interval in ['0000_1849', '1850_9999']: dummy_filenames.append(dummy_filename + '_' + interval + '.nc') # Provide for the possibility of filename drs strings without *. else: dummy_filename = drs dummy_filenames.append(dummy_filename) return dummy_filenames
Generate list of realistic dummy filename(s) according to drs. drs is the directory structure used to find input files in ESMValTool
Generate list of realistic dummy filename(s) according to drs. drs is the directory structure used to find input files in ESMValTool
[ "Generate", "list", "of", "realistic", "dummy", "filename", "(", "s", ")", "according", "to", "drs", ".", "drs", "is", "the", "directory", "structure", "used", "to", "find", "input", "files", "in", "ESMValTool" ]
def _get_dummy_filenames(drs): dummy_filenames = [] if 'fx' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs.replace('[_.]*', '.') elif drs.endswith('*.nc'): dummy_filename = drs.replace('*', '') dummy_filenames.append(dummy_filename) elif '*' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs[:-len('[_.]*nc')] elif drs.endswith('*.nc'): dummy_filename = drs[:-len('*.nc')] for interval in ['0000_1849', '1850_9999']: dummy_filenames.append(dummy_filename + '_' + interval + '.nc') else: dummy_filename = drs dummy_filenames.append(dummy_filename) return dummy_filenames
[ "def", "_get_dummy_filenames", "(", "drs", ")", ":", "dummy_filenames", "=", "[", "]", "if", "'fx'", "in", "drs", ":", "if", "drs", ".", "endswith", "(", "'[_.]*nc'", ")", ":", "dummy_filename", "=", "drs", ".", "replace", "(", "'[_.]*'", ",", "'.'", ")", "elif", "drs", ".", "endswith", "(", "'*.nc'", ")", ":", "dummy_filename", "=", "drs", ".", "replace", "(", "'*'", ",", "''", ")", "dummy_filenames", ".", "append", "(", "dummy_filename", ")", "elif", "'*'", "in", "drs", ":", "if", "drs", ".", "endswith", "(", "'[_.]*nc'", ")", ":", "dummy_filename", "=", "drs", "[", ":", "-", "len", "(", "'[_.]*nc'", ")", "]", "elif", "drs", ".", "endswith", "(", "'*.nc'", ")", ":", "dummy_filename", "=", "drs", "[", ":", "-", "len", "(", "'*.nc'", ")", "]", "for", "interval", "in", "[", "'0000_1849'", ",", "'1850_9999'", "]", ":", "dummy_filenames", ".", "append", "(", "dummy_filename", "+", "'_'", "+", "interval", "+", "'.nc'", ")", "else", ":", "dummy_filename", "=", "drs", "dummy_filenames", ".", "append", "(", "dummy_filename", ")", "return", "dummy_filenames" ]
Generate list of realistic dummy filename(s) according to drs.
[ "Generate", "list", "of", "realistic", "dummy", "filename", "(", "s", ")", "according", "to", "drs", "." ]
[ "\"\"\"Generate list of realistic dummy filename(s) according to drs.\n\n drs is the directory structure used to find input files in ESMValTool\n \"\"\"", "# Time-invariant (fx) variables don't have years in their filename", "# For other variables, add custom (large) intervals in dummy filename", "# Spread dummy data over multiple files for realistic test", "# Note: adding too many intervals here makes the tests really slow!", "# Provide for the possibility of filename drss without *." ]
[ { "param": "drs", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "drs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_dummy_filenames(drs): dummy_filenames = [] if 'fx' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs.replace('[_.]*', '.') elif drs.endswith('*.nc'): dummy_filename = drs.replace('*', '') dummy_filenames.append(dummy_filename) elif '*' in drs: if drs.endswith('[_.]*nc'): dummy_filename = drs[:-len('[_.]*nc')] elif drs.endswith('*.nc'): dummy_filename = drs[:-len('*.nc')] for interval in ['0000_1849', '1850_9999']: dummy_filenames.append(dummy_filename + '_' + interval + '.nc') else: dummy_filename = drs dummy_filenames.append(dummy_filename) return dummy_filenames
1,122
508
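Two illustrative calls against the record above; the CMIP-style path patterns are invented, not taken from an ESMValTool config.

def _get_dummy_filenames(drs):
    # Condensed copy of the record's logic.
    dummy_filenames = []
    if 'fx' in drs:  # time-invariant variables carry no year range
        if drs.endswith('[_.]*nc'):
            dummy_filename = drs.replace('[_.]*', '.')
        elif drs.endswith('*.nc'):
            dummy_filename = drs.replace('*', '')
        dummy_filenames.append(dummy_filename)
    elif '*' in drs:  # other variables are spread over two wide year intervals
        if drs.endswith('[_.]*nc'):
            dummy_filename = drs[:-len('[_.]*nc')]
        elif drs.endswith('*.nc'):
            dummy_filename = drs[:-len('*.nc')]
        for interval in ['0000_1849', '1850_9999']:
            dummy_filenames.append(dummy_filename + '_' + interval + '.nc')
    else:
        dummy_filenames.append(drs)
    return dummy_filenames

print(_get_dummy_filenames('/data/tas_Amon_MODEL_historical_r1i1p1[_.]*nc'))
# -> ['/data/tas_Amon_MODEL_historical_r1i1p1_0000_1849.nc',
#     '/data/tas_Amon_MODEL_historical_r1i1p1_1850_9999.nc']
print(_get_dummy_filenames('/data/orog_fx_MODEL_historical_r0i0p0[_.]*nc'))
# -> ['/data/orog_fx_MODEL_historical_r0i0p0.nc']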
e0485bec65d56b984c8b4e363e80c4ab4a1d4cbe
Sepidak/spikeGUI
margrie_libs/margrie_libs/signal_processing/mat_utils.py
[ "MIT" ]
Python
find_sine_peaks_ranges
<not_specific>
def find_sine_peaks_ranges(sine_trace): """ Sine has to be zero centered """ return abs(sine_trace) > (0.9 * sine_trace.max())
Sine has to be zero centered
Sine has to be zero centered
[ "Sine", "has", "to", "be", "zero", "centered" ]
def find_sine_peaks_ranges(sine_trace): return abs(sine_trace) > (0.9 * sine_trace.max())
[ "def", "find_sine_peaks_ranges", "(", "sine_trace", ")", ":", "return", "abs", "(", "sine_trace", ")", ">", "(", "0.9", "*", "sine_trace", ".", "max", "(", ")", ")" ]
Sine has to be zero centered
[ "Sine", "has", "to", "be", "zero", "centered" ]
[ "\"\"\"\r\n Sine has to be zero centered\r\n \"\"\"" ]
[ { "param": "sine_trace", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sine_trace", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_sine_peaks_ranges(sine_trace): return abs(sine_trace) > (0.9 * sine_trace.max())
1,123
804
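The record above expects a NumPy array (plain Python floats have no .max()). Because it takes abs() first, the returned boolean mask marks both crests and troughs of the zero-centered sine. A short run with invented sample points:

import numpy as np

def find_sine_peaks_ranges(sine_trace):
    return abs(sine_trace) > (0.9 * sine_trace.max())

t = np.linspace(0, 2 * np.pi, 8, endpoint=False)
print(find_sine_peaks_ranges(np.sin(t)))
# -> [False False  True False False False  True False]  (peak and trough)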
31f6a0aae4e5c795428a845c8b10457d9b817459
jason-chao/appcestry
AppcestryCore/conversion.py
[ "MIT" ]
Python
disassemble
<not_specific>
def disassemble(apkFilename, disassemblyDir): """Disassemble an APK file using apktool Args: apkFilename: The full filename of the APK file disassemblyDir: The parent directory in which the directory for the APK will be created Returns: The name of directory that contains the files of an APK file extracted by apktool """ outputDirForApk = os.path.join(disassemblyDir, os.path.basename(apkFilename)) os.system("apktool d {} -o {} -f".format(apkFilename, outputDirForApk)) return outputDirForApk
Disassemble an APK file using apktool Args: apkFilename: The full filename of the APK file disassemblyDir: The parent directory in which the directory for the APK will be created Returns: The name of directory that contains the files of an APK file extracted by apktool
Disassemble an APK file using apktool
[ "Disassemble", "an", "APK", "file", "using", "apktool" ]
def disassemble(apkFilename, disassemblyDir): outputDirForApk = os.path.join(disassemblyDir, os.path.basename(apkFilename)) os.system("apktool d {} -o {} -f".format(apkFilename, outputDirForApk)) return outputDirForApk
[ "def", "disassemble", "(", "apkFilename", ",", "disassemblyDir", ")", ":", "outputDirForApk", "=", "os", ".", "path", ".", "join", "(", "disassemblyDir", ",", "os", ".", "path", ".", "basename", "(", "apkFilename", ")", ")", "os", ".", "system", "(", "\"apktool d {} -o {} -f\"", ".", "format", "(", "apkFilename", ",", "outputDirForApk", ")", ")", "return", "outputDirForApk" ]
Disassemble an APK file using apktool
[ "Disassemble", "an", "APK", "file", "using", "apktool" ]
[ "\"\"\"Disassemble an APK file using apktool\n Args:\n apkFilename: The full filename of the APK file\n disassemblyDir: The parent directory in which the directory for the APK will be created\n Returns:\n The name of directory that contains the files of an APK file extracted by apktool\n \"\"\"" ]
[ { "param": "apkFilename", "type": null }, { "param": "disassemblyDir", "type": null } ]
{ "returns": [ { "docstring": "The name of directory that contains the files of an APK file extracted by apktool", "docstring_tokens": [ "The", "name", "of", "directory", "that", "contains", "the", "files", "of", "an", "APK", "file", "extracted", "by", "apktool" ], "type": null } ], "raises": [], "params": [ { "identifier": "apkFilename", "type": null, "docstring": "The full filename of the APK file", "docstring_tokens": [ "The", "full", "filename", "of", "the", "APK", "file" ], "default": null, "is_optional": null }, { "identifier": "disassemblyDir", "type": null, "docstring": "The parent directory in which the directory for the APK will be created", "docstring_tokens": [ "The", "parent", "directory", "in", "which", "the", "directory", "for", "the", "APK", "will", "be", "created" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def disassemble(apkFilename, disassemblyDir): outputDirForApk = os.path.join(disassemblyDir, os.path.basename(apkFilename)) os.system("apktool d {} -o {} -f".format(apkFilename, outputDirForApk)) return outputDirForApk
1,125
28
275571a787ae6c2838983c293e264fe32a50071c
IdeasLabUT/dynetworkx-uf
dynetworkx/classes/intervalgraph.py
[ "BSD-3-Clause" ]
Python
__overlaps_or_contains
<not_specific>
def __overlaps_or_contains(iv, begin, end): """Returns True if interval `iv` overlaps with begin and end. Parameters ---------- iv: Interval begin: int or float Inclusive beginning time of the node appearing in the interval graph. end: int or float Non-inclusive ending time of the node appearing in the interval graph. Must be bigger than or equal begin. """ if begin is None and end is None: return True if begin is None: return iv[2] < end if end is None: return iv[3] > begin return (iv[2] < end and iv[3] > begin) or iv[2] == begin
Returns True if interval `iv` overlaps with begin and end. Parameters ---------- iv: Interval begin: int or float Inclusive beginning time of the node appearing in the interval graph. end: int or float Non-inclusive ending time of the node appearing in the interval graph. Must be bigger than or equal begin.
Returns True if interval `iv` overlaps with begin and end. Parameters Interval begin: int or float Inclusive beginning time of the node appearing in the interval graph. end: int or float Non-inclusive ending time of the node appearing in the interval graph. Must be bigger than or equal begin.
[ "Returns", "True", "if", "interval", "`", "iv", "`", "overlaps", "with", "begin", "and", "end", ".", "Parameters", "Interval", "begin", ":", "int", "or", "float", "Inclusive", "beginning", "time", "of", "the", "node", "appearing", "in", "the", "interval", "graph", ".", "end", ":", "int", "or", "float", "Non", "-", "inclusive", "ending", "time", "of", "the", "node", "appearing", "in", "the", "interval", "graph", ".", "Must", "be", "bigger", "than", "or", "equal", "begin", "." ]
def __overlaps_or_contains(iv, begin, end): if begin is None and end is None: return True if begin is None: return iv[2] < end if end is None: return iv[3] > begin return (iv[2] < end and iv[3] > begin) or iv[2] == begin
[ "def", "__overlaps_or_contains", "(", "iv", ",", "begin", ",", "end", ")", ":", "if", "begin", "is", "None", "and", "end", "is", "None", ":", "return", "True", "if", "begin", "is", "None", ":", "return", "iv", "[", "2", "]", "<", "end", "if", "end", "is", "None", ":", "return", "iv", "[", "3", "]", ">", "begin", "return", "(", "iv", "[", "2", "]", "<", "end", "and", "iv", "[", "3", "]", ">", "begin", ")", "or", "iv", "[", "2", "]", "==", "begin" ]
Returns True if interval `iv` overlaps with begin and end.
[ "Returns", "True", "if", "interval", "`", "iv", "`", "overlaps", "with", "begin", "and", "end", "." ]
[ "\"\"\"Returns True if interval `iv` overlaps with begin and end.\n\n Parameters\n ----------\n iv: Interval\n begin: int or float\n Inclusive beginning time of the node appearing in the interval graph.\n end: int or float\n Non-inclusive ending time of the node appearing in the interval graph.\n Must be bigger than or equal begin.\n \"\"\"" ]
[ { "param": "iv", "type": null }, { "param": "begin", "type": null }, { "param": "end", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iv", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "begin", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "end", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __overlaps_or_contains(iv, begin, end): if begin is None and end is None: return True if begin is None: return iv[2] < end if end is None: return iv[3] > begin return (iv[2] < end and iv[3] > begin) or iv[2] == begin
1,126
916
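Judging by the indexing in the record above, iv is a tuple whose slots 2 and 3 hold the begin and end times; the first two slots are assumed here to carry node data. A few boundary cases, including the iv[2] == begin special case:

def __overlaps_or_contains(iv, begin, end):
    if begin is None and end is None:
        return True
    if begin is None:
        return iv[2] < end
    if end is None:
        return iv[3] > begin
    return (iv[2] < end and iv[3] > begin) or iv[2] == begin

iv = ("u", "v", 2.0, 5.0)  # assumed layout: (node, data, begin, end)
print(__overlaps_or_contains(iv, 4, 6))        # -> True: [2,5) overlaps [4,6)
print(__overlaps_or_contains(iv, 5, 7))        # -> False: intervals only touch
print(__overlaps_or_contains(iv, None, None))  # -> True: unbounded query
print(__overlaps_or_contains(iv, 2, 2))        # -> True: iv[2] == begin special case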
75f798f89f7b3d4a5e33c31820b0455fdb6b5818
jbellino-usgs/pyemu
pyemu/utils/geostats.py
[ "BSD-3-Clause" ]
Python
_read_variogram
<not_specific>
def _read_variogram(f): """Function to instantiate a Vario2d from a PEST-style structure file Parameters ---------- f : (file handle) file handle opened for reading Returns ------- Vario2d : Vario2d Vario2d derived type """ line = '' vartype = None bearing = 0.0 a = None anisotropy = 1.0 while "end variogram" not in line: line = f.readline() if line == '': raise Exception("EOF while read variogram") line = line.strip().lower().split() if line[0].startswith('#'): continue if line[0] == "vartype": vartype = int(line[1]) elif line[0] == "bearing": bearing = float(line[1]) elif line[0] == "a": a = float(line[1]) elif line[0] == "anisotropy": anisotropy = float(line[1]) elif line[0] == "end": break else: raise Exception("unrecognized arg in variogram:{0}".format(line[0])) return vartype,bearing,a,anisotropy
Function to instantiate a Vario2d from a PEST-style structure file Parameters ---------- f : (file handle) file handle opened for reading Returns ------- Vario2d : Vario2d Vario2d derived type
Function to instantiate a Vario2d from a PEST-style structure file Parameters f : (file handle) file handle opened for reading Returns
[ "Function", "to", "instantiate", "a", "Vario2d", "from", "a", "PEST", "-", "style", "structure", "file", "Parameters", "f", ":", "(", "file", "handle", ")", "file", "handle", "opened", "for", "reading", "Returns" ]
def _read_variogram(f): line = '' vartype = None bearing = 0.0 a = None anisotropy = 1.0 while "end variogram" not in line: line = f.readline() if line == '': raise Exception("EOF while read variogram") line = line.strip().lower().split() if line[0].startswith('#'): continue if line[0] == "vartype": vartype = int(line[1]) elif line[0] == "bearing": bearing = float(line[1]) elif line[0] == "a": a = float(line[1]) elif line[0] == "anisotropy": anisotropy = float(line[1]) elif line[0] == "end": break else: raise Exception("unrecognized arg in variogram:{0}".format(line[0])) return vartype,bearing,a,anisotropy
[ "def", "_read_variogram", "(", "f", ")", ":", "line", "=", "''", "vartype", "=", "None", "bearing", "=", "0.0", "a", "=", "None", "anisotropy", "=", "1.0", "while", "\"end variogram\"", "not", "in", "line", ":", "line", "=", "f", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "raise", "Exception", "(", "\"EOF while read variogram\"", ")", "line", "=", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", ".", "split", "(", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'#'", ")", ":", "continue", "if", "line", "[", "0", "]", "==", "\"vartype\"", ":", "vartype", "=", "int", "(", "line", "[", "1", "]", ")", "elif", "line", "[", "0", "]", "==", "\"bearing\"", ":", "bearing", "=", "float", "(", "line", "[", "1", "]", ")", "elif", "line", "[", "0", "]", "==", "\"a\"", ":", "a", "=", "float", "(", "line", "[", "1", "]", ")", "elif", "line", "[", "0", "]", "==", "\"anisotropy\"", ":", "anisotropy", "=", "float", "(", "line", "[", "1", "]", ")", "elif", "line", "[", "0", "]", "==", "\"end\"", ":", "break", "else", ":", "raise", "Exception", "(", "\"unrecognized arg in variogram:{0}\"", ".", "format", "(", "line", "[", "0", "]", ")", ")", "return", "vartype", ",", "bearing", ",", "a", ",", "anisotropy" ]
Function to instantiate a Vario2d from a PEST-style structure file Parameters
[ "Function", "to", "instantiate", "a", "Vario2d", "from", "a", "PEST", "-", "style", "structure", "file", "Parameters" ]
[ "\"\"\"Function to instantiate a Vario2d from a PEST-style structure file\n\n Parameters\n ----------\n f : (file handle)\n file handle opened for reading\n\n Returns\n -------\n Vario2d : Vario2d\n Vario2d derived type\n\n \"\"\"" ]
[ { "param": "f", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _read_variogram(f): line = '' vartype = None bearing = 0.0 a = None anisotropy = 1.0 while "end variogram" not in line: line = f.readline() if line == '': raise Exception("EOF while read variogram") line = line.strip().lower().split() if line[0].startswith('#'): continue if line[0] == "vartype": vartype = int(line[1]) elif line[0] == "bearing": bearing = float(line[1]) elif line[0] == "a": a = float(line[1]) elif line[0] == "anisotropy": anisotropy = float(line[1]) elif line[0] == "end": break else: raise Exception("unrecognized arg in variogram:{0}".format(line[0])) return vartype,bearing,a,anisotropy
1,128
17
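The record's docstring promises a Vario2d, but the function actually returns a (vartype, bearing, a, anisotropy) tuple, presumably consumed by a Vario2d constructor elsewhere in pyemu. A sketch against an in-memory structure block with made-up values; note that after split() the while condition compares against a token list, so the loop really exits through the "end" branch.

import io

def _read_variogram(f):
    line = ''
    vartype = None
    bearing = 0.0
    a = None
    anisotropy = 1.0
    while "end variogram" not in line:
        line = f.readline()
        if line == '':
            raise Exception("EOF while read variogram")
        line = line.strip().lower().split()
        if line[0].startswith('#'):
            continue
        if line[0] == "vartype":
            vartype = int(line[1])
        elif line[0] == "bearing":
            bearing = float(line[1])
        elif line[0] == "a":
            a = float(line[1])
        elif line[0] == "anisotropy":
            anisotropy = float(line[1])
        elif line[0] == "end":
            break
        else:
            raise Exception("unrecognized arg in variogram:{0}".format(line[0]))
    return vartype, bearing, a, anisotropy

block = "# hypothetical PEST structure block\nVARTYPE 2\nBEARING 45.0\nA 1000.0\nEND VARIOGRAM\n"
print(_read_variogram(io.StringIO(block)))  # -> (2, 45.0, 1000.0, 1.0)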
e97782460f7ecb8fb53d27660723d39b8b0147ce
yellowjs0304/tf_computer_vision
Chapter04/tiny_imagenet_utils.py
[ "MIT" ]
Python
_get_class_information
<not_specific>
def _get_class_information(ids_file, words_file): """ Extract the class IDs and corresponding human-readable labels from metadata files. :param ids_file: IDs filename (contains list of unique string class IDs) :param words_file: Words filename (contains list of tuples <ID, human-readable label>) :return: List of IDs, Dictionary of labels """ with open(ids_file, "r") as f: class_ids = [line[:-1] for line in f.readlines()] # removing the `\n` for each line with open(words_file, "r") as f: words_lines = f.readlines() class_readable_labels = {} for line in words_lines: # We split the line between the ID (9-char long) and the human readable label: class_id = line[:9] class_label = line[10:-1] # If this class is in our dataset, we add it to our id-to-label dictionary: if class_id in class_ids: class_readable_labels[class_id] = class_label return class_ids, class_readable_labels
Extract the class IDs and corresponding human-readable labels from metadata files. :param ids_file: IDs filename (contains list of unique string class IDs) :param words_file: Words filename (contains list of tuples <ID, human-readable label>) :return: List of IDs, Dictionary of labels
Extract the class IDs and corresponding human-readable labels from metadata files.
[ "Extract", "the", "class", "IDs", "and", "corresponding", "human", "-", "readable", "labels", "from", "metadata", "files", "." ]
def _get_class_information(ids_file, words_file): with open(ids_file, "r") as f: class_ids = [line[:-1] for line in f.readlines()] with open(words_file, "r") as f: words_lines = f.readlines() class_readable_labels = {} for line in words_lines: class_id = line[:9] class_label = line[10:-1] if class_id in class_ids: class_readable_labels[class_id] = class_label return class_ids, class_readable_labels
[ "def", "_get_class_information", "(", "ids_file", ",", "words_file", ")", ":", "with", "open", "(", "ids_file", ",", "\"r\"", ")", "as", "f", ":", "class_ids", "=", "[", "line", "[", ":", "-", "1", "]", "for", "line", "in", "f", ".", "readlines", "(", ")", "]", "with", "open", "(", "words_file", ",", "\"r\"", ")", "as", "f", ":", "words_lines", "=", "f", ".", "readlines", "(", ")", "class_readable_labels", "=", "{", "}", "for", "line", "in", "words_lines", ":", "class_id", "=", "line", "[", ":", "9", "]", "class_label", "=", "line", "[", "10", ":", "-", "1", "]", "if", "class_id", "in", "class_ids", ":", "class_readable_labels", "[", "class_id", "]", "=", "class_label", "return", "class_ids", ",", "class_readable_labels" ]
Extract the class IDs and corresponding human-readable labels from metadata files.
[ "Extract", "the", "class", "IDs", "and", "corresponding", "human", "-", "readable", "labels", "from", "metadata", "files", "." ]
[ "\"\"\"\n Extract the class IDs and corresponding human-readable labels from metadata files.\n :param ids_file: IDs filename (contains list of unique string class IDs)\n :param words_file: Words filename (contains list of tuples <ID, human-readable label>)\n :return: List of IDs, Dictionary of labels\n \"\"\"", "# removing the `\\n` for each line", "# We split the line between the ID (9-char long) and the human readable label:", "# If this class is in our dataset, we add it to our id-to-label dictionary:" ]
[ { "param": "ids_file", "type": null }, { "param": "words_file", "type": null } ]
{ "returns": [ { "docstring": "List of IDs, Dictionary of labels", "docstring_tokens": [ "List", "of", "IDs", "Dictionary", "of", "labels" ], "type": null } ], "raises": [], "params": [ { "identifier": "ids_file", "type": null, "docstring": "IDs filename (contains list of unique string class IDs)", "docstring_tokens": [ "IDs", "filename", "(", "contains", "list", "of", "unique", "string", "class", "IDs", ")" ], "default": null, "is_optional": null }, { "identifier": "words_file", "type": null, "docstring": "Words filename (contains list of tuples )", "docstring_tokens": [ "Words", "filename", "(", "contains", "list", "of", "tuples", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_class_information(ids_file, words_file): with open(ids_file, "r") as f: class_ids = [line[:-1] for line in f.readlines()] with open(words_file, "r") as f: words_lines = f.readlines() class_readable_labels = {} for line in words_lines: class_id = line[:9] class_label = line[10:-1] if class_id in class_ids: class_readable_labels[class_id] = class_label return class_ids, class_readable_labels
1,129
418
4717573ca17cacfca9bed61c862fa7798dd6fa5a
ooby/iris-pacs
libs/commands/c_get.py
[ "MIT" ]
Python
is_dicom
bool
def is_dicom(path: str) -> bool: '''Check file whether dicom-file or not''' if not os.path.isfile(path): return False try: with open(path, 'rb') as file_name: return file_name.read(132).decode('ASCII')[-4:] == 'DICM' except UnicodeDecodeError: return False
Check file whether dicom-file or not
Check file whether dicom-file or not
[ "Check", "file", "whether", "dicom", "-", "file", "or", "not" ]
def is_dicom(path: str) -> bool: if not os.path.isfile(path): return False try: with open(path, 'rb') as file_name: return file_name.read(132).decode('ASCII')[-4:] == 'DICM' except UnicodeDecodeError: return False
[ "def", "is_dicom", "(", "path", ":", "str", ")", "->", "bool", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "False", "try", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file_name", ":", "return", "file_name", ".", "read", "(", "132", ")", ".", "decode", "(", "'ASCII'", ")", "[", "-", "4", ":", "]", "==", "'DICM'", "except", "UnicodeDecodeError", ":", "return", "False" ]
Check file whether dicom-file or not
[ "Check", "file", "whether", "dicom", "-", "file", "or", "not" ]
[ "'''Check file whether dicom-file or not'''" ]
[ { "param": "path", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def is_dicom(path: str) -> bool: if not os.path.isfile(path): return False try: with open(path, 'rb') as file_name: return file_name.read(132).decode('ASCII')[-4:] == 'DICM' except UnicodeDecodeError: return False
1,130
585
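The DICM check in the record above relies on the standard DICOM layout: a 128-byte preamble followed by the 4-byte magic 'DICM', 132 bytes in total. A self-contained check against a fabricated file:

import os
import tempfile

def is_dicom(path: str) -> bool:
    if not os.path.isfile(path):
        return False
    try:
        with open(path, 'rb') as file_name:
            return file_name.read(132).decode('ASCII')[-4:] == 'DICM'
    except UnicodeDecodeError:
        return False

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
    f.write(b'\x00' * 128 + b'DICM' + b'...')  # preamble + magic + fake payload
print(is_dicom(path))             # -> True
print(is_dicom('/no/such/file'))  # -> False
os.remove(path)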
5245e459c43919975f9e3c1b8e413094005f67d0
kepingwa/tdx-tools
utils/pycloudstack/pycloudstack/msr.py
[ "Apache-2.0" ]
Python
_check_kmod
null
def _check_kmod(): """ Check whether the MSR is loaded, modprobe if not. """ if not os.path.exists("/dev/cpu/0/msr"): os.system("modprobe msr")
Check whether the MSR is loaded, modprobe if not.
Check whether the MSR is loaded, modprobe if not.
[ "Check", "whether", "the", "MSR", "is", "loaded", "modprobe", "if", "not", "." ]
def _check_kmod(): if not os.path.exists("/dev/cpu/0/msr"): os.system("modprobe msr")
[ "def", "_check_kmod", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "\"/dev/cpu/0/msr\"", ")", ":", "os", ".", "system", "(", "\"modprobe msr\"", ")" ]
Check whether the MSR is loaded, modprobe if not.
[ "Check", "whether", "the", "MSR", "is", "loaded", "modprobe", "if", "not", "." ]
[ "\"\"\"\n Check whether the MSR is loaded, modprobe if not.\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import os def _check_kmod(): if not os.path.exists("/dev/cpu/0/msr"): os.system("modprobe msr")
1,131
598
2798e2438b7cd043008b78adc35a8c2d28d9fd0a
MedTAG/medtag-core
MedTAG_Dockerized/MedTAG_sket_dock_App/sket/sket/nerd/nerd.py
[ "MIT" ]
Python
lookup_snomed_codes
<not_specific>
def lookup_snomed_codes(snomed_codes, use_case_ontology): """ Lookup for ontology concepts associated to target SNOMED codes Params: snomed_codes (list(str)/str): target SNOMED codes use_case_ontology (pandas DataFrame): reference ontology restricted to the use case considered Returns: a dict of identified ontology concepts {semantic_area: [iri, label], ...} """ lookups = {area: [] for area in set(use_case_ontology['semantic_area_label'].tolist()) if area is not None} if type(snomed_codes) == list: # search for list of snomed codes snomed_codes = [code for code in snomed_codes if code] if snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'].isin(snomed_codes)][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: # matches found within ontology for linked_datum in linked_data.values.tolist(): lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) return lookups else: # search for single snomed code if snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'] == snomed_codes][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: # match found within ontology linked_datum = linked_data.values[0].tolist() lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) return lookups
Lookup for ontology concepts associated to target SNOMED codes Params: snomed_codes (list(str)/str): target SNOMED codes use_case_ontology (pandas DataFrame): reference ontology restricted to the use case considered Returns: a dict of identified ontology concepts {semantic_area: [iri, label], ...}
Lookup for ontology concepts associated to target SNOMED codes
[ "Lookup", "for", "ontology", "concepts", "associated", "to", "target", "SNOMED", "codes" ]
def lookup_snomed_codes(snomed_codes, use_case_ontology): lookups = {area: [] for area in set(use_case_ontology['semantic_area_label'].tolist()) if area is not None} if type(snomed_codes) == list: snomed_codes = [code for code in snomed_codes if code] if snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'].isin(snomed_codes)][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: for linked_datum in linked_data.values.tolist(): lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) return lookups else: if snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'] == snomed_codes][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: linked_datum = linked_data.values[0].tolist() lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) return lookups
[ "def", "lookup_snomed_codes", "(", "snomed_codes", ",", "use_case_ontology", ")", ":", "lookups", "=", "{", "area", ":", "[", "]", "for", "area", "in", "set", "(", "use_case_ontology", "[", "'semantic_area_label'", "]", ".", "tolist", "(", ")", ")", "if", "area", "is", "not", "None", "}", "if", "type", "(", "snomed_codes", ")", "==", "list", ":", "snomed_codes", "=", "[", "code", "for", "code", "in", "snomed_codes", "if", "code", "]", "if", "snomed_codes", ":", "linked_data", "=", "use_case_ontology", ".", "loc", "[", "use_case_ontology", "[", "'SNOMED'", "]", ".", "isin", "(", "snomed_codes", ")", "]", "[", "[", "'iri'", ",", "'label'", ",", "'semantic_area_label'", "]", "]", "if", "not", "linked_data", ".", "empty", ":", "for", "linked_datum", "in", "linked_data", ".", "values", ".", "tolist", "(", ")", ":", "lookups", "[", "str", "(", "linked_datum", "[", "2", "]", ")", "]", ".", "append", "(", "[", "linked_datum", "[", "0", "]", ",", "linked_datum", "[", "1", "]", "]", ")", "return", "lookups", "else", ":", "if", "snomed_codes", ":", "linked_data", "=", "use_case_ontology", ".", "loc", "[", "use_case_ontology", "[", "'SNOMED'", "]", "==", "snomed_codes", "]", "[", "[", "'iri'", ",", "'label'", ",", "'semantic_area_label'", "]", "]", "if", "not", "linked_data", ".", "empty", ":", "linked_datum", "=", "linked_data", ".", "values", "[", "0", "]", ".", "tolist", "(", ")", "lookups", "[", "str", "(", "linked_datum", "[", "2", "]", ")", "]", ".", "append", "(", "[", "linked_datum", "[", "0", "]", ",", "linked_datum", "[", "1", "]", "]", ")", "return", "lookups" ]
Lookup for ontology concepts associated to target SNOMED codes
[ "Lookup", "for", "ontology", "concepts", "associated", "to", "target", "SNOMED", "codes" ]
[ "\"\"\"\n\t\tLookup for ontology concepts associated to target SNOMED codes\n\n\t\tParams:\n\t\t\tsnomed_codes (list(str)/str): target SNOMED codes\n\t\t\tuse_case_ontology (pandas DataFrame): reference ontology restricted to the use case considered\n\n\t\tReturns: a dict of identified ontology concepts {semantic_area: [iri, label], ...}\n\t\t\"\"\"", "# search for list of snomed codes", "# matches found within ontology", "# search for single snomed code", "# match found within ontology" ]
[ { "param": "snomed_codes", "type": null }, { "param": "use_case_ontology", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "snomed_codes", "type": null, "docstring": "target SNOMED codes", "docstring_tokens": [ "target", "SNOMED", "codes" ], "default": null, "is_optional": false }, { "identifier": "use_case_ontology", "type": null, "docstring": "reference ontology restricted to the use case considered", "docstring_tokens": [ "reference", "ontology", "restricted", "to", "the", "use", "case", "considered" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def lookup_snomed_codes(snomed_codes, use_case_ontology): lookups = {area: [] for area in set(use_case_ontology['semantic_area_label'].tolist()) if area is not None} if isinstance(snomed_codes, list): snomed_codes = [code for code in snomed_codes if code] if snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'].isin(snomed_codes)][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: for linked_datum in linked_data.values.tolist(): lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) elif snomed_codes: linked_data = use_case_ontology.loc[use_case_ontology['SNOMED'] == snomed_codes][['iri', 'label', 'semantic_area_label']] if not linked_data.empty: linked_datum = linked_data.values[0].tolist() lookups[str(linked_datum[2])].append([linked_datum[0], linked_datum[1]]) return lookups
1,132
52
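A hedged usage sketch for lookup_snomed_codes follows; the ontology frame, IRIs, and SNOMED codes are invented for illustration and are not part of this record.

import pandas as pd

# Toy ontology slice with the columns the function expects.
use_case_ontology = pd.DataFrame({
    'SNOMED': ['22298006', '38341003'],
    'iri': ['http://example.org/onto#MI', 'http://example.org/onto#HTN'],
    'label': ['Myocardial infarction', 'Hypertensive disorder'],
    'semantic_area_label': ['Diagnosis', 'Diagnosis'],
})

# A list of codes (empty entries are filtered out) and a single code string are both accepted.
print(lookup_snomed_codes(['22298006', ''], use_case_ontology))
# {'Diagnosis': [['http://example.org/onto#MI', 'Myocardial infarction']]}
print(lookup_snomed_codes('38341003', use_case_ontology))
# {'Diagnosis': [['http://example.org/onto#HTN', 'Hypertensive disorder']]}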
3f188c1df3d041f19d9881733cd40b27b1e009ee
staphopia/staphopia-analysis-pipeline
scripts/mlst-blast.py
[ "MIT" ]
Python
pipe_command
<not_specific>
def pipe_command(cmd_1, cmd_2, stdout=False, stderr=False, verbose=True, shell=False): """ Execute two piped commands and return STDOUT and STDERR. If stdout or stderr are given, output will be written to given file name. """ import subprocess if verbose: print('{0} | {1}'.format(' '.join(cmd_1), ' '.join(cmd_2))) stdout = open(stdout, 'w') if stdout else subprocess.PIPE stderr = open(stderr, 'w') if stderr else subprocess.PIPE p1 = subprocess.Popen(cmd_1, stdout=subprocess.PIPE) p2 = subprocess.Popen(cmd_2, stdin=p1.stdout, stdout=stdout, stderr=stderr) p1.wait() return p2.communicate()
Execute two piped commands and return STDOUT and STDERR. If stdout or stderr are given, output will be written to given file name.
Execute two piped commands and return STDOUT and STDERR. If stdout or stderr are given, output will be written to given file name.
[ "Execute", "two", "piped", "commands", "and", "return", "STDOUT", "and", "STDERR", ".", "If", "stdout", "or", "stderr", "are", "given", "output", "will", "be", "written", "to", "given", "file", "name", "." ]
def pipe_command(cmd_1, cmd_2, stdout=False, stderr=False, verbose=True, shell=False): import subprocess if verbose: print('{0} | {1}'.format(' '.join(cmd_1), ' '.join(cmd_2))) stdout = open(stdout, 'w') if stdout else subprocess.PIPE stderr = open(stderr, 'w') if stderr else subprocess.PIPE p1 = subprocess.Popen(cmd_1, stdout=subprocess.PIPE) p2 = subprocess.Popen(cmd_2, stdin=p1.stdout, stdout=stdout, stderr=stderr) p1.wait() return p2.communicate()
[ "def", "pipe_command", "(", "cmd_1", ",", "cmd_2", ",", "stdout", "=", "False", ",", "stderr", "=", "False", ",", "verbose", "=", "True", ",", "shell", "=", "False", ")", ":", "import", "subprocess", "if", "verbose", ":", "print", "(", "'{0} | {1}'", ".", "format", "(", "' '", ".", "join", "(", "cmd_1", ")", ",", "' '", ".", "join", "(", "cmd_2", ")", ")", ")", "stdout", "=", "open", "(", "stdout", ",", "'w'", ")", "if", "stdout", "else", "subprocess", ".", "PIPE", "stderr", "=", "open", "(", "stderr", ",", "'w'", ")", "if", "stderr", "else", "subprocess", ".", "PIPE", "p1", "=", "subprocess", ".", "Popen", "(", "cmd_1", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "p2", "=", "subprocess", ".", "Popen", "(", "cmd_2", ",", "stdin", "=", "p1", ".", "stdout", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ")", "p1", ".", "wait", "(", ")", "return", "p2", ".", "communicate", "(", ")" ]
Execute two piped commands and return STDOUT and STDERR.
[ "Execute", "two", "piped", "commands", "and", "return", "STDOUT", "and", "STDERR", "." ]
[ "\"\"\"\n Execute two piped commands and return STDOUT and STDERR.\n\n If stdout or stderr are given, output will be written to given file name.\n \"\"\"" ]
[ { "param": "cmd_1", "type": null }, { "param": "cmd_2", "type": null }, { "param": "stdout", "type": null }, { "param": "stderr", "type": null }, { "param": "verbose", "type": null }, { "param": "shell", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cmd_1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cmd_2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stdout", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stderr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "shell", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess def pipe_command(cmd_1, cmd_2, stdout=False, stderr=False, verbose=True, shell=False): import subprocess if verbose: print('{0} | {1}'.format(' '.join(cmd_1), ' '.join(cmd_2))) stdout = open(stdout, 'w') if stdout else subprocess.PIPE stderr = open(stderr, 'w') if stderr else subprocess.PIPE p1 = subprocess.Popen(cmd_1, stdout=subprocess.PIPE) p2 = subprocess.Popen(cmd_2, stdin=p1.stdout, stdout=stdout, stderr=stderr) p1.wait() return p2.communicate()
1,133
125
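A minimal usage sketch for pipe_command; the echo/tr pipeline is an arbitrary choice and assumes a POSIX environment with both tools on PATH, and the file name out.txt is likewise illustrative.

# Equivalent of the shell pipeline: echo 'hello world' | tr a-z A-Z
out, err = pipe_command(['echo', 'hello world'], ['tr', 'a-z', 'A-Z'])
print(out)  # b'HELLO WORLD\n'

# Passing a file name writes the second command's output there instead of capturing it.
pipe_command(['echo', 'hello world'], ['tr', 'a-z', 'A-Z'], stdout='out.txt', verbose=False)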
af283eedb6750f83c6c524d32b43889e5f88272e
skruger/AVWX-Engine
avwx/speech.py
[ "MIT" ]
Python
ordinal
str
def ordinal(n: int) -> str: """ Converts an int to its spoken ordinal representation """ if n < 0: return return str(n) + "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10 :: 4]
Converts an int to its spoken ordinal representation
Converts an int to its spoken ordinal representation
[ "Converts", "an", "int", "to", "its", "spoken", "ordinal", "representation" ]
def ordinal(n: int) -> str: if n < 0: return return str(n) + "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10 :: 4]
[ "def", "ordinal", "(", "n", ":", "int", ")", "->", "str", ":", "if", "n", "<", "0", ":", "return", "return", "str", "(", "n", ")", "+", "\"tsnrhtdd\"", "[", "(", "n", "//", "10", "%", "10", "!=", "1", ")", "*", "(", "n", "%", "10", "<", "4", ")", "*", "n", "%", "10", ":", ":", "4", "]" ]
Converts an int to its spoken ordinal representation
[ "Converts", "an", "int", "to", "its", "spoken", "ordinal", "representation" ]
[ "\"\"\"\n Converts an int to its spoken ordinal representation\n \"\"\"" ]
[ { "param": "n", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ordinal(n: int) -> str: if n < 0: return return str(n) + "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10 :: 4]
1,134
20
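A quick check of ordinal after the floor-division fix above (with true division, 11 would render as '11st' under Python 3); the sample values are arbitrary.

for n in (1, 2, 3, 4, 11, 12, 13, 21, 101):
    print(ordinal(n))
# 1st 2nd 3rd 4th 11th 12th 13th 21st 101st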
c155f59fa5645ca0cffe5e5bc960e214a965c94f
Metamess/AdventOfCode
2018/days/day16.py
[ "MIT" ]
Python
banr
null
def banr(a, b, c, register): """ (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B. """ register[c] = register[a] & register[b]
(bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
(bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
[ "(", "bitwise", "AND", "register", ")", "stores", "into", "register", "C", "the", "result", "of", "the", "bitwise", "AND", "of", "register", "A", "and", "register", "B", "." ]
def banr(a, b, c, register): register[c] = register[a] & register[b]
[ "def", "banr", "(", "a", ",", "b", ",", "c", ",", "register", ")", ":", "register", "[", "c", "]", "=", "register", "[", "a", "]", "&", "register", "[", "b", "]" ]
(bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
[ "(", "bitwise", "AND", "register", ")", "stores", "into", "register", "C", "the", "result", "of", "the", "bitwise", "AND", "of", "register", "A", "and", "register", "B", "." ]
[ "\"\"\"\n\t(bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.\n\t\"\"\"" ]
[ { "param": "a", "type": null }, { "param": "b", "type": null }, { "param": "c", "type": null }, { "param": "register", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "a", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "b", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "c", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "register", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def banr(a, b, c, register): register[c] = register[a] & register[b]
1,135
449
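A tiny worked example for banr; the register contents are made up for illustration.

register = [9, 10, 0, 0]  # four registers, indexed 0..3
banr(0, 1, 2, register)   # register[2] = register[0] & register[1] = 0b1001 & 0b1010
print(register)           # [9, 10, 8, 0]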
259c0681e1d8fd8f573a611c407ddb41364509ed
wflynny/cbviz
cbviz/plotting.py
[ "MIT" ]
Python
compute_figure_dimensions
dict
def compute_figure_dimensions(nspaces, img_shape) -> dict: """Computes figure dimensions given image size""" dpi_guess = img_shape[0]/8 if dpi_guess < 250: dpi_guess = 80 else: dpi_guess = 300 width = min(img_shape[0]/dpi_guess, 8) #inches height = width / img_shape[1] * img_shape[0] layouts = {1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2), 5: (2, 3), 6: (2, 3), 7: (3, 3), 8: (3, 3), 9: (3, 3)} nrows, ncols = layouts[nspaces] total_width = ncols * width total_height = (nrows * height) + 1 return dict(nrows=nrows, ncols=ncols, figsize=(total_width, total_height))
Computes figure dimensions given image size
Computes figure dimensions given image size
[ "Computes", "figure", "dimensions", "given", "image", "size" ]
def compute_figure_dimensions(nspaces, img_shape) -> dict: dpi_guess = img_shape[0]/8 if dpi_guess < 250: dpi_guess = 80 else: dpi_guess = 300 width = min(img_shape[0]/dpi_guess, 8) height = width / img_shape[1] * img_shape[0] layouts = {1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2), 5: (2, 3), 6: (2, 3), 7: (3, 3), 8: (3, 3), 9: (3, 3)} nrows, ncols = layouts[nspaces] total_width = ncols * width total_height = (nrows * height) + 1 return dict(nrows=nrows, ncols=ncols, figsize=(total_width, total_height))
[ "def", "compute_figure_dimensions", "(", "nspaces", ",", "img_shape", ")", "->", "dict", ":", "dpi_guess", "=", "img_shape", "[", "0", "]", "/", "8", "if", "dpi_guess", "<", "250", ":", "dpi_guess", "=", "80", "else", ":", "dpi_guess", "=", "300", "width", "=", "min", "(", "img_shape", "[", "0", "]", "/", "dpi_guess", ",", "8", ")", "height", "=", "width", "/", "img_shape", "[", "1", "]", "*", "img_shape", "[", "0", "]", "layouts", "=", "{", "1", ":", "(", "1", ",", "1", ")", ",", "2", ":", "(", "1", ",", "2", ")", ",", "3", ":", "(", "1", ",", "3", ")", ",", "4", ":", "(", "2", ",", "2", ")", ",", "5", ":", "(", "2", ",", "3", ")", ",", "6", ":", "(", "2", ",", "3", ")", ",", "7", ":", "(", "3", ",", "3", ")", ",", "8", ":", "(", "3", ",", "3", ")", ",", "9", ":", "(", "3", ",", "3", ")", "}", "nrows", ",", "ncols", "=", "layouts", "[", "nspaces", "]", "total_width", "=", "ncols", "*", "width", "total_height", "=", "(", "nrows", "*", "height", ")", "+", "1", "return", "dict", "(", "nrows", "=", "nrows", ",", "ncols", "=", "ncols", ",", "figsize", "=", "(", "total_width", ",", "total_height", ")", ")" ]
Computes figure dimensions given image size
[ "Computes", "figure", "dimensions", "given", "image", "size" ]
[ "\"\"\"Computes figure dimensions given image size\"\"\"", "#inches" ]
[ { "param": "nspaces", "type": null }, { "param": "img_shape", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "nspaces", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "img_shape", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_figure_dimensions(nspaces, img_shape) -> dict: dpi_guess = img_shape[0]/8 if dpi_guess < 250: dpi_guess = 80 else: dpi_guess = 300 width = min(img_shape[0]/dpi_guess, 8) height = width / img_shape[1] * img_shape[0] layouts = {1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2), 5: (2, 3), 6: (2, 3), 7: (3, 3), 8: (3, 3), 9: (3, 3)} nrows, ncols = layouts[nspaces] total_width = ncols * width total_height = (nrows * height) + 1 return dict(nrows=nrows, ncols=ncols, figsize=(total_width, total_height))
1,136
17
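An illustrative call to compute_figure_dimensions; the panel count and pixel shape are arbitrary, and the returned dict is presumably meant to be unpacked into matplotlib's plt.subplots.

layout = compute_figure_dimensions(5, (1200, 800))
print(layout)  # {'nrows': 2, 'ncols': 3, 'figsize': (24, 25.0)}
# e.g. fig, axes = plt.subplots(**layout)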