hexsha (stringlengths 40–40) | repo (stringlengths 5–121) | path (stringlengths 4–227) | license (list) | language (stringclasses, 1 value) | identifier (stringlengths 1–107) | return_type (stringlengths 2–237, nullable ⌀) | original_string (stringlengths 75–13.4k) | original_docstring (stringlengths 13–12.9k) | docstring (stringlengths 13–2.57k) | docstring_tokens (list) | code (stringlengths 23–1.88k) | code_tokens (list) | short_docstring (stringlengths 1–1.32k) | short_docstring_tokens (list) | comment (list) | parameters (list) | docstring_params (dict) | code_with_imports (stringlengths 23–1.88k) | idxs (int64, 0–611k) | cluster (int64, 0–1.02k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21a18029370f390c7d9369df97ac9b47babfeabe
|
hchang18/non-parametric-methods
|
kernel_density_estimator.py
|
[
"MIT"
] |
Python
|
uniform_pdf
|
<not_specific>
|
def uniform_pdf(x_i, bandwidth):
"""Return uniform kernel density estimator."""
lowerb = (x_i - bandwidth)
upperb = (x_i + bandwidth)
def evaluate(x):
"""Evaluate x."""
if x <= lowerb:
pdf = 0
elif x > upperb:
pdf = 0
else:
pdf = (1 / (2 * bandwidth))
return pdf
return evaluate
|
Return uniform kernel density estimator.
|
Return uniform kernel density estimator.
|
[
"Return",
"uniform",
"kernel",
"density",
"estimator",
"."
] |
def uniform_pdf(x_i, bandwidth):
lowerb = (x_i - bandwidth)
upperb = (x_i + bandwidth)
def evaluate(x):
if x <= lowerb:
pdf = 0
elif x > upperb:
pdf = 0
else:
pdf = (1 / (2 * bandwidth))
return pdf
return evaluate
|
[
"def",
"uniform_pdf",
"(",
"x_i",
",",
"bandwidth",
")",
":",
"lowerb",
"=",
"(",
"x_i",
"-",
"bandwidth",
")",
"upperb",
"=",
"(",
"x_i",
"+",
"bandwidth",
")",
"def",
"evaluate",
"(",
"x",
")",
":",
"\"\"\"Evaluate x.\"\"\"",
"if",
"x",
"<=",
"lowerb",
":",
"pdf",
"=",
"0",
"elif",
"x",
">",
"upperb",
":",
"pdf",
"=",
"0",
"else",
":",
"pdf",
"=",
"(",
"1",
"/",
"(",
"2",
"*",
"bandwidth",
")",
")",
"return",
"pdf",
"return",
"evaluate"
] |
Return uniform kernel density estimator.
|
[
"Return",
"uniform",
"kernel",
"density",
"estimator",
"."
] |
[
"\"\"\"Return uniform kernel density estimator.\"\"\"",
"\"\"\"Evaluate x.\"\"\""
] |
[
{
"param": "x_i",
"type": null
},
{
"param": "bandwidth",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "x_i",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "bandwidth",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def uniform_pdf(x_i, bandwidth):
lowerb = (x_i - bandwidth)
upperb = (x_i + bandwidth)
def evaluate(x):
if x <= lowerb:
pdf = 0
elif x > upperb:
pdf = 0
else:
pdf = (1 / (2 * bandwidth))
return pdf
return evaluate
| 800 | 29 |
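A minimal usage sketch of the closure above; the inputs are illustrative, not from the dataset:

# Hypothetical values: center the kernel at 0.0 with bandwidth 1.0
pdf = uniform_pdf(x_i=0.0, bandwidth=1.0)
print(pdf(0.5))   # inside (lowerb, upperb]: 1 / (2 * bandwidth) = 0.5
print(pdf(-1.0))  # at or below lowerb: 0
print(pdf(2.0))   # above upperb: 0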
c17ab28adf7c7e96219c0fe3c65217c88b81e74e
|
alexpark07/pwntools
|
pwnlib/util/fiddling.py
|
[
"MIT"
] |
Python
|
b64d
|
<not_specific>
|
def b64d(s):
"""b64d(s) -> str
Base64 decodes a string
Example:
>>> b64d('dGVzdA==')
'test'
"""
return base64.b64decode(s)
|
b64d(s) -> str
Base64 decodes a string
Example:
>>> b64d('dGVzdA==')
'test'
|
> str
Base64 decodes a string
|
[
">",
"str",
"Base64",
"decodes",
"a",
"string"
] |
def b64d(s):
return base64.b64decode(s)
|
[
"def",
"b64d",
"(",
"s",
")",
":",
"return",
"base64",
".",
"b64decode",
"(",
"s",
")"
] |
b64d(s) -> str
Base64 decodes a string
|
[
"b64d",
"(",
"s",
")",
"-",
">",
"str",
"Base64",
"decodes",
"a",
"string"
] |
[
"\"\"\"b64d(s) -> str\n\n Base64 decodes a string\n\n Example:\n\n >>> b64d('dGVzdA==')\n 'test'\n \"\"\""
] |
[
{
"param": "s",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
}
|
import base64
def b64d(s):
return base64.b64decode(s)
| 801 | 1 |
ae7244a8243d35652b92e675045a1758e4330e65
|
Stanels42/cipher-cipher
|
cipher.py
|
[
"MIT"
] |
Python
|
encript
|
<not_specific>
|
def encript(string, key):
"""Takes in a string and a key and will apply the ceaser cipher to the string based on the key. Then returning the result"""
alp = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
new_str = ''
for char in string.lower():
if char in alp:
new_str += alp[(alp.index(char) + key) % len(alp)]
else:
new_str += char
return new_str
|
Takes in a string and a key and will apply the ceaser cipher to the string based on the key. Then returning the result
|
Takes in a string and a key and will apply the ceaser cipher to the string based on the key. Then returning the result
|
[
"Takes",
"in",
"a",
"string",
"and",
"a",
"key",
"and",
"will",
"apply",
"the",
"ceaser",
"cipher",
"to",
"the",
"string",
"based",
"on",
"the",
"key",
".",
"Then",
"returning",
"the",
"result"
] |
def encript(string, key):
alp = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
new_str = ''
for char in string.lower():
if char in alp:
new_str += alp[(alp.index(char) + key) % len(alp)]
else:
new_str += char
return new_str
|
[
"def",
"encript",
"(",
"string",
",",
"key",
")",
":",
"alp",
"=",
"[",
"'a'",
",",
"'b'",
",",
"'c'",
",",
"'d'",
",",
"'e'",
",",
"'f'",
",",
"'g'",
",",
"'h'",
",",
"'i'",
",",
"'j'",
",",
"'k'",
",",
"'l'",
",",
"'m'",
",",
"'n'",
",",
"'o'",
",",
"'p'",
",",
"'q'",
",",
"'r'",
",",
"'s'",
",",
"'t'",
",",
"'u'",
",",
"'v'",
",",
"'w'",
",",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"new_str",
"=",
"''",
"for",
"char",
"in",
"string",
".",
"lower",
"(",
")",
":",
"if",
"char",
"in",
"alp",
":",
"new_str",
"+=",
"alp",
"[",
"(",
"alp",
".",
"index",
"(",
"char",
")",
"+",
"key",
")",
"%",
"len",
"(",
"alp",
")",
"]",
"else",
":",
"new_str",
"+=",
"char",
"return",
"new_str"
] |
Takes in a string and a key and will apply the ceaser cipher to the string based on the key.
|
[
"Takes",
"in",
"a",
"string",
"and",
"a",
"key",
"and",
"will",
"apply",
"the",
"ceaser",
"cipher",
"to",
"the",
"string",
"based",
"on",
"the",
"key",
"."
] |
[
"\"\"\"Takes in a string and a key and will apply the ceaser cipher to the string based on the key. Then returning the result\"\"\""
] |
[
{
"param": "string",
"type": null
},
{
"param": "key",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def encript(string, key):
alp = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
new_str = ''
for char in string.lower():
if char in alp:
new_str += alp[(alp.index(char) + key) % len(alp)]
else:
new_str += char
return new_str
| 802 | 427 |
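A usage sketch of the Caesar shift above with an illustrative key; the modulo wraps past 'z' and non-letters pass through unchanged:

print(encript('abc yz!', 2))  # prints 'cde ab!': 'y' and 'z' wrap to 'a' and 'b'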
e5cdb26d33ed03adc43a5c6936d7c2bb45650c02
|
iridium-browser/iridium-browser
|
tools/clang/scripts/build.py
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] |
Python
|
GetCommitDescription
|
<not_specific>
|
def GetCommitDescription(commit):
"""Get the output of `git describe`.
Needs to be called from inside the git repository dir."""
git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
return subprocess.check_output(
[git_exe, 'describe', '--long', '--abbrev=8', commit]).rstrip()
|
Get the output of `git describe`.
Needs to be called from inside the git repository dir.
|
Get the output of `git describe`.
Needs to be called from inside the git repository dir.
|
[
"Get",
"the",
"output",
"of",
"`",
"git",
"describe",
"`",
".",
"Needs",
"to",
"be",
"called",
"from",
"inside",
"the",
"git",
"repository",
"dir",
"."
] |
def GetCommitDescription(commit):
git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
return subprocess.check_output(
[git_exe, 'describe', '--long', '--abbrev=8', commit]).rstrip()
|
[
"def",
"GetCommitDescription",
"(",
"commit",
")",
":",
"git_exe",
"=",
"'git.bat'",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
"else",
"'git'",
"return",
"subprocess",
".",
"check_output",
"(",
"[",
"git_exe",
",",
"'describe'",
",",
"'--long'",
",",
"'--abbrev=8'",
",",
"commit",
"]",
")",
".",
"rstrip",
"(",
")"
] |
Get the output of `git describe`.
|
[
"Get",
"the",
"output",
"of",
"`",
"git",
"describe",
"`",
"."
] |
[
"\"\"\"Get the output of `git describe`.\n\n Needs to be called from inside the git repository dir.\"\"\""
] |
[
{
"param": "commit",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "commit",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
import sys
def GetCommitDescription(commit):
git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
return subprocess.check_output(
[git_exe, 'describe', '--long', '--abbrev=8', commit]).rstrip()
| 803 | 242 |
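A hedged usage sketch; it must run from inside a git checkout, and under Python 3 `check_output` returns bytes:

# Hypothetical output shape for `git describe --long --abbrev=8`
desc = GetCommitDescription('HEAD')  # e.g. b'v1.2.3-4-g1a2b3c4d' (tag-count-g<hash>)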
b7bce97ab6229ee857958ddb135236a1a56e4f71
|
chaseste/fin4dummy
|
application/internal/dates.py
|
[
"MIT"
] |
Python
|
is_time_between
|
bool
|
def is_time_between(begin_time: time, end_time: time, check_time: time = None) -> bool:
"""Checks if the time is between the specified date range"""
check_time = check_time or datetime.now().time()
if begin_time < end_time:
return check_time >= begin_time and check_time <= end_time
else:
return check_time >= begin_time or check_time <= end_time
|
Checks if the time is between the specified date range
|
Checks if the time is between the specified date range
|
[
"Checks",
"if",
"the",
"time",
"is",
"between",
"the",
"specified",
"date",
"range"
] |
def is_time_between(begin_time: time, end_time: time, check_time: time = None) -> bool:
check_time = check_time or datetime.now().time()
if begin_time < end_time:
return check_time >= begin_time and check_time <= end_time
else:
return check_time >= begin_time or check_time <= end_time
|
[
"def",
"is_time_between",
"(",
"begin_time",
":",
"time",
",",
"end_time",
":",
"time",
",",
"check_time",
":",
"time",
"=",
"None",
")",
"->",
"bool",
":",
"check_time",
"=",
"check_time",
"or",
"datetime",
".",
"now",
"(",
")",
".",
"time",
"(",
")",
"if",
"begin_time",
"<",
"end_time",
":",
"return",
"check_time",
">=",
"begin_time",
"and",
"check_time",
"<=",
"end_time",
"else",
":",
"return",
"check_time",
">=",
"begin_time",
"or",
"check_time",
"<=",
"end_time"
] |
Checks if the time is between the specified date range
|
[
"Checks",
"if",
"the",
"time",
"is",
"between",
"the",
"specified",
"date",
"range"
] |
[
"\"\"\"Checks if the time is between the specified date range\"\"\""
] |
[
{
"param": "begin_time",
"type": "time"
},
{
"param": "end_time",
"type": "time"
},
{
"param": "check_time",
"type": "time"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "begin_time",
"type": "time",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "end_time",
"type": "time",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "check_time",
"type": "time",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from datetime import datetime, time
def is_time_between(begin_time: time, end_time: time, check_time: time = None) -> bool:
check_time = check_time or datetime.now().time()
if begin_time < end_time:
return check_time >= begin_time and check_time <= end_time
else:
return check_time >= begin_time or check_time <= end_time
| 804 | 823 |
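The `else` branch above handles ranges that wrap past midnight (when `begin_time > end_time`); a small sketch with illustrative times:

from datetime import time
print(is_time_between(time(22, 0), time(6, 0), time(23, 30)))  # True: inside the overnight window
print(is_time_between(time(22, 0), time(6, 0), time(12, 0)))   # False: outside it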
dd2c9292a4d5c0e23ba78433347bb4fd678aa4bb
|
MatteoLacki/rta
|
rta/stats/stats.py
|
[
"BSD-2-Clause"
] |
Python
|
sensitivity
|
<not_specific>
|
def sensitivity(cm):
"""True positives vs all positives.
Args:
cm (2x2 np.array): A confusion matrix.
Returns:
float: Sensitivity of the classifier.
"""
return cm[0,0]/(cm[0,0] + cm[0,1])
|
True positives vs all positives.
Args:
cm (2x2 np.array): A confusion matrix.
Returns:
float: Sensitivity of the classifier.
|
True positives vs all positives.
|
[
"True",
"positives",
"vs",
"all",
"positives",
"."
] |
def sensitivity(cm):
return cm[0,0]/(cm[0,0] + cm[0,1])
|
[
"def",
"sensitivity",
"(",
"cm",
")",
":",
"return",
"cm",
"[",
"0",
",",
"0",
"]",
"/",
"(",
"cm",
"[",
"0",
",",
"0",
"]",
"+",
"cm",
"[",
"0",
",",
"1",
"]",
")"
] |
True positives vs all positives.
|
[
"True",
"positives",
"vs",
"all",
"positives",
"."
] |
[
"\"\"\"True positives vs all positives.\n\n Args:\n cm (2x2 np.array): A confusion matrix.\n Returns:\n float: Sensitivity of the classifier.\n \"\"\""
] |
[
{
"param": "cm",
"type": null
}
] |
{
"returns": [
{
"docstring": "Sensitivity of the classifier.",
"docstring_tokens": [
"Sensitivity",
"of",
"the",
"classifier",
"."
],
"type": "float"
}
],
"raises": [],
"params": [
{
"identifier": "cm",
"type": null,
"docstring": "A confusion matrix.",
"docstring_tokens": [
"A",
"confusion",
"matrix",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def sensitivity(cm):
return cm[0,0]/(cm[0,0] + cm[0,1])
| 805 | 288 |
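A worked sketch with an illustrative confusion matrix, assuming the layout the indexing implies (row 0 holds the actual positives, so cm[0,0] is TP and cm[0,1] is FN):

import numpy as np
cm = np.array([[8, 2], [1, 9]])  # hypothetical counts
print(sensitivity(cm))  # 8 / (8 + 2) = 0.8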
3fbf4aedf26588c6bfad1e7a7ed744215b15089d
|
hrmstockdale/changelog-cli
|
tasks/release/__init__.py
|
[
"MIT"
] |
Python
|
release_changelog
| null |
def release_changelog(context):
"""
Runs changelog command to update changelog
"""
context.run('changelog release --yes')
|
Runs changelog command to update changelog
|
Runs changelog command to update changelog
|
[
"Runs",
"changelog",
"command",
"to",
"update",
"changelog"
] |
def release_changelog(context):
context.run('changelog release --yes')
|
[
"def",
"release_changelog",
"(",
"context",
")",
":",
"context",
".",
"run",
"(",
"'changelog release --yes'",
")"
] |
Runs changelog command to update changelog
|
[
"Runs",
"changelog",
"command",
"to",
"update",
"changelog"
] |
[
"\"\"\"\n Runs changelog command to update changelog\n \"\"\""
] |
[
{
"param": "context",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "context",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def release_changelog(context):
context.run('changelog release --yes')
| 806 | 889 |
41e1d27046897e082af258b6ca02c76e13093124
|
liuqk3/GSM
|
lib/models/config/config.py
|
[
"MIT"
] |
Python
|
load_config
|
<not_specific>
|
def load_config(cfg_path):
"""Load the config from a json file"""
if not os.path.exists(cfg_path):
raise RuntimeError('file {} does not exists!'.format(cfg_path))
with open(cfg_path, 'r') as f:
cfg = json.load(f)
return cfg
|
Load the config from a json file
|
Load the config from a json file
|
[
"Load",
"the",
"config",
"from",
"a",
"json",
"file"
] |
def load_config(cfg_path):
if not os.path.exists(cfg_path):
raise RuntimeError('file {} does not exists!'.format(cfg_path))
with open(cfg_path, 'r') as f:
cfg = json.load(f)
return cfg
|
[
"def",
"load_config",
"(",
"cfg_path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cfg_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'file {} does not exists!'",
".",
"format",
"(",
"cfg_path",
")",
")",
"with",
"open",
"(",
"cfg_path",
",",
"'r'",
")",
"as",
"f",
":",
"cfg",
"=",
"json",
".",
"load",
"(",
"f",
")",
"return",
"cfg"
] |
Load the config from a json file
|
[
"Load",
"the",
"config",
"from",
"a",
"json",
"file"
] |
[
"\"\"\"Load the config from a json file\"\"\""
] |
[
{
"param": "cfg_path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cfg_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
import json
def load_config(cfg_path):
if not os.path.exists(cfg_path):
raise RuntimeError('file {} does not exists!'.format(cfg_path))
with open(cfg_path, 'r') as f:
cfg = json.load(f)
return cfg
| 807 | 483 |
bcbb2b89e8b217cf2598a6d3791bb58f86220b94
|
chenjianlong/my-gist
|
fileutil.py
|
[
"Unlicense"
] |
Python
|
rm_dir
| null |
def rm_dir(src, dst, ignore = None):
"""remove the dst dir's file that it exist in src dir with the same relative path"""
src = src.rstrip(r'\/')
dst = dst.rstrip(r'\/')
for root, dirnames, filenames in os.walk(src):
if ignore and (ignore in root):
continue
for filename in filenames:
src_path = os.path.join(root, filename)
dst_path = os.path.join(dst, src_path[len(src)+1:])
os.path.exists(dst_path) and os.remove(dst_path)
|
remove the dst dir's file that it exist in src dir with the same relative path
|
remove the dst dir's file that it exist in src dir with the same relative path
|
[
"remove",
"the",
"dst",
"dir",
"'",
"s",
"file",
"that",
"it",
"exist",
"in",
"src",
"dir",
"with",
"the",
"same",
"relative",
"path"
] |
def rm_dir(src, dst, ignore = None):
src = src.rstrip(r'\/')
dst = dst.rstrip(r'\/')
for root, dirnames, filenames in os.walk(src):
if ignore and (ignore in root):
continue
for filename in filenames:
src_path = os.path.join(root, filename)
dst_path = os.path.join(dst, src_path[len(src)+1:])
os.path.exists(dst_path) and os.remove(dst_path)
|
[
"def",
"rm_dir",
"(",
"src",
",",
"dst",
",",
"ignore",
"=",
"None",
")",
":",
"src",
"=",
"src",
".",
"rstrip",
"(",
"r'\\/'",
")",
"dst",
"=",
"dst",
".",
"rstrip",
"(",
"r'\\/'",
")",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"src",
")",
":",
"if",
"ignore",
"and",
"(",
"ignore",
"in",
"root",
")",
":",
"continue",
"for",
"filename",
"in",
"filenames",
":",
"src_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"dst_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"src_path",
"[",
"len",
"(",
"src",
")",
"+",
"1",
":",
"]",
")",
"os",
".",
"path",
".",
"exists",
"(",
"dst_path",
")",
"and",
"os",
".",
"remove",
"(",
"dst_path",
")"
] |
remove the dst dir's file that it exist in src dir with the same relative path
|
[
"remove",
"the",
"dst",
"dir",
"'",
"s",
"file",
"that",
"it",
"exist",
"in",
"src",
"dir",
"with",
"the",
"same",
"relative",
"path"
] |
[
"\"\"\"remove the dst dir's file that it exist in src dir with the same relative path\"\"\""
] |
[
{
"param": "src",
"type": null
},
{
"param": "dst",
"type": null
},
{
"param": "ignore",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "src",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dst",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ignore",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def rm_dir(src, dst, ignore = None):
src = src.rstrip(r'\/')
dst = dst.rstrip(r'\/')
for root, dirnames, filenames in os.walk(src):
if ignore and (ignore in root):
continue
for filename in filenames:
src_path = os.path.join(root, filename)
dst_path = os.path.join(dst, src_path[len(src)+1:])
os.path.exists(dst_path) and os.remove(dst_path)
| 809 | 361 |
7369f3604f9318018cf45aa65b889b9e46eb04cf
|
pepastach/ochrona-cli
|
ochrona/parser/requirements.py
|
[
"MIT"
] |
Python
|
clean_dependency
|
str
|
def clean_dependency(dependency: str) -> str:
"""
Removes any comments or hashes following the dependency.
:param file_path: a dependency with optional pinned version
:return: str a cleaned dependency string
"""
if " " in dependency or ";" in dependency or "#" in dependency:
return dependency.split(" ")[0].replace(";", "")
return dependency
|
Removes any comments or hashes following the dependency.
:param file_path: a dependency with optional pinned version
:return: str a cleaned dependency string
|
Removes any comments or hashes following the dependency.
|
[
"Removes",
"any",
"comments",
"or",
"hashes",
"following",
"the",
"dependency",
"."
] |
def clean_dependency(dependency: str) -> str:
if " " in dependency or ";" in dependency or "#" in dependency:
return dependency.split(" ")[0].replace(";", "")
return dependency
|
[
"def",
"clean_dependency",
"(",
"dependency",
":",
"str",
")",
"->",
"str",
":",
"if",
"\" \"",
"in",
"dependency",
"or",
"\";\"",
"in",
"dependency",
"or",
"\"#\"",
"in",
"dependency",
":",
"return",
"dependency",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"\";\"",
",",
"\"\"",
")",
"return",
"dependency"
] |
Removes any comments or hashes following the dependency.
|
[
"Removes",
"any",
"comments",
"or",
"hashes",
"following",
"the",
"dependency",
"."
] |
[
"\"\"\"\n Removes any comments or hashes following the dependency.\n\n :param file_path: a dependency with optional pinned version\n :return: str a cleaned dependency string\n \"\"\""
] |
[
{
"param": "dependency",
"type": "str"
}
] |
{
"returns": [
{
"docstring": "str a cleaned dependency string",
"docstring_tokens": [
"str",
"a",
"cleaned",
"dependency",
"string"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "dependency",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "file_path",
"type": null,
"docstring": "a dependency with optional pinned version",
"docstring_tokens": [
"a",
"dependency",
"with",
"optional",
"pinned",
"version"
],
"default": null,
"is_optional": null
}
],
"others": []
}
|
def clean_dependency(dependency: str) -> str:
if " " in dependency or ";" in dependency or "#" in dependency:
return dependency.split(" ")[0].replace(";", "")
return dependency
| 810 | 2 |
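A usage sketch with illustrative requirement strings; the split on the first space drops environment markers and trailing comments:

print(clean_dependency("requests==2.25.1 ; python_version < '3.8'"))  # 'requests==2.25.1'
print(clean_dependency("flask==1.1.2"))  # no space, ';', or '#': returned unchanged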
050ab772e8c83d15e78a5735adae6cbb9b558118
|
jacobfgrant/simplemdm-webhook-functions
|
simplemdm_webhook_functions/slack_functions.py
|
[
"Apache-2.0"
] |
Python
|
slack_webhook_message
|
<not_specific>
|
def slack_webhook_message(serial_number, webhook_event, event_time):
"""Create a formatted Slack message."""
event_time = re.sub(r'([\+|\-]{1}[0-9]{2})\:([0-9]{2})', r'\g<1>\g<2>', event_time)
event_time = datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S.%f%z')
event_time = event_time.strftime('%A, %B %d, %Y at %H:%M %Z')
slack_message = {
'text': (
"SimpleMDM: Device `" +
serial_number +
"` " +
webhook_event +
" on " +
event_time +
"."
)
}
return slack_message
|
Create a formatted Slack message.
|
Create a formatted Slack message.
|
[
"Create",
"a",
"formatted",
"Slack",
"message",
"."
] |
def slack_webhook_message(serial_number, webhook_event, event_time):
event_time = re.sub(r'([\+|\-]{1}[0-9]{2})\:([0-9]{2})', r'\g<1>\g<2>', event_time)
event_time = datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S.%f%z')
event_time = event_time.strftime('%A, %B %d, %Y at %H:%M %Z')
slack_message = {
'text': (
"SimpleMDM: Device `" +
serial_number +
"` " +
webhook_event +
" on " +
event_time +
"."
)
}
return slack_message
|
[
"def",
"slack_webhook_message",
"(",
"serial_number",
",",
"webhook_event",
",",
"event_time",
")",
":",
"event_time",
"=",
"re",
".",
"sub",
"(",
"r'([\\+|\\-]{1}[0-9]{2})\\:([0-9]{2})'",
",",
"r'\\g<1>\\g<2>'",
",",
"event_time",
")",
"event_time",
"=",
"datetime",
".",
"strptime",
"(",
"event_time",
",",
"'%Y-%m-%dT%H:%M:%S.%f%z'",
")",
"event_time",
"=",
"event_time",
".",
"strftime",
"(",
"'%A, %B %d, %Y at %H:%M %Z'",
")",
"slack_message",
"=",
"{",
"'text'",
":",
"(",
"\"SimpleMDM: Device `\"",
"+",
"serial_number",
"+",
"\"` \"",
"+",
"webhook_event",
"+",
"\" on \"",
"+",
"event_time",
"+",
"\".\"",
")",
"}",
"return",
"slack_message"
] |
Create a formatted Slack message.
|
[
"Create",
"a",
"formatted",
"Slack",
"message",
"."
] |
[
"\"\"\"Create a formatted Slack message.\"\"\""
] |
[
{
"param": "serial_number",
"type": null
},
{
"param": "webhook_event",
"type": null
},
{
"param": "event_time",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "serial_number",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "webhook_event",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "event_time",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
from datetime import datetime
def slack_webhook_message(serial_number, webhook_event, event_time):
event_time = re.sub(r'([\+|\-]{1}[0-9]{2})\:([0-9]{2})', r'\g<1>\g<2>', event_time)
event_time = datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S.%f%z')
event_time = event_time.strftime('%A, %B %d, %Y at %H:%M %Z')
slack_message = {
'text': (
"SimpleMDM: Device `" +
serial_number +
"` " +
webhook_event +
" on " +
event_time +
"."
)
}
return slack_message
| 811 | 753 |
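A usage sketch reusing the imports above, with a hypothetical device and timestamp; the regex strips the colon from the UTC offset so `%z` can parse it:

msg = slack_webhook_message('C02XY', 'device.enrolled', '2021-06-01T12:34:56.000+00:00')
print(msg['text'])
# SimpleMDM: Device `C02XY` device.enrolled on Tuesday, June 01, 2021 at 12:34 UTC.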
070c108542829596ef282f019063fb3eb7fd7a2c
|
Lambda-School-Labs/cryptolytic-ds
|
cryptolytic/data/historical.py
|
[
"MIT"
] |
Python
|
price_increase
|
<not_specific>
|
def price_increase(percent_diff, bottom5percent, top5percent):
"""Classify price changes into three types of categories"""
if percent_diff > top5percent:
return 1
elif percent_diff < bottom5percent:
return -1
return 0
|
Classify price changes into three types of categories
|
Classify price changes into three types of categories
|
[
"Classify",
"price",
"changes",
"into",
"three",
"types",
"of",
"categories"
] |
def price_increase(percent_diff, bottom5percent, top5percent):
if percent_diff > top5percent:
return 1
elif percent_diff < bottom5percent:
return -1
return 0
|
[
"def",
"price_increase",
"(",
"percent_diff",
",",
"bottom5percent",
",",
"top5percent",
")",
":",
"if",
"percent_diff",
">",
"top5percent",
":",
"return",
"1",
"elif",
"percent_diff",
"<",
"bottom5percent",
":",
"return",
"-",
"1",
"return",
"0"
] |
Classify price changes into three types of categories
|
[
"Classify",
"price",
"changes",
"into",
"three",
"types",
"of",
"categories"
] |
[
"\"\"\"Classify price changes into three types of categories\"\"\""
] |
[
{
"param": "percent_diff",
"type": null
},
{
"param": "bottom5percent",
"type": null
},
{
"param": "top5percent",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "percent_diff",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "bottom5percent",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "top5percent",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def price_increase(percent_diff, bottom5percent, top5percent):
if percent_diff > top5percent:
return 1
elif percent_diff < bottom5percent:
return -1
return 0
| 812 | 455 |
c7fff9be3a3bfa9b0d9da6bc0ddc8c68e0bc9f86
|
bopopescu/mysql-dbcompare
|
mysql-utilities-1.6.0/mysql/utilities/common/utilities.py
|
[
"Apache-2.0"
] |
Python
|
find_executable
|
<not_specific>
|
def find_executable(util_name):
"""Search the system path for an executable matching the utility
util_name[in] Name of utility
Returns string - name of executable (util_name or util_name.exe) or
original name if not found on the system path
"""
paths = os.getenv("PATH").split(os.pathsep)
for path in paths:
new_path = os.path.join(path, util_name + "*")
if os.name == "nt":
new_path = '"{0}"'.format(new_path)
found_path = glob.glob(new_path)
if found_path:
return os.path.split(found_path[0])[1]
return util_name
|
Search the system path for an executable matching the utility
util_name[in] Name of utility
Returns string - name of executable (util_name or util_name.exe) or
original name if not found on the system path
|
Search the system path for an executable matching the utility
util_name[in] Name of utility
Returns string - name of executable (util_name or util_name.exe) or
original name if not found on the system path
|
[
"Search",
"the",
"system",
"path",
"for",
"an",
"executable",
"matching",
"the",
"utility",
"util_name",
"[",
"in",
"]",
"Name",
"of",
"utility",
"Returns",
"string",
"-",
"name",
"of",
"executable",
"(",
"util_name",
"or",
"util_name",
".",
"exe",
")",
"or",
"original",
"name",
"if",
"not",
"found",
"on",
"the",
"system",
"path"
] |
def find_executable(util_name):
paths = os.getenv("PATH").split(os.pathsep)
for path in paths:
new_path = os.path.join(path, util_name + "*")
if os.name == "nt":
new_path = '"{0}"'.format(new_path)
found_path = glob.glob(new_path)
if found_path:
return os.path.split(found_path[0])[1]
return util_name
|
[
"def",
"find_executable",
"(",
"util_name",
")",
":",
"paths",
"=",
"os",
".",
"getenv",
"(",
"\"PATH\"",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"path",
"in",
"paths",
":",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"util_name",
"+",
"\"*\"",
")",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"new_path",
"=",
"'\"{0}\"'",
".",
"format",
"(",
"new_path",
")",
"found_path",
"=",
"glob",
".",
"glob",
"(",
"new_path",
")",
"if",
"found_path",
":",
"return",
"os",
".",
"path",
".",
"split",
"(",
"found_path",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"return",
"util_name"
] |
Search the system path for an executable matching the utility
util_name[in] Name of utility
|
[
"Search",
"the",
"system",
"path",
"for",
"an",
"executable",
"matching",
"the",
"utility",
"util_name",
"[",
"in",
"]",
"Name",
"of",
"utility"
] |
[
"\"\"\"Search the system path for an executable matching the utility\n\n util_name[in] Name of utility\n\n Returns string - name of executable (util_name or util_name.exe) or\n original name if not found on the system path\n \"\"\""
] |
[
{
"param": "util_name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "util_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
import glob
def find_executable(util_name):
paths = os.getenv("PATH").split(os.pathsep)
for path in paths:
new_path = os.path.join(path, util_name + "*")
if os.name == "nt":
new_path = '"{0}"'.format(new_path)
found_path = glob.glob(new_path)
if found_path:
return os.path.split(found_path[0])[1]
return util_name
| 813 | 1,001 |
5d2607e7c4b76fc1312c2ac9f5cd88311fff0f48
|
averyhiebert/slackhistory
|
slackhistory/historytools.py
|
[
"MIT"
] |
Python
|
flatten_posts
|
<not_specific>
|
def flatten_posts(history):
''' Flatten all posts from all channels into a single list.
Also add a field "channel" to each post.
Does not modify the original.'''
posts = []
for channel, post_list in history.items():
for post in post_list:
new_post = copy.deepcopy(post)
new_post["channel"] = channel
posts.append(new_post)
return posts
|
Flatten all posts from all channels into a single list.
Also add a field "channel" to each post.
Does not modify the original.
|
Flatten all posts from all channels into a single list.
Also add a field "channel" to each post.
Does not modify the original.
|
[
"Flatten",
"all",
"posts",
"from",
"all",
"channels",
"into",
"a",
"single",
"list",
".",
"Also",
"add",
"a",
"field",
"\"",
"channel",
"\"",
"to",
"each",
"post",
".",
"Does",
"not",
"modify",
"the",
"original",
"."
] |
def flatten_posts(history):
posts = []
for channel, post_list in history.items():
for post in post_list:
new_post = copy.deepcopy(post)
new_post["channel"] = channel
posts.append(new_post)
return posts
|
[
"def",
"flatten_posts",
"(",
"history",
")",
":",
"posts",
"=",
"[",
"]",
"for",
"channel",
",",
"post_list",
"in",
"history",
".",
"items",
"(",
")",
":",
"for",
"post",
"in",
"post_list",
":",
"new_post",
"=",
"copy",
".",
"deepcopy",
"(",
"post",
")",
"new_post",
"[",
"\"channel\"",
"]",
"=",
"channel",
"posts",
".",
"append",
"(",
"new_post",
")",
"return",
"posts"
] |
Flatten all posts from all channels into a single list.
|
[
"Flatten",
"all",
"posts",
"from",
"all",
"channels",
"into",
"a",
"single",
"list",
"."
] |
[
"''' Flatten all posts from all channels into a single list.\n Also add a field \"channel\" to each post.\n \n Does not modify the original.'''"
] |
[
{
"param": "history",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "history",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import copy
def flatten_posts(history):
posts = []
for channel, post_list in history.items():
for post in post_list:
new_post = copy.deepcopy(post)
new_post["channel"] = channel
posts.append(new_post)
return posts
| 814 | 538 |
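A small sketch with hypothetical history data; each copied post gains a "channel" key and the input dict is left untouched:

history = {"general": [{"text": "hi"}], "random": [{"text": "yo"}]}
print(flatten_posts(history))
# [{'text': 'hi', 'channel': 'general'}, {'text': 'yo', 'channel': 'random'}]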
a7a4b5f4fdfd958cecff5c80bd722c1ef0ffc06f
|
moertle/underscorepy
|
_/skel.py
|
[
"MIT"
] |
Python
|
add_data_files
|
<not_specific>
|
def add_data_files(*include_dirs):
'called from setup.py to include auxillary files for installation'
data_files = []
for include_dir in include_dirs:
for root, directories, filenames in os.walk(include_dir):
include_files = []
for filename in filenames:
include_files.append(os.path.join(root, filename))
if include_files:
data_files.append((root, include_files))
return data_files
|
called from setup.py to include auxillary files for installation
|
called from setup.py to include auxillary files for installation
|
[
"called",
"from",
"setup",
".",
"py",
"to",
"include",
"auxillary",
"files",
"for",
"installation"
] |
def add_data_files(*include_dirs):
data_files = []
for include_dir in include_dirs:
for root, directories, filenames in os.walk(include_dir):
include_files = []
for filename in filenames:
include_files.append(os.path.join(root, filename))
if include_files:
data_files.append((root, include_files))
return data_files
|
[
"def",
"add_data_files",
"(",
"*",
"include_dirs",
")",
":",
"data_files",
"=",
"[",
"]",
"for",
"include_dir",
"in",
"include_dirs",
":",
"for",
"root",
",",
"directories",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"include_dir",
")",
":",
"include_files",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames",
":",
"include_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
")",
"if",
"include_files",
":",
"data_files",
".",
"append",
"(",
"(",
"root",
",",
"include_files",
")",
")",
"return",
"data_files"
] |
called from setup.py to include auxillary files for installation
|
[
"called",
"from",
"setup",
".",
"py",
"to",
"include",
"auxillary",
"files",
"for",
"installation"
] |
[
"'called from setup.py to include auxillary files for installation'"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import os
def add_data_files(*include_dirs):
data_files = []
for include_dir in include_dirs:
for root, directories, filenames in os.walk(include_dir):
include_files = []
for filename in filenames:
include_files.append(os.path.join(root, filename))
if include_files:
data_files.append((root, include_files))
return data_files
| 817 | 549 |
93a883d8cbeb738336dc98584b8d889d2e4be78e
|
Naugeh/mezzanine
|
mezzanine/core/templatetags/mezzanine_tags.py
|
[
"BSD-2-Clause"
] |
Python
|
errors_for
|
<not_specific>
|
def errors_for(form):
"""
Renders an alert if the form has any errors.
"""
return {"form": form}
|
Renders an alert if the form has any errors.
|
Renders an alert if the form has any errors.
|
[
"Renders",
"an",
"alert",
"if",
"the",
"form",
"has",
"any",
"errors",
"."
] |
def errors_for(form):
return {"form": form}
|
[
"def",
"errors_for",
"(",
"form",
")",
":",
"return",
"{",
"\"form\"",
":",
"form",
"}"
] |
Renders an alert if the form has any errors.
|
[
"Renders",
"an",
"alert",
"if",
"the",
"form",
"has",
"any",
"errors",
"."
] |
[
"\"\"\"\n Renders an alert if the form has any errors.\n \"\"\""
] |
[
{
"param": "form",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "form",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def errors_for(form):
return {"form": form}
| 818 | 163 |
7837a02b4631ced061fe03bdc1442c76f9b60c8b
|
akmiller01/pyIATI
|
iati/tests/fixtures/versions.py
|
[
"MIT"
] |
Python
|
split_decimal
|
<not_specific>
|
def split_decimal(version_decimal):
"""Split a Decimal version number into numeric representations of its components.
Args:
version_decimal (Decimal): A Decimal containing an IATI version number.
Returns:
list of int: A list containing numeric representations of the Integer and Decimal components.
"""
integer_component = int(version_decimal)
decimal_component = int(version_decimal * 100) - 100
return [integer_component, decimal_component]
|
Split a Decimal version number into numeric representations of its components.
Args:
version_decimal (Decimal): A Decimal containing an IATI version number.
Returns:
list of int: A list containing numeric representations of the Integer and Decimal components.
|
Split a Decimal version number into numeric representations of its components.
|
[
"Split",
"a",
"Decimal",
"version",
"number",
"into",
"numeric",
"representations",
"of",
"its",
"components",
"."
] |
def split_decimal(version_decimal):
integer_component = int(version_decimal)
decimal_component = int(version_decimal * 100) - 100
return [integer_component, decimal_component]
|
[
"def",
"split_decimal",
"(",
"version_decimal",
")",
":",
"integer_component",
"=",
"int",
"(",
"version_decimal",
")",
"decimal_component",
"=",
"int",
"(",
"version_decimal",
"*",
"100",
")",
"-",
"100",
"return",
"[",
"integer_component",
",",
"decimal_component",
"]"
] |
Split a Decimal version number into numeric representations of its components.
|
[
"Split",
"a",
"Decimal",
"version",
"number",
"into",
"numeric",
"representations",
"of",
"its",
"components",
"."
] |
[
"\"\"\"Split a Decimal version number into numeric representations of its components.\n\n Args:\n version_decimal (Decimal): A Decimal containing an IATI version number.\n\n Returns:\n list of int: A list containing numeric representations of the Integer and Decimal components.\n\n \"\"\""
] |
[
{
"param": "version_decimal",
"type": null
}
] |
{
"returns": [
{
"docstring": "list of int: A list containing numeric representations of the Integer and Decimal components.",
"docstring_tokens": [
"list",
"of",
"int",
":",
"A",
"list",
"containing",
"numeric",
"representations",
"of",
"the",
"Integer",
"and",
"Decimal",
"components",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "version_decimal",
"type": null,
"docstring": "A Decimal containing an IATI version number.",
"docstring_tokens": [
"A",
"Decimal",
"containing",
"an",
"IATI",
"version",
"number",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def split_decimal(version_decimal):
integer_component = int(version_decimal)
decimal_component = int(version_decimal * 100) - 100
return [integer_component, decimal_component]
| 819 | 15 |
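A worked sketch; note the `- 100` only cancels the integer part when it equals 1, so the arithmetic is tailored to IATI 1.x version numbers:

from decimal import Decimal
print(split_decimal(Decimal('1.05')))  # [1, 5]: int(1.05) = 1, int(105) - 100 = 5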
3ced932201bbee3ebb9a0e1afdd8ebf329fd7aba
|
evansmurithi/kplc-interruptions
|
kplc_interruptions/interruptions/models.py
|
[
"MIT"
] |
Python
|
interruption_upload_path
|
<not_specific>
|
def interruption_upload_path(instance, filename):
"""
Directory to upload interruption PDF files.
"""
return "interruptions/{interruption_id}/{filename}".format(
interruption_id=instance.interruption.id, filename=filename)
|
Directory to upload interruption PDF files.
|
Directory to upload interruption PDF files.
|
[
"Directory",
"to",
"upload",
"interruption",
"PDF",
"files",
"."
] |
def interruption_upload_path(instance, filename):
return "interruptions/{interruption_id}/{filename}".format(
interruption_id=instance.interruption.id, filename=filename)
|
[
"def",
"interruption_upload_path",
"(",
"instance",
",",
"filename",
")",
":",
"return",
"\"interruptions/{interruption_id}/{filename}\"",
".",
"format",
"(",
"interruption_id",
"=",
"instance",
".",
"interruption",
".",
"id",
",",
"filename",
"=",
"filename",
")"
] |
Directory to upload interruption PDF files.
|
[
"Directory",
"to",
"upload",
"interruption",
"PDF",
"files",
"."
] |
[
"\"\"\"\n Directory to upload interruption PDF files.\n \"\"\""
] |
[
{
"param": "instance",
"type": null
},
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "instance",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def interruption_upload_path(instance, filename):
return "interruptions/{interruption_id}/{filename}".format(
interruption_id=instance.interruption.id, filename=filename)
| 820 | 605 |
75bbb3c3f25e6aa3d654ad31459d4369b8ba3831
|
OneTesseractInMultiverse/CateringService
|
jkfood/extensions/security/crypto/message_integrity.py
|
[
"MIT"
] |
Python
|
compute_hash
|
<not_specific>
|
def compute_hash(password, salt):
"""
Computes the SHA256 has value of a given password and encodes the
using hex encoding to produce a readable string.
:param password: The password to be hashed
:param salt: A set of entropy data to prevent rainbow table and
dictionary attacks.
:return: The resulting hash
"""
return hashlib.sha256(password.encode('utf-8')+salt.encode('utf-8'))\
.hexdigest()
|
Computes the SHA256 has value of a given password and encodes the
using hex encoding to produce a readable string.
:param password: The password to be hashed
:param salt: A set of entropy data to prevent rainbow table and
dictionary attacks.
:return: The resulting hash
|
Computes the SHA256 has value of a given password and encodes the
using hex encoding to produce a readable string.
|
[
"Computes",
"the",
"SHA256",
"has",
"value",
"of",
"a",
"given",
"password",
"and",
"encodes",
"the",
"using",
"hex",
"encoding",
"to",
"produce",
"a",
"readable",
"string",
"."
] |
def compute_hash(password, salt):
return hashlib.sha256(password.encode('utf-8')+salt.encode('utf-8'))\
.hexdigest()
|
[
"def",
"compute_hash",
"(",
"password",
",",
"salt",
")",
":",
"return",
"hashlib",
".",
"sha256",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
"+",
"salt",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] |
Computes the SHA256 has value of a given password and encodes the
using hex encoding to produce a readable string.
|
[
"Computes",
"the",
"SHA256",
"has",
"value",
"of",
"a",
"given",
"password",
"and",
"encodes",
"the",
"using",
"hex",
"encoding",
"to",
"produce",
"a",
"readable",
"string",
"."
] |
[
"\"\"\"\n Computes the SHA256 has value of a given password and encodes the\n using hex encoding to produce a readable string.\n\n :param password: The password to be hashed\n :param salt: A set of entropy data to prevent rainbow table and\n dictionary attacks.\n :return: The resulting hash\n \"\"\""
] |
[
{
"param": "password",
"type": null
},
{
"param": "salt",
"type": null
}
] |
{
"returns": [
{
"docstring": "The resulting hash",
"docstring_tokens": [
"The",
"resulting",
"hash"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "password",
"type": null,
"docstring": "The password to be hashed",
"docstring_tokens": [
"The",
"password",
"to",
"be",
"hashed"
],
"default": null,
"is_optional": null
},
{
"identifier": "salt",
"type": null,
"docstring": "A set of entropy data to prevent rainbow table and\ndictionary attacks.",
"docstring_tokens": [
"A",
"set",
"of",
"entropy",
"data",
"to",
"prevent",
"rainbow",
"table",
"and",
"dictionary",
"attacks",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import hashlib
def compute_hash(password, salt):
return hashlib.sha256(password.encode('utf-8')+salt.encode('utf-8'))\
.hexdigest()
| 821 | 755 |
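A usage sketch with hypothetical credentials; the exact digest varies with the salt, but a SHA-256 hexdigest is always 64 characters:

digest = compute_hash("hunter2", "random-salt")  # hypothetical password and salt
print(len(digest))  # 64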
1d52ced4d86ff47f0c277045ddba60145f600ca0
|
Bielorusse/random_text
|
random_text.py
|
[
"MIT"
] |
Python
|
add_uppercase
|
<not_specific>
|
def add_uppercase(input_words):
"""
Remove the uppercase of the beginning of sentences in a list of words.
Input:
-input_words list of strings
Ouput:
-output_words list of strings
"""
output_words = [input_words[0].capitalize()]
for i in range(len(input_words[:-1])):
if input_words[i].endswith("."):
output_words.append(input_words[i+1].capitalize())
else:
output_words.append(input_words[i+1])
output_words[-1] = "{}.".format(output_words[-1])
return output_words
|
Remove the uppercase of the beginning of sentences in a list of words.
Input:
-input_words list of strings
Ouput:
-output_words list of strings
|
Remove the uppercase of the beginning of sentences in a list of words.
Input:
input_words list of strings
Ouput:
output_words list of strings
|
[
"Remove",
"the",
"uppercase",
"of",
"the",
"beginning",
"of",
"sentences",
"in",
"a",
"list",
"of",
"words",
".",
"Input",
":",
"input_words",
"list",
"of",
"strings",
"Ouput",
":",
"output_words",
"list",
"of",
"strings"
] |
def add_uppercase(input_words):
output_words = [input_words[0].capitalize()]
for i in range(len(input_words[:-1])):
if input_words[i].endswith("."):
output_words.append(input_words[i+1].capitalize())
else:
output_words.append(input_words[i+1])
output_words[-1] = "{}.".format(output_words[-1])
return output_words
|
[
"def",
"add_uppercase",
"(",
"input_words",
")",
":",
"output_words",
"=",
"[",
"input_words",
"[",
"0",
"]",
".",
"capitalize",
"(",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"input_words",
"[",
":",
"-",
"1",
"]",
")",
")",
":",
"if",
"input_words",
"[",
"i",
"]",
".",
"endswith",
"(",
"\".\"",
")",
":",
"output_words",
".",
"append",
"(",
"input_words",
"[",
"i",
"+",
"1",
"]",
".",
"capitalize",
"(",
")",
")",
"else",
":",
"output_words",
".",
"append",
"(",
"input_words",
"[",
"i",
"+",
"1",
"]",
")",
"output_words",
"[",
"-",
"1",
"]",
"=",
"\"{}.\"",
".",
"format",
"(",
"output_words",
"[",
"-",
"1",
"]",
")",
"return",
"output_words"
] |
Remove the uppercase of the beginning of sentences in a list of words.
|
[
"Remove",
"the",
"uppercase",
"of",
"the",
"beginning",
"of",
"sentences",
"in",
"a",
"list",
"of",
"words",
"."
] |
[
"\"\"\"\n Remove the uppercase of the beginning of sentences in a list of words.\n\n Input:\n -input_words list of strings\n Ouput:\n -output_words list of strings\n \"\"\""
] |
[
{
"param": "input_words",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "input_words",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_uppercase(input_words):
output_words = [input_words[0].capitalize()]
for i in range(len(input_words[:-1])):
if input_words[i].endswith("."):
output_words.append(input_words[i+1].capitalize())
else:
output_words.append(input_words[i+1])
output_words[-1] = "{}.".format(output_words[-1])
return output_words
| 822 | 1,004 |
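A sketch with illustrative words; the final line always appends a period, so an input already ending in '.' would end up with a doubled one:

words = ["hello", "world.", "new", "sentence"]
print(add_uppercase(words))  # ['Hello', 'world.', 'New', 'sentence.']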
ff152bb2fe3fc9b31e5223249af5722e3819fe8c
|
hkotaro1215/invest
|
src/natcap/invest/pygeoprocessing_0_3_3/testing/assertions.py
|
[
"BSD-3-Clause"
] |
Python
|
assert_text_equal
| null |
def assert_text_equal(text_1_uri, text_2_uri):
"""Assert that two text files are equal.
This comparison is done line-by-line.
Args:
text_1_uri (string): a python string uri to a text file.
Considered the file to be tested.
text_2_uri (string): a python string uri to a text file.
Considered the regression file.
Raises:
AssertionError: Raised when a line differs in the two files.
Returns:
None
"""
def lines(filepath):
"""Return a generator of lines in the opened file."""
with open(filepath, 'rb') as opened_file:
for line in opened_file:
yield line
for index, (a_line, b_line) in enumerate(itertools.izip(
lines(text_1_uri), lines(text_2_uri))):
if a_line != b_line:
raise AssertionError('Line %s in %s does not match regression '
'file %s. Output "%s" Regression "%s"' % (
index, text_1_uri, text_2_uri, a_line,
b_line))
|
Assert that two text files are equal.
This comparison is done line-by-line.
Args:
text_1_uri (string): a python string uri to a text file.
Considered the file to be tested.
text_2_uri (string): a python string uri to a text file.
Considered the regression file.
Raises:
AssertionError: Raised when a line differs in the two files.
Returns:
None
|
Assert that two text files are equal.
This comparison is done line-by-line.
|
[
"Assert",
"that",
"two",
"text",
"files",
"are",
"equal",
".",
"This",
"comparison",
"is",
"done",
"line",
"-",
"by",
"-",
"line",
"."
] |
def assert_text_equal(text_1_uri, text_2_uri):
def lines(filepath):
with open(filepath, 'rb') as opened_file:
for line in opened_file:
yield line
for index, (a_line, b_line) in enumerate(itertools.izip(
lines(text_1_uri), lines(text_2_uri))):
if a_line != b_line:
raise AssertionError('Line %s in %s does not match regression '
'file %s. Output "%s" Regression "%s"' % (
index, text_1_uri, text_2_uri, a_line,
b_line))
|
[
"def",
"assert_text_equal",
"(",
"text_1_uri",
",",
"text_2_uri",
")",
":",
"def",
"lines",
"(",
"filepath",
")",
":",
"\"\"\"Return a generator of lines in the opened file.\"\"\"",
"with",
"open",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"opened_file",
":",
"for",
"line",
"in",
"opened_file",
":",
"yield",
"line",
"for",
"index",
",",
"(",
"a_line",
",",
"b_line",
")",
"in",
"enumerate",
"(",
"itertools",
".",
"izip",
"(",
"lines",
"(",
"text_1_uri",
")",
",",
"lines",
"(",
"text_2_uri",
")",
")",
")",
":",
"if",
"a_line",
"!=",
"b_line",
":",
"raise",
"AssertionError",
"(",
"'Line %s in %s does not match regression '",
"'file %s. Output \"%s\" Regression \"%s\"'",
"%",
"(",
"index",
",",
"text_1_uri",
",",
"text_2_uri",
",",
"a_line",
",",
"b_line",
")",
")"
] |
Assert that two text files are equal.
|
[
"Assert",
"that",
"two",
"text",
"files",
"are",
"equal",
"."
] |
[
"\"\"\"Assert that two text files are equal.\n\n This comparison is done line-by-line.\n\n Args:\n text_1_uri (string): a python string uri to a text file.\n Considered the file to be tested.\n text_2_uri (string): a python string uri to a text file.\n Considered the regression file.\n\n Raises:\n AssertionError: Raised when a line differs in the two files.\n\n Returns:\n None\n \"\"\"",
"\"\"\"Return a generator of lines in the opened file.\"\"\""
] |
[
{
"param": "text_1_uri",
"type": null
},
{
"param": "text_2_uri",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [
{
"docstring": "Raised when a line differs in the two files.",
"docstring_tokens": [
"Raised",
"when",
"a",
"line",
"differs",
"in",
"the",
"two",
"files",
"."
],
"type": "AssertionError"
}
],
"params": [
{
"identifier": "text_1_uri",
"type": null,
"docstring": "a python string uri to a text file.\nConsidered the file to be tested.",
"docstring_tokens": [
"a",
"python",
"string",
"uri",
"to",
"a",
"text",
"file",
".",
"Considered",
"the",
"file",
"to",
"be",
"tested",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "text_2_uri",
"type": null,
"docstring": "a python string uri to a text file.\nConsidered the regression file.",
"docstring_tokens": [
"a",
"python",
"string",
"uri",
"to",
"a",
"text",
"file",
".",
"Considered",
"the",
"regression",
"file",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import itertools
def assert_text_equal(text_1_uri, text_2_uri):
def lines(filepath):
with open(filepath, 'rb') as opened_file:
for line in opened_file:
yield line
for index, (a_line, b_line) in enumerate(itertools.izip(
lines(text_1_uri), lines(text_2_uri))):
if a_line != b_line:
raise AssertionError('Line %s in %s does not match regression '
'file %s. Output "%s" Regression "%s"' % (
index, text_1_uri, text_2_uri, a_line,
b_line))
| 824 | 366 |
a7f5535325c8a7249d451132ec4a530c6f93a4de
|
thesadru/genshin.py
|
genshin/__main__.py
|
[
"MIT"
] |
Python
|
asynchronous
|
typing.Callable[..., typing.Any]
|
def asynchronous(func: typing.Callable[..., typing.Awaitable[typing.Any]]) -> typing.Callable[..., typing.Any]:
"""Make an asynchronous function runnable by click."""
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
return asyncio.run(func(*args, **kwargs)) # type: ignore # unsure what is wrong here
return wrapper
|
Make an asynchronous function runnable by click.
|
Make an asynchronous function runnable by click.
|
[
"Make",
"an",
"asynchronous",
"function",
"runnable",
"by",
"click",
"."
] |
def asynchronous(func: typing.Callable[..., typing.Awaitable[typing.Any]]) -> typing.Callable[..., typing.Any]:
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
return asyncio.run(func(*args, **kwargs))
return wrapper
|
[
"def",
"asynchronous",
"(",
"func",
":",
"typing",
".",
"Callable",
"[",
"...",
",",
"typing",
".",
"Awaitable",
"[",
"typing",
".",
"Any",
"]",
"]",
")",
"->",
"typing",
".",
"Callable",
"[",
"...",
",",
"typing",
".",
"Any",
"]",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
":",
"typing",
".",
"Any",
",",
"**",
"kwargs",
":",
"typing",
".",
"Any",
")",
"->",
"typing",
".",
"Any",
":",
"return",
"asyncio",
".",
"run",
"(",
"func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
")",
"return",
"wrapper"
] |
Make an asynchronous function runnable by click.
|
[
"Make",
"an",
"asynchronous",
"function",
"runnable",
"by",
"click",
"."
] |
[
"\"\"\"Make an asynchronous function runnable by click.\"\"\"",
"# type: ignore # unsure what is wrong here"
] |
[
{
"param": "func",
"type": "typing.Callable[..., typing.Awaitable[typing.Any]]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "func",
"type": "typing.Callable[..., typing.Awaitable[typing.Any]]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import asyncio
import functools
import typing
def asynchronous(func: typing.Callable[..., typing.Awaitable[typing.Any]]) -> typing.Callable[..., typing.Any]:
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
return asyncio.run(func(*args, **kwargs))
return wrapper
| 825 | 630 |
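A usage sketch reusing the imports above; the decorator lets a synchronous caller (such as a click command) invoke a coroutine directly:

@asynchronous
async def greet(name: str) -> str:
    await asyncio.sleep(0)  # stand-in for real async work
    return f"hello {name}"

print(greet("traveler"))  # prints 'hello traveler'; asyncio.run drives the coroutine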
e20cbec6b4f191c481151a3c705459c2326b507c
|
yonch/wireless
|
lablog/python/Distribute.py
|
[
"MIT"
] |
Python
|
distribute
| null |
def distribute(pending, numProcessors, minAmount):
'''
Distributes the experiments that have less results than the required amount
@param pending: list of tuples: (key, numToDistribute)
@param numProcessors: number of processors to distribute to
@param minAmount: Distributor will not distribute less than this
amount per processor, unless the total amount of tasks is smaller
than minAmount.
@return: iterator that returns a single node's list of tuples (eID, num)
'''
totalPending = sum(x[1] for x in pending)
minAmount = max(1, minAmount)
numProcessors = max(1, min(numProcessors, totalPending / minAmount))
while totalPending > 0:
curAmount = totalPending / numProcessors
remaining = curAmount
res = []
while remaining > 0:
firstId, firstMissing = pending[0]
if firstMissing <= remaining:
# We return all the experiment in this result
res.append((firstId, firstMissing))
remaining -= firstMissing
pending = pending[1:]
else:
# We fragment this experiment
res.append((firstId, remaining))
pending = [(firstId, firstMissing - remaining)] + pending[1:]
remaining = 0
numProcessors -= 1
totalPending -= curAmount
yield res
|
Distributes the experiments that have less results than the required amount
@param pending: list of tuples: (key, numToDistribute)
@param numProcessors: number of processors to distribute to
@param minAmount: Distributor will not distribute less than this
amount per processor, unless the total amount of tasks is smaller
than minAmount.
@return: iterator that returns a single node's list of tuples (eID, num)
|
Distributes the experiments that have less results than the required amount
|
[
"Distributes",
"the",
"experiments",
"that",
"have",
"less",
"results",
"than",
"the",
"required",
"amount"
] |
def distribute(pending, numProcessors, minAmount):
totalPending = sum(x[1] for x in pending)
minAmount = max(1, minAmount)
numProcessors = max(1, min(numProcessors, totalPending / minAmount))
while totalPending > 0:
curAmount = totalPending / numProcessors
remaining = curAmount
res = []
while remaining > 0:
firstId, firstMissing = pending[0]
if firstMissing <= remaining:
res.append((firstId, firstMissing))
remaining -= firstMissing
pending = pending[1:]
else:
res.append((firstId, remaining))
pending = [(firstId, firstMissing - remaining)] + pending[1:]
remaining = 0
numProcessors -= 1
totalPending -= curAmount
yield res
|
[
"def",
"distribute",
"(",
"pending",
",",
"numProcessors",
",",
"minAmount",
")",
":",
"totalPending",
"=",
"sum",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"pending",
")",
"minAmount",
"=",
"max",
"(",
"1",
",",
"minAmount",
")",
"numProcessors",
"=",
"max",
"(",
"1",
",",
"min",
"(",
"numProcessors",
",",
"totalPending",
"/",
"minAmount",
")",
")",
"while",
"totalPending",
">",
"0",
":",
"curAmount",
"=",
"totalPending",
"/",
"numProcessors",
"remaining",
"=",
"curAmount",
"res",
"=",
"[",
"]",
"while",
"remaining",
">",
"0",
":",
"firstId",
",",
"firstMissing",
"=",
"pending",
"[",
"0",
"]",
"if",
"firstMissing",
"<=",
"remaining",
":",
"res",
".",
"append",
"(",
"(",
"firstId",
",",
"firstMissing",
")",
")",
"remaining",
"-=",
"firstMissing",
"pending",
"=",
"pending",
"[",
"1",
":",
"]",
"else",
":",
"res",
".",
"append",
"(",
"(",
"firstId",
",",
"remaining",
")",
")",
"pending",
"=",
"[",
"(",
"firstId",
",",
"firstMissing",
"-",
"remaining",
")",
"]",
"+",
"pending",
"[",
"1",
":",
"]",
"remaining",
"=",
"0",
"numProcessors",
"-=",
"1",
"totalPending",
"-=",
"curAmount",
"yield",
"res"
] |
Distributes the experiments that have less results than the required amount
|
[
"Distributes",
"the",
"experiments",
"that",
"have",
"less",
"results",
"than",
"the",
"required",
"amount"
] |
[
"'''\n Distributes the experiments that have less results than the required amount\n @param pending: list of tuples: (key, numToDistribute)\n @param numProcessors: number of processors to distribute to\n @param minAmount: Distributor will not distribute less than this\n amount per processor, unless the total amount of tasks is smaller\n than minAmount.\n @return: iterator that returns a single node's list of tuples (eID, num)\n\n '''",
"# We return all the experiment in this result",
"# We fragment this experiment"
] |
[
{
"param": "pending",
"type": null
},
{
"param": "numProcessors",
"type": null
},
{
"param": "minAmount",
"type": null
}
] |
{
"returns": [
{
"docstring": "iterator that returns a single node's list of tuples (eID, num)",
"docstring_tokens": [
"iterator",
"that",
"returns",
"a",
"single",
"node",
"'",
"s",
"list",
"of",
"tuples",
"(",
"eID",
"num",
")"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "pending",
"type": null,
"docstring": "list of tuples: (key, numToDistribute)",
"docstring_tokens": [
"list",
"of",
"tuples",
":",
"(",
"key",
"numToDistribute",
")"
],
"default": null,
"is_optional": false
},
{
"identifier": "numProcessors",
"type": null,
"docstring": "number of processors to distribute to",
"docstring_tokens": [
"number",
"of",
"processors",
"to",
"distribute",
"to"
],
"default": null,
"is_optional": false
},
{
"identifier": "minAmount",
"type": null,
"docstring": "Distributor will not distribute less than this\namount per processor, unless the total amount of tasks is smaller\nthan minAmount.",
"docstring_tokens": [
"Distributor",
"will",
"not",
"distribute",
"less",
"than",
"this",
"amount",
"per",
"processor",
"unless",
"the",
"total",
"amount",
"of",
"tasks",
"is",
"smaller",
"than",
"minAmount",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def distribute(pending, numProcessors, minAmount):
totalPending = sum(x[1] for x in pending)
minAmount = max(1, minAmount)
numProcessors = max(1, min(numProcessors, totalPending / minAmount))
while totalPending > 0:
curAmount = totalPending / numProcessors
remaining = curAmount
res = []
while remaining > 0:
firstId, firstMissing = pending[0]
if firstMissing <= remaining:
res.append((firstId, firstMissing))
remaining -= firstMissing
pending = pending[1:]
else:
res.append((firstId, remaining))
pending = [(firstId, firstMissing - remaining)] + pending[1:]
remaining = 0
numProcessors -= 1
totalPending -= curAmount
yield res
| 826 | 1005 |
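A hedged usage sketch for `distribute` in the record above; the experiment names and counts are invented, and note the function was written for Python 2's integer division (under Python 3, replacing `/` with `//` keeps the per-node counts integral):

pending = [("exp_a", 6), ("exp_b", 4)]
for batch in distribute(pending, numProcessors=3, minAmount=2):
    print(batch)
# with integer division: [('exp_a', 3)], then [('exp_a', 3)], then [('exp_b', 4)]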
c0e47e0e429c06592a779a071792f7a2621698c1
|
mavisguan/django
|
django/utils/cache.py
|
[
"BSD-3-Clause",
"0BSD"
] |
Python
|
_if_modified_since_passes
|
<not_specific>
|
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
|
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
|
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
|
[
"Test",
"the",
"If",
"-",
"Modified",
"-",
"Since",
"comparison",
"as",
"defined",
"in",
"section",
"3",
".",
"3",
"of",
"RFC",
"7232",
"."
] |
def _if_modified_since_passes(last_modified, if_modified_since):
return not last_modified or last_modified > if_modified_since
|
[
"def",
"_if_modified_since_passes",
"(",
"last_modified",
",",
"if_modified_since",
")",
":",
"return",
"not",
"last_modified",
"or",
"last_modified",
">",
"if_modified_since"
] |
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
|
[
"Test",
"the",
"If",
"-",
"Modified",
"-",
"Since",
"comparison",
"as",
"defined",
"in",
"section",
"3",
".",
"3",
"of",
"RFC",
"7232",
"."
] |
[
"\"\"\"\n Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.\n \"\"\""
] |
[
{
"param": "last_modified",
"type": null
},
{
"param": "if_modified_since",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "last_modified",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "if_modified_since",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _if_modified_since_passes(last_modified, if_modified_since):
return not last_modified or last_modified > if_modified_since
| 827 | 471 |
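An illustrative check of `_if_modified_since_passes` in the record above, using made-up epoch timestamps of the kind Django compares for these headers:

print(_if_modified_since_passes(1700000500, 1700000000))  # True: resource changed since
print(_if_modified_since_passes(1700000000, 1700000500))  # False: unchanged
print(_if_modified_since_passes(None, 1700000000))        # True: mtime unknown, so re-send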
690698aa98570699dc551465279f1e858659a594
|
dirk-tornow-adesso/dotfiles
|
sublime/Packages/Anaconda/anaconda_lib/jedi/parser/tree.py
|
[
"MIT"
] |
Python
|
search_ancestor
|
<not_specific>
|
def search_ancestor(node, *node_types):
"""
Recursively looks at the parents of a node and checks if the type names
match.
:param node: The node that is looked at.
:param node_types: A tuple or a string of type names that are
searched for.
"""
while True:
node = node.parent
if node is None or node.type in node_types:
return node
|
Recursively looks at the parents of a node and checks if the type names
match.
:param node: The node that is looked at.
:param node_types: A tuple or a string of type names that are
searched for.
|
Recursively looks at the parents of a node and checks if the type names
match.
|
[
"Recursively",
"looks",
"at",
"the",
"parents",
"of",
"a",
"node",
"and",
"checks",
"if",
"the",
"type",
"names",
"match",
"."
] |
def search_ancestor(node, *node_types):
while True:
node = node.parent
if node is None or node.type in node_types:
return node
|
[
"def",
"search_ancestor",
"(",
"node",
",",
"*",
"node_types",
")",
":",
"while",
"True",
":",
"node",
"=",
"node",
".",
"parent",
"if",
"node",
"is",
"None",
"or",
"node",
".",
"type",
"in",
"node_types",
":",
"return",
"node"
] |
Recursively looks at the parents of a node and checks if the type names
match.
|
[
"Recursively",
"looks",
"at",
"the",
"parents",
"of",
"a",
"node",
"and",
"checks",
"if",
"the",
"type",
"names",
"match",
"."
] |
[
"\"\"\"\n Recursively looks at the parents of a node and checks if the type names\n match.\n\n :param node: The node that is looked at.\n :param node_types: A tuple or a string of type names that are\n searched for.\n \"\"\""
] |
[
{
"param": "node",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "node",
"type": null,
"docstring": "The node that is looked at.",
"docstring_tokens": [
"The",
"node",
"that",
"is",
"looked",
"at",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "node_types",
"type": null,
"docstring": "A tuple or a string of type names that are\nsearched for.",
"docstring_tokens": [
"A",
"tuple",
"or",
"a",
"string",
"of",
"type",
"names",
"that",
"are",
"searched",
"for",
"."
],
"default": null,
"is_optional": null
}
],
"others": []
}
|
def search_ancestor(node, *node_types):
while True:
node = node.parent
if node is None or node.type in node_types:
return node
| 828 | 128 |
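A self-contained sketch of `search_ancestor` in the record above; the `Node` class is a hypothetical stand-in for a real parser-tree node with `type` and `parent` attributes:

class Node:
    def __init__(self, type_, parent=None):
        self.type, self.parent = type_, parent

module = Node("file_input")
funcdef = Node("funcdef", parent=module)
name = Node("name", parent=funcdef)
print(search_ancestor(name, "funcdef").type)  # 'funcdef'
print(search_ancestor(name, "classdef"))      # None: no matching ancestor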
8d97fd94137ecc590a70a9c47cf9484f07e6038c
|
TakaHoribe/launch
|
launch/launch/actions/group_action.py
|
[
"Apache-2.0"
] |
Python
|
parse
|
<not_specific>
|
def parse(cls, entity: Entity, parser: Parser):
"""Return `GroupAction` action and kwargs for constructing it."""
_, kwargs = super().parse(entity, parser)
scoped = entity.get_attr('scoped', data_type=bool, optional=True)
if scoped is not None:
kwargs['scoped'] = scoped
kwargs['actions'] = [parser.parse_action(e) for e in entity.children]
return cls, kwargs
|
Return `GroupAction` action and kwargs for constructing it.
|
Return `GroupAction` action and kwargs for constructing it.
|
[
"Return",
"`",
"GroupAction",
"`",
"action",
"and",
"kwargs",
"for",
"constructing",
"it",
"."
] |
def parse(cls, entity: Entity, parser: Parser):
_, kwargs = super().parse(entity, parser)
scoped = entity.get_attr('scoped', data_type=bool, optional=True)
if scoped is not None:
kwargs['scoped'] = scoped
kwargs['actions'] = [parser.parse_action(e) for e in entity.children]
return cls, kwargs
|
[
"def",
"parse",
"(",
"cls",
",",
"entity",
":",
"Entity",
",",
"parser",
":",
"Parser",
")",
":",
"_",
",",
"kwargs",
"=",
"super",
"(",
")",
".",
"parse",
"(",
"entity",
",",
"parser",
")",
"scoped",
"=",
"entity",
".",
"get_attr",
"(",
"'scoped'",
",",
"data_type",
"=",
"bool",
",",
"optional",
"=",
"True",
")",
"if",
"scoped",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'scoped'",
"]",
"=",
"scoped",
"kwargs",
"[",
"'actions'",
"]",
"=",
"[",
"parser",
".",
"parse_action",
"(",
"e",
")",
"for",
"e",
"in",
"entity",
".",
"children",
"]",
"return",
"cls",
",",
"kwargs"
] |
Return `GroupAction` action and kwargs for constructing it.
|
[
"Return",
"`",
"GroupAction",
"`",
"action",
"and",
"kwargs",
"for",
"constructing",
"it",
"."
] |
[
"\"\"\"Return `GroupAction` action and kwargs for constructing it.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "entity",
"type": "Entity"
},
{
"param": "parser",
"type": "Parser"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "entity",
"type": "Entity",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "parser",
"type": "Parser",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from launch.frontend import Entity, Parser  # assumed import path for these types
def parse(cls, entity: Entity, parser: Parser):
    # Note: in the source this is a classmethod on a launch Action subclass,
    # so the bare super() call below only resolves inside that class body.
    _, kwargs = super().parse(entity, parser)
    scoped = entity.get_attr('scoped', data_type=bool, optional=True)
    if scoped is not None:
        kwargs['scoped'] = scoped
    kwargs['actions'] = [parser.parse_action(e) for e in entity.children]
    return cls, kwargs
| 829 | 864 |
d8c1a8db43da6f5547ffd18d6c7532634c8398c5
|
sjg20/ec
|
zephyr/zmake/zmake/output_packers.py
|
[
"BSD-3-Clause"
] |
Python
|
_write_dts_file
| null |
def _write_dts_file(dts_file, config_header, output_bin, ro_filename, rw_filename):
"""Generate the .dts file used for binman.
Args:
dts_file: The dts file to write to.
config_header: The full path to the generated autoconf.h header.
output_bin: The full path to the binary that binman should output.
ro_filename: The RO image file name.
rw_filename: The RW image file name.
Returns:
The path to the .dts file that was generated.
"""
dts_file.write("""
/dts-v1/;
#include "{config_header}"
/ {{
#address-cells = <1>;
#size-cells = <1>;
binman {{
filename = "{output_bin}";
pad-byte = <0x1d>;
section@0 {{
read-only;
offset = <CONFIG_CROS_EC_RO_MEM_OFF>;
size = <CONFIG_CROS_EC_RO_SIZE>;
blob {{
filename = "{ro_filename}";
}};
}};
section@1 {{
offset = <CONFIG_CROS_EC_RW_MEM_OFF>;
size = <CONFIG_CROS_EC_RW_SIZE>;
blob {{
filename = "{rw_filename}";
}};
}};
}};
}};""".format(
output_bin=output_bin,
config_header=config_header,
ro_filename=ro_filename,
rw_filename=rw_filename
))
|
Generate the .dts file used for binman.
Args:
dts_file: The dts file to write to.
config_header: The full path to the generated autoconf.h header.
output_bin: The full path to the binary that binman should output.
ro_filename: The RO image file name.
rw_filename: The RW image file name.
Returns:
The path to the .dts file that was generated.
|
Generate the .dts file used for binman.
|
[
"Generate",
"the",
".",
"dts",
"file",
"used",
"for",
"binman",
"."
] |
def _write_dts_file(dts_file, config_header, output_bin, ro_filename, rw_filename):
dts_file.write("""
/dts-v1/;
#include "{config_header}"
/ {{
#address-cells = <1>;
#size-cells = <1>;
binman {{
filename = "{output_bin}";
pad-byte = <0x1d>;
section@0 {{
read-only;
offset = <CONFIG_CROS_EC_RO_MEM_OFF>;
size = <CONFIG_CROS_EC_RO_SIZE>;
blob {{
filename = "{ro_filename}";
}};
}};
section@1 {{
offset = <CONFIG_CROS_EC_RW_MEM_OFF>;
size = <CONFIG_CROS_EC_RW_SIZE>;
blob {{
filename = "{rw_filename}";
}};
}};
}};
}};""".format(
output_bin=output_bin,
config_header=config_header,
ro_filename=ro_filename,
rw_filename=rw_filename
))
|
[
"def",
"_write_dts_file",
"(",
"dts_file",
",",
"config_header",
",",
"output_bin",
",",
"ro_filename",
",",
"rw_filename",
")",
":",
"dts_file",
".",
"write",
"(",
"\"\"\"\n /dts-v1/;\n #include \"{config_header}\"\n / {{\n #address-cells = <1>;\n #size-cells = <1>;\n binman {{\n filename = \"{output_bin}\";\n pad-byte = <0x1d>;\n section@0 {{\n read-only;\n offset = <CONFIG_CROS_EC_RO_MEM_OFF>;\n size = <CONFIG_CROS_EC_RO_SIZE>;\n blob {{\n filename = \"{ro_filename}\";\n }};\n }};\n section@1 {{\n offset = <CONFIG_CROS_EC_RW_MEM_OFF>;\n size = <CONFIG_CROS_EC_RW_SIZE>;\n blob {{\n filename = \"{rw_filename}\";\n }};\n }};\n }};\n }};\"\"\"",
".",
"format",
"(",
"output_bin",
"=",
"output_bin",
",",
"config_header",
"=",
"config_header",
",",
"ro_filename",
"=",
"ro_filename",
",",
"rw_filename",
"=",
"rw_filename",
")",
")"
] |
Generate the .dts file used for binman.
|
[
"Generate",
"the",
".",
"dts",
"file",
"used",
"for",
"binman",
"."
] |
[
"\"\"\"Generate the .dts file used for binman.\n\n Args:\n dts_file: The dts file to write to.\n config_header: The full path to the generated autoconf.h header.\n output_bin: The full path to the binary that binman should output.\n ro_filename: The RO image file name.\n rw_filename: The RW image file name.\n\n Returns:\n The path to the .dts file that was generated.\n \"\"\""
] |
[
{
"param": "dts_file",
"type": null
},
{
"param": "config_header",
"type": null
},
{
"param": "output_bin",
"type": null
},
{
"param": "ro_filename",
"type": null
},
{
"param": "rw_filename",
"type": null
}
] |
{
"returns": [
{
"docstring": "The path to the .dts file that was generated.",
"docstring_tokens": [
"The",
"path",
"to",
"the",
".",
"dts",
"file",
"that",
"was",
"generated",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "dts_file",
"type": null,
"docstring": "The dts file to write to.",
"docstring_tokens": [
"The",
"dts",
"file",
"to",
"write",
"to",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "config_header",
"type": null,
"docstring": "The full path to the generated autoconf.h header.",
"docstring_tokens": [
"The",
"full",
"path",
"to",
"the",
"generated",
"autoconf",
".",
"h",
"header",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "output_bin",
"type": null,
"docstring": "The full path to the binary that binman should output.",
"docstring_tokens": [
"The",
"full",
"path",
"to",
"the",
"binary",
"that",
"binman",
"should",
"output",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "ro_filename",
"type": null,
"docstring": "The RO image file name.",
"docstring_tokens": [
"The",
"RO",
"image",
"file",
"name",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "rw_filename",
"type": null,
"docstring": "The RW image file name.",
"docstring_tokens": [
"The",
"RW",
"image",
"file",
"name",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _write_dts_file(dts_file, config_header, output_bin, ro_filename, rw_filename):
dts_file.write("""
/dts-v1/;
#include "{config_header}"
/ {{
#address-cells = <1>;
#size-cells = <1>;
binman {{
filename = "{output_bin}";
pad-byte = <0x1d>;
section@0 {{
read-only;
offset = <CONFIG_CROS_EC_RO_MEM_OFF>;
size = <CONFIG_CROS_EC_RO_SIZE>;
blob {{
filename = "{ro_filename}";
}};
}};
section@1 {{
offset = <CONFIG_CROS_EC_RW_MEM_OFF>;
size = <CONFIG_CROS_EC_RW_SIZE>;
blob {{
filename = "{rw_filename}";
}};
}};
}};
}};""".format(
output_bin=output_bin,
config_header=config_header,
ro_filename=ro_filename,
rw_filename=rw_filename
))
| 831 | 109 |
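A usage sketch for `_write_dts_file` in the record above, writing into an in-memory buffer (the paths are illustrative); note the function writes to the handle and returns None, despite the docstring's promise of a path:

import io
buf = io.StringIO()
_write_dts_file(buf, "zephyr/autoconf.h", "out/zephyr.bin", "out/ro.bin", "out/rw.bin")
print('#include "zephyr/autoconf.h"' in buf.getvalue())  # True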
cfc14956fb440d3081b6769f360bf646ce46d53e
|
laszlokiraly/LearningAlgorithms
|
ch05/sorting.py
|
[
"MIT"
] |
Python
|
selection_sort_counting
|
<not_specific>
|
def selection_sort_counting(A):
"""Instrumented Selection Sort to return #swaps, #compares."""
N = len(A)
num_swap = num_compare = 0
for i in range(N-1):
min_index = i
for j in range(i+1, N):
num_compare += 1
if A[j] < A[min_index]:
min_index = j
num_swap += 1
A[i],A[min_index] = A[min_index],A[i]
return (num_swap, num_compare)
|
Instrumented Selection Sort to return #swaps, #compares.
|
Instrumented Selection Sort to return #swaps, #compares.
|
[
"Instrumented",
"Selection",
"Sort",
"to",
"return",
"#swaps",
"#compares",
"."
] |
def selection_sort_counting(A):
N = len(A)
num_swap = num_compare = 0
for i in range(N-1):
min_index = i
for j in range(i+1, N):
num_compare += 1
if A[j] < A[min_index]:
min_index = j
num_swap += 1
A[i],A[min_index] = A[min_index],A[i]
return (num_swap, num_compare)
|
[
"def",
"selection_sort_counting",
"(",
"A",
")",
":",
"N",
"=",
"len",
"(",
"A",
")",
"num_swap",
"=",
"num_compare",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"N",
"-",
"1",
")",
":",
"min_index",
"=",
"i",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"N",
")",
":",
"num_compare",
"+=",
"1",
"if",
"A",
"[",
"j",
"]",
"<",
"A",
"[",
"min_index",
"]",
":",
"min_index",
"=",
"j",
"num_swap",
"+=",
"1",
"A",
"[",
"i",
"]",
",",
"A",
"[",
"min_index",
"]",
"=",
"A",
"[",
"min_index",
"]",
",",
"A",
"[",
"i",
"]",
"return",
"(",
"num_swap",
",",
"num_compare",
")"
] |
Instrumented Selection Sort to return #swaps, #compares.
|
[
"Instrumented",
"Selection",
"Sort",
"to",
"return",
"#swaps",
"#compares",
"."
] |
[
"\"\"\"Instrumented Selection Sort to return #swaps, #compares.\"\"\""
] |
[
{
"param": "A",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "A",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def selection_sort_counting(A):
N = len(A)
num_swap = num_compare = 0
for i in range(N-1):
min_index = i
for j in range(i+1, N):
num_compare += 1
if A[j] < A[min_index]:
min_index = j
num_swap += 1
A[i],A[min_index] = A[min_index],A[i]
return (num_swap, num_compare)
| 832 | 726 |
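A quick check of `selection_sort_counting` in the record above; the swap counter increments once per outer pass, so it reports N-1 swaps even when an element is already in place:

data = [5, 2, 4, 1, 3]
swaps, compares = selection_sort_counting(data)
print(data, swaps, compares)  # [1, 2, 3, 4, 5] 4 10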
3cd42742ca570b3a205e5ccabd366d77dda1fece
|
chapinb/aws_ip_tracker
|
query_mongo.py
|
[
"MIT"
] |
Python
|
write_json
| null |
def write_json(open_file, rset, lines=False):
"""Controller to write results as json"""
if lines:
for entry in rset:
open_file.write(json.dumps(entry)+'\n')
else:
open_file.write(json.dumps(rset))
|
Controller to write results as json
|
Controller to write results as json
|
[
"Controller",
"to",
"write",
"results",
"as",
"json"
] |
def write_json(open_file, rset, lines=False):
if lines:
for entry in rset:
open_file.write(json.dumps(entry)+'\n')
else:
open_file.write(json.dumps(rset))
|
[
"def",
"write_json",
"(",
"open_file",
",",
"rset",
",",
"lines",
"=",
"False",
")",
":",
"if",
"lines",
":",
"for",
"entry",
"in",
"rset",
":",
"open_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"entry",
")",
"+",
"'\\n'",
")",
"else",
":",
"open_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"rset",
")",
")"
] |
Controller to write results as json
|
[
"Controller",
"to",
"write",
"results",
"as",
"json"
] |
[
"\"\"\"Controller to write results as json\"\"\""
] |
[
{
"param": "open_file",
"type": null
},
{
"param": "rset",
"type": null
},
{
"param": "lines",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "open_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rset",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "lines",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def write_json(open_file, rset, lines=False):
if lines:
for entry in rset:
open_file.write(json.dumps(entry)+'\n')
else:
open_file.write(json.dumps(rset))
| 833 | 11 |
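An illustrative call to `write_json` in the record above, using an in-memory file and invented records to show the JSON-lines mode:

import io
buf = io.StringIO()
write_json(buf, [{"ip": "10.0.0.1"}, {"ip": "10.0.0.2"}], lines=True)
print(buf.getvalue())
# {"ip": "10.0.0.1"}
# {"ip": "10.0.0.2"}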
6b1f599001846d7d313ffda42cc6a320078341da
|
4eyes4u/HRAnalytics
|
main_woe.py
|
[
"MIT"
] |
Python
|
report_performance
|
dict
|
def report_performance(optimizer: BayesSearchCV,
X: pd.DataFrame,
y: pd.DataFrame,
callbacks=None
) -> dict:
r"""Verbose for optimizer. Will call callbacks in each iteration."""
optimizer.fit(X, y, callback=callbacks)
return optimizer.best_params_
|
Verbose for optimizer. Will call callbacks in each iteration.
|
Verbose for optimizer. Will call callbacks in each iteration.
|
[
"Verbose",
"for",
"optimizer",
".",
"Will",
"call",
"callbacks",
"in",
"each",
"iteration",
"."
] |
def report_performance(optimizer: BayesSearchCV,
X: pd.DataFrame,
y: pd.DataFrame,
callbacks=None
) -> dict:
optimizer.fit(X, y, callback=callbacks)
return optimizer.best_params_
|
[
"def",
"report_performance",
"(",
"optimizer",
":",
"BayesSearchCV",
",",
"X",
":",
"pd",
".",
"DataFrame",
",",
"y",
":",
"pd",
".",
"DataFrame",
",",
"callbacks",
"=",
"None",
")",
"->",
"dict",
":",
"optimizer",
".",
"fit",
"(",
"X",
",",
"y",
",",
"callback",
"=",
"callbacks",
")",
"return",
"optimizer",
".",
"best_params_"
] |
Verbose for optimizer.
|
[
"Verbose",
"for",
"optimizer",
"."
] |
[
"r\"\"\"Verbose for optimizer. Will call callbacks in each iteration.\"\"\""
] |
[
{
"param": "optimizer",
"type": "BayesSearchCV"
},
{
"param": "X",
"type": "pd.DataFrame"
},
{
"param": "y",
"type": "pd.DataFrame"
},
{
"param": "callbacks",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "optimizer",
"type": "BayesSearchCV",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "X",
"type": "pd.DataFrame",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "y",
"type": "pd.DataFrame",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "callbacks",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import pandas as pd
from skopt import BayesSearchCV  # assumed source of BayesSearchCV (scikit-optimize)
def report_performance(optimizer: BayesSearchCV,
                       X: pd.DataFrame,
                       y: pd.DataFrame,
                       callbacks=None
                       ) -> dict:
    optimizer.fit(X, y, callback=callbacks)
    return optimizer.best_params_
| 835 | 357 |
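A hedged end-to-end sketch for `report_performance` in the record above; it assumes scikit-optimize and scikit-learn are installed, and the estimator and search space are illustrative (plain arrays also work despite the DataFrame annotations):

from sklearn.datasets import load_iris
from sklearn.svm import SVC
X, y = load_iris(return_X_y=True)
opt = BayesSearchCV(SVC(), {"C": (1e-3, 1e3, "log-uniform")}, n_iter=8)
print(report_performance(opt, X, y))  # e.g. OrderedDict([('C', ...)])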
51515392367946e2092fb743f1551bbe76f42319
|
YmirKhang/flytekit
|
tests/flytekit/unit/core/test_references.py
|
[
"Apache-2.0"
] |
Python
|
ref_t1
|
str
|
def ref_t1(a: typing.List[str]) -> str:
"""
The empty function acts as a convenient skeleton to make it intuitive to call/reference this task from workflows.
The interface of the task must match that of the remote task. Otherwise, remote compilation of the workflow will
fail.
"""
...
|
The empty function acts as a convenient skeleton to make it intuitive to call/reference this task from workflows.
The interface of the task must match that of the remote task. Otherwise, remote compilation of the workflow will
fail.
|
The empty function acts as a convenient skeleton to make it intuitive to call/reference this task from workflows.
The interface of the task must match that of the remote task. Otherwise, remote compilation of the workflow will
fail.
|
[
"The",
"empty",
"function",
"acts",
"as",
"a",
"convenient",
"skeleton",
"to",
"make",
"it",
"intuitive",
"to",
"call",
"/",
"reference",
"this",
"task",
"from",
"workflows",
".",
"The",
"interface",
"of",
"the",
"task",
"must",
"match",
"that",
"of",
"the",
"remote",
"task",
".",
"Otherwise",
"remote",
"compilation",
"of",
"the",
"workflow",
"will",
"fail",
"."
] |
def ref_t1(a: typing.List[str]) -> str:
...
|
[
"def",
"ref_t1",
"(",
"a",
":",
"typing",
".",
"List",
"[",
"str",
"]",
")",
"->",
"str",
":",
"..."
] |
The empty function acts as a convenient skeleton to make it intuitive to call/reference this task from workflows.
|
[
"The",
"empty",
"function",
"acts",
"as",
"a",
"convenient",
"skeleton",
"to",
"make",
"it",
"intuitive",
"to",
"call",
"/",
"reference",
"this",
"task",
"from",
"workflows",
"."
] |
[
"\"\"\"\n The empty function acts as a convenient skeleton to make it intuitive to call/reference this task from workflows.\n The interface of the task must match that of the remote task. Otherwise, remote compilation of the workflow will\n fail.\n \"\"\""
] |
[
{
"param": "a",
"type": "typing.List[str]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "a",
"type": "typing.List[str]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import typing
def ref_t1(a: typing.List[str]) -> str:
    ...
| 836 | 244 |
37130074cbcf17044671662630397074e69dd062
|
nedbat/coveragepy
|
tests/test_testing.py
|
[
"Apache-2.0"
] |
Python
|
_same_python_executable
|
<not_specific>
|
def _same_python_executable(e1, e2):
"""Determine if `e1` and `e2` refer to the same Python executable.
Either path could include symbolic links. The two paths might not refer
to the exact same file, but if they are in the same directory and their
numeric suffixes aren't different, they are the same executable.
"""
e1 = os.path.abspath(os.path.realpath(e1))
e2 = os.path.abspath(os.path.realpath(e2))
if os.path.dirname(e1) != os.path.dirname(e2):
return False # pragma: only failure
e1 = os.path.basename(e1)
e2 = os.path.basename(e2)
if e1 == "python" or e2 == "python" or e1 == e2:
# Python and Python2.3: OK
# Python2.3 and Python: OK
# Python and Python: OK
# Python2.3 and Python2.3: OK
return True
return False # pragma: only failure
|
Determine if `e1` and `e2` refer to the same Python executable.
Either path could include symbolic links. The two paths might not refer
to the exact same file, but if they are in the same directory and their
numeric suffixes aren't different, they are the same executable.
|
Determine if `e1` and `e2` refer to the same Python executable.
Either path could include symbolic links. The two paths might not refer
to the exact same file, but if they are in the same directory and their
numeric suffixes aren't different, they are the same executable.
|
[
"Determine",
"if",
"`",
"e1",
"`",
"and",
"`",
"e2",
"`",
"refer",
"to",
"the",
"same",
"Python",
"executable",
".",
"Either",
"path",
"could",
"include",
"symbolic",
"links",
".",
"The",
"two",
"paths",
"might",
"not",
"refer",
"to",
"the",
"exact",
"same",
"file",
"but",
"if",
"they",
"are",
"in",
"the",
"same",
"directory",
"and",
"their",
"numeric",
"suffixes",
"aren",
"'",
"t",
"different",
"they",
"are",
"the",
"same",
"executable",
"."
] |
def _same_python_executable(e1, e2):
e1 = os.path.abspath(os.path.realpath(e1))
e2 = os.path.abspath(os.path.realpath(e2))
if os.path.dirname(e1) != os.path.dirname(e2):
return False
e1 = os.path.basename(e1)
e2 = os.path.basename(e2)
if e1 == "python" or e2 == "python" or e1 == e2:
return True
return False
|
[
"def",
"_same_python_executable",
"(",
"e1",
",",
"e2",
")",
":",
"e1",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"e1",
")",
")",
"e2",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"e2",
")",
")",
"if",
"os",
".",
"path",
".",
"dirname",
"(",
"e1",
")",
"!=",
"os",
".",
"path",
".",
"dirname",
"(",
"e2",
")",
":",
"return",
"False",
"e1",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"e1",
")",
"e2",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"e2",
")",
"if",
"e1",
"==",
"\"python\"",
"or",
"e2",
"==",
"\"python\"",
"or",
"e1",
"==",
"e2",
":",
"return",
"True",
"return",
"False"
] |
Determine if `e1` and `e2` refer to the same Python executable.
|
[
"Determine",
"if",
"`",
"e1",
"`",
"and",
"`",
"e2",
"`",
"refer",
"to",
"the",
"same",
"Python",
"executable",
"."
] |
[
"\"\"\"Determine if `e1` and `e2` refer to the same Python executable.\n\n Either path could include symbolic links. The two paths might not refer\n to the exact same file, but if they are in the same directory and their\n numeric suffixes aren't different, they are the same executable.\n\n \"\"\"",
"# pragma: only failure",
"# Python and Python2.3: OK",
"# Python2.3 and Python: OK",
"# Python and Python: OK",
"# Python2.3 and Python2.3: OK",
"# pragma: only failure"
] |
[
{
"param": "e1",
"type": null
},
{
"param": "e2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "e1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "e2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def _same_python_executable(e1, e2):
e1 = os.path.abspath(os.path.realpath(e1))
e2 = os.path.abspath(os.path.realpath(e2))
if os.path.dirname(e1) != os.path.dirname(e2):
return False
e1 = os.path.basename(e1)
e2 = os.path.basename(e2)
if e1 == "python" or e2 == "python" or e1 == e2:
return True
return False
| 837 | 843 |
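A minimal check of `_same_python_executable` in the record above; beyond the trivial case shown, two paths compare equal when they resolve into the same directory and either basename is plain "python" or the basenames match, so real-world results depend on the local filesystem:

import sys
print(_same_python_executable(sys.executable, sys.executable))  # True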
a0505d18aee08825510209f23decfde028aa1559
|
centre-for-humanities-computing/danish-foundation-models
|
src/applications/danews/quality_filter.py
|
[
"MIT"
] |
Python
|
add_text_col
|
dict
|
def add_text_col(example: dict) -> dict:
"""constructs text column to news article"""
example["text"] = ""
if example["Heading"].strip(" "):
example["text"] = example["Heading"]
if example["SubHeading"].strip(" "):
example["text"] += f'\n {example["SubHeading"]}'
# if example["PublishDate"]:
# example["text"] += f'\n {example["PublishDate"]}'
# if example["Paragraph"].strip(" "):
# example["text"] += f'\n\n {example["Paragraph"]}'
if example["BodyText"].strip(" "):
example["text"] += f'\n\n {example["BodyText"]}'
return example
|
constructs text column to news article
|
constructs text column to news article
|
[
"constructs",
"text",
"column",
"to",
"news",
"article"
] |
def add_text_col(example: dict) -> dict:
example["text"] = ""
if example["Heading"].strip(" "):
example["text"] = example["Heading"]
if example["SubHeading"].strip(" "):
example["text"] += f'\n {example["SubHeading"]}'
if example["BodyText"].strip(" "):
example["text"] += f'\n\n {example["BodyText"]}'
return example
|
[
"def",
"add_text_col",
"(",
"example",
":",
"dict",
")",
"->",
"dict",
":",
"example",
"[",
"\"text\"",
"]",
"=",
"\"\"",
"if",
"example",
"[",
"\"Heading\"",
"]",
".",
"strip",
"(",
"\" \"",
")",
":",
"example",
"[",
"\"text\"",
"]",
"=",
"example",
"[",
"\"Heading\"",
"]",
"if",
"example",
"[",
"\"SubHeading\"",
"]",
".",
"strip",
"(",
"\" \"",
")",
":",
"example",
"[",
"\"text\"",
"]",
"+=",
"f'\\n {example[\"SubHeading\"]}'",
"if",
"example",
"[",
"\"BodyText\"",
"]",
".",
"strip",
"(",
"\" \"",
")",
":",
"example",
"[",
"\"text\"",
"]",
"+=",
"f'\\n\\n {example[\"BodyText\"]}'",
"return",
"example"
] |
constructs text column to news article
|
[
"constructs",
"text",
"column",
"to",
"news",
"article"
] |
[
"\"\"\"constructs text column to news article\"\"\"",
"# if example[\"PublishDate\"]:",
"# example[\"text\"] += f'\\n {example[\"PublishDate\"]}'",
"# if example[\"Paragraph\"].strip(\" \"):",
"# example[\"text\"] += f'\\n\\n {example[\"Paragraph\"]}'"
] |
[
{
"param": "example",
"type": "dict"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "example",
"type": "dict",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_text_col(example: dict) -> dict:
example["text"] = ""
if example["Heading"].strip(" "):
example["text"] = example["Heading"]
if example["SubHeading"].strip(" "):
example["text"] += f'\n {example["SubHeading"]}'
if example["BodyText"].strip(" "):
example["text"] += f'\n\n {example["BodyText"]}'
return example
| 838 | 762 |
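An illustrative call to `add_text_col` in the record above with an invented article; note the single leading space the f-strings place before the subheading and body:

article = {"Heading": "Budget passes", "SubHeading": "Vote was close",
           "BodyText": "The bill passed late on Tuesday."}
print(add_text_col(article)["text"])
# Budget passes
#  Vote was close
#
#  The bill passed late on Tuesday.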
d5cc029b750acd102a2afdc7fb0719be3e5d8cde
|
rgaensler/gcode
|
src/prechecks/graph_creation.py
|
[
"MIT"
] |
Python
|
joint_limit_cost
|
float
|
def joint_limit_cost(joints: List[float], qlim: List[float], w: Optional[List[float]] = None) -> float:
"""
Measure to drive joints away from their limits.
:param joints: Joint coordinates to be evaluated
:param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]
:param w: Weights for the individual joints.
:return: Non-negative cost value for the given joint coordinates. Best is zero.
[1] B. Siciliano, L. Sciavicco, L. Villani und G. Oriolo, Robotics : Modelling, Planning and Control, London:
Springer, 2009.
"""
val = 0
if w is None:
for jmin, jmax, j in zip(qlim[::2], qlim[1::2], joints):
# Use distance from mid-point relative to total range
val += ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
else:
if len(w) != len(joints):
raise ValueError('Need to supply as many weight factors as joint coordinates.')
for jmin, jmax, j, jw in zip(qlim[::2], qlim[1::2], joints, w):
val += jw * ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
# Normalize with regard to number of joints
return val / (2 * len(joints))
|
Measure to drive joints away from their limits.
:param joints: Joint coordinates to be evaluated
:param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]
:param w: Weights for the individual joints.
:return: Non-negative cost value for the given joint coordinates. Best is zero.
[1] B. Siciliano, L. Sciavicco, L. Villani und G. Oriolo, Robotics : Modelling, Planning and Control, London:
Springer, 2009.
|
Measure to drive joints away from their limits.
|
[
"Measure",
"to",
"drive",
"joints",
"away",
"from",
"their",
"limits",
"."
] |
def joint_limit_cost(joints: List[float], qlim: List[float], w: Optional[List[float]] = None) -> float:
val = 0
if w is None:
for jmin, jmax, j in zip(qlim[::2], qlim[1::2], joints):
val += ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
else:
if len(w) != len(joints):
raise ValueError('Need to supply as many weight factors as joint coordinates.')
for jmin, jmax, j, jw in zip(qlim[::2], qlim[1::2], joints, w):
val += jw * ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
return val / (2 * len(joints))
|
[
"def",
"joint_limit_cost",
"(",
"joints",
":",
"List",
"[",
"float",
"]",
",",
"qlim",
":",
"List",
"[",
"float",
"]",
",",
"w",
":",
"Optional",
"[",
"List",
"[",
"float",
"]",
"]",
"=",
"None",
")",
"->",
"float",
":",
"val",
"=",
"0",
"if",
"w",
"is",
"None",
":",
"for",
"jmin",
",",
"jmax",
",",
"j",
"in",
"zip",
"(",
"qlim",
"[",
":",
":",
"2",
"]",
",",
"qlim",
"[",
"1",
":",
":",
"2",
"]",
",",
"joints",
")",
":",
"val",
"+=",
"(",
"(",
"j",
"-",
"0.5",
"*",
"(",
"jmin",
"+",
"jmax",
")",
")",
"/",
"(",
"jmax",
"-",
"jmin",
")",
")",
"**",
"2",
"else",
":",
"if",
"len",
"(",
"w",
")",
"!=",
"len",
"(",
"joints",
")",
":",
"raise",
"ValueError",
"(",
"'Need to supply as many weight factors as joint coordinates.'",
")",
"for",
"jmin",
",",
"jmax",
",",
"j",
",",
"jw",
"in",
"zip",
"(",
"qlim",
"[",
":",
":",
"2",
"]",
",",
"qlim",
"[",
"1",
":",
":",
"2",
"]",
",",
"joints",
",",
"w",
")",
":",
"val",
"+=",
"jw",
"*",
"(",
"(",
"j",
"-",
"0.5",
"*",
"(",
"jmin",
"+",
"jmax",
")",
")",
"/",
"(",
"jmax",
"-",
"jmin",
")",
")",
"**",
"2",
"return",
"val",
"/",
"(",
"2",
"*",
"len",
"(",
"joints",
")",
")"
] |
Measure to drive joints away from their limits.
|
[
"Measure",
"to",
"drive",
"joints",
"away",
"from",
"their",
"limits",
"."
] |
[
"\"\"\"\n Measure to drive joints away from their limits.\n :param joints: Joint coordinates to be evaluated\n :param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]\n :param w: Weights for the individual joints.\n :return: Non-negative cost value for the given joint coordinates. Best is zero.\n\n [1] B. Siciliano, L. Sciavicco, L. Villani und G. Oriolo, Robotics : Modelling, Planning and Control, London:\n Springer, 2009.\n \"\"\"",
"# Use distance from mid-point relative to total range",
"# Normalize with regard to number of joints"
] |
[
{
"param": "joints",
"type": "List[float]"
},
{
"param": "qlim",
"type": "List[float]"
},
{
"param": "w",
"type": "Optional[List[float]]"
}
] |
{
"returns": [
{
"docstring": "Non-negative cost value for the given joint coordinates. Best is zero.\n[1] B. Siciliano, L. Sciavicco, L. Villani und G. Oriolo, Robotics : Modelling, Planning and Control, London:\nSpringer, 2009.",
"docstring_tokens": [
"Non",
"-",
"negative",
"cost",
"value",
"for",
"the",
"given",
"joint",
"coordinates",
".",
"Best",
"is",
"zero",
".",
"[",
"1",
"]",
"B",
".",
"Siciliano",
"L",
".",
"Sciavicco",
"L",
".",
"Villani",
"und",
"G",
".",
"Oriolo",
"Robotics",
":",
"Modelling",
"Planning",
"and",
"Control",
"London",
":",
"Springer",
"2009",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "joints",
"type": "List[float]",
"docstring": "Joint coordinates to be evaluated",
"docstring_tokens": [
"Joint",
"coordinates",
"to",
"be",
"evaluated"
],
"default": null,
"is_optional": null
},
{
"identifier": "qlim",
"type": "List[float]",
"docstring": "Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]",
"docstring_tokens": [
"Joint",
"limits",
"in",
"order",
"[",
"J1",
"min",
"J1",
"max",
"J2",
"min",
"J2",
"max",
"Jn",
"min",
"Jn",
"max",
"]"
],
"default": null,
"is_optional": null
},
{
"identifier": "w",
"type": "Optional[List[float]]",
"docstring": "Weights for the individual joints.",
"docstring_tokens": [
"Weights",
"for",
"the",
"individual",
"joints",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from typing import List, Optional
def joint_limit_cost(joints: List[float], qlim: List[float], w: Optional[List[float]] = None) -> float:
    val = 0
    if w is None:
        for jmin, jmax, j in zip(qlim[::2], qlim[1::2], joints):
            val += ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
    else:
        if len(w) != len(joints):
            raise ValueError('Need to supply as many weight factors as joint coordinates.')
        for jmin, jmax, j, jw in zip(qlim[::2], qlim[1::2], joints, w):
            val += jw * ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
    return val / (2 * len(joints))
| 839 | 32 |
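A small numeric check of `joint_limit_cost` in the record above, for a hypothetical two-joint arm with symmetric limits:

qlim = [-180.0, 180.0, -180.0, 180.0]
print(joint_limit_cost([0.0, 0.0], qlim))    # 0.0: both joints at mid-range
print(joint_limit_cost([170.0, 0.0], qlim))  # ~0.056: joint 1 near its limit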
54e2aac1985cf018d552c89199857c9861a3e14a
|
Yasir326/address-index-data
|
DataScience/Analytics/linking/addressLinkingNLPindex.py
|
[
"MIT"
] |
Python
|
_extract_postcode
|
<not_specific>
|
def _extract_postcode(string):
"""
A static private method to extract a postcode from address string.
Uses a rather loose regular expression, so may get some strings that are not completely valid postcodes.
Should not be used to validate whether a postcode conforms to the UK postcode standards.
The regular expression was taken from:
http://stackoverflow.com/questions/164979/uk-postcode-regex-comprehensive
:param string: string to be parsed
:type string: str
:return: postcode
:rtype: str
"""
regx = r'(([gG][iI][rR] {0,}0[aA]{2})|((([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y]?[0-9][0-9]?)|(([a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y]))) {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}))'
try:
potential_postcode = re.findall(regx, string)[0][0]
potential_postcode = potential_postcode.lower().strip()
except IndexError:
potential_postcode = None
# above regex gives also those without space between, add if needed
if potential_postcode is not None:
if ' ' not in potential_postcode:
inc = potential_postcode[-3:]
out = potential_postcode.replace(inc, '')
potential_postcode = out + ' ' + inc
return potential_postcode
|
A static private method to extract a postcode from address string.
Uses a rather loose regular expression, so may get some strings that are not completely valid postcodes.
Should not be used to validate whether a postcode conforms to the UK postcode standards.
The regular expression was taken from:
http://stackoverflow.com/questions/164979/uk-postcode-regex-comprehensive
:param string: string to be parsed
:type string: str
:return: postcode
:rtype: str
|
A static private method to extract a postcode from address string.
Uses a rather loose regular expression, so may get some strings that are not completely valid postcodes.
Should not be used to validate whether a postcode conforms to the UK postcode standards.
|
[
"A",
"static",
"private",
"method",
"to",
"extract",
"a",
"postcode",
"from",
"address",
"string",
".",
"Uses",
"a",
"rather",
"loose",
"regular",
"expression",
"so",
"may",
"get",
"some",
"strings",
"that",
"are",
"not",
"completely",
"valid",
"postcodes",
".",
"Should",
"not",
"be",
"used",
"to",
"validate",
"whether",
"a",
"postcode",
"conforms",
"to",
"the",
"UK",
"postcode",
"standards",
"."
] |
def _extract_postcode(string):
regx = r'(([gG][iI][rR] {0,}0[aA]{2})|((([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y]?[0-9][0-9]?)|(([a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y]))) {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}))'
try:
potential_postcode = re.findall(regx, string)[0][0]
potential_postcode = potential_postcode.lower().strip()
except IndexError:
potential_postcode = None
if potential_postcode is not None:
if ' ' not in potential_postcode:
inc = potential_postcode[-3:]
out = potential_postcode.replace(inc, '')
potential_postcode = out + ' ' + inc
return potential_postcode
|
[
"def",
"_extract_postcode",
"(",
"string",
")",
":",
"regx",
"=",
"r'(([gG][iI][rR] {0,}0[aA]{2})|((([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y]?[0-9][0-9]?)|(([a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y]))) {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}))'",
"try",
":",
"potential_postcode",
"=",
"re",
".",
"findall",
"(",
"regx",
",",
"string",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"potential_postcode",
"=",
"potential_postcode",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"except",
"IndexError",
":",
"potential_postcode",
"=",
"None",
"if",
"potential_postcode",
"is",
"not",
"None",
":",
"if",
"' '",
"not",
"in",
"potential_postcode",
":",
"inc",
"=",
"potential_postcode",
"[",
"-",
"3",
":",
"]",
"out",
"=",
"potential_postcode",
".",
"replace",
"(",
"inc",
",",
"''",
")",
"potential_postcode",
"=",
"out",
"+",
"' '",
"+",
"inc",
"return",
"potential_postcode"
] |
A static private method to extract a postcode from address string.
|
[
"A",
"static",
"private",
"method",
"to",
"extract",
"a",
"postcode",
"from",
"address",
"string",
"."
] |
[
"\"\"\"\n A static private method to extract a postcode from address string.\n\n Uses a rather loose regular expression, so may get some strings that are not completely valid postcodes.\n Should not be used to validate whether a postcode conforms to the UK postcode standards.\n\n The regular expression was taken from:\n http://stackoverflow.com/questions/164979/uk-postcode-regex-comprehensive\n\n :param string: string to be parsed\n :type string: str\n\n :return: postcode\n :rtype: str\n \"\"\"",
"# above regex gives also those without space between, add if needed"
] |
[
{
"param": "string",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": "string to be parsed",
"docstring_tokens": [
"string",
"to",
"be",
"parsed"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def _extract_postcode(string):
regx = r'(([gG][iI][rR] {0,}0[aA]{2})|((([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y]?[0-9][0-9]?)|(([a-pr-uwyzA-PR-UWYZ][0-9][a-hjkstuwA-HJKSTUW])|([a-pr-uwyzA-PR-UWYZ][a-hk-yA-HK-Y][0-9][abehmnprv-yABEHMNPRV-Y]))) {0,}[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2}))'
try:
potential_postcode = re.findall(regx, string)[0][0]
potential_postcode = potential_postcode.lower().strip()
except IndexError:
potential_postcode = None
if potential_postcode is not None:
if ' ' not in potential_postcode:
inc = potential_postcode[-3:]
out = potential_postcode.replace(inc, '')
potential_postcode = out + ' ' + inc
return potential_postcode
| 840 | 352 |
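Two illustrative calls to `_extract_postcode` in the record above; the address is invented, and the function lower-cases the match and inserts the missing space:

print(_extract_postcode("10 Downing Street, London SW1A2AA"))  # 'sw1a 2aa'
print(_extract_postcode("no postcode here"))                   # None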
3bc3d0a02940e2761459447f176107ce24c914be
|
YanCheng-go/geemap
|
geemap/common.py
|
[
"MIT"
] |
Python
|
bbox_to_geojson
|
<not_specific>
|
def bbox_to_geojson(bounds):
"""Convert coordinates of a bounding box to a geojson.
Args:
bounds (list): A list of coordinates representing [left, bottom, right, top].
Returns:
dict: A geojson feature.
"""
return {
"geometry": {
"type": "Polygon",
"coordinates": [
[
[bounds[0], bounds[3]],
[bounds[0], bounds[1]],
[bounds[2], bounds[1]],
[bounds[2], bounds[3]],
[bounds[0], bounds[3]],
]
],
},
"type": "Feature",
}
|
Convert coordinates of a bounding box to a geojson.
Args:
bounds (list): A list of coordinates representing [left, bottom, right, top].
Returns:
dict: A geojson feature.
|
Convert coordinates of a bounding box to a geojson.
|
[
"Convert",
"coordinates",
"of",
"a",
"bounding",
"box",
"to",
"a",
"geojson",
"."
] |
def bbox_to_geojson(bounds):
return {
"geometry": {
"type": "Polygon",
"coordinates": [
[
[bounds[0], bounds[3]],
[bounds[0], bounds[1]],
[bounds[2], bounds[1]],
[bounds[2], bounds[3]],
[bounds[0], bounds[3]],
]
],
},
"type": "Feature",
}
|
[
"def",
"bbox_to_geojson",
"(",
"bounds",
")",
":",
"return",
"{",
"\"geometry\"",
":",
"{",
"\"type\"",
":",
"\"Polygon\"",
",",
"\"coordinates\"",
":",
"[",
"[",
"[",
"bounds",
"[",
"0",
"]",
",",
"bounds",
"[",
"3",
"]",
"]",
",",
"[",
"bounds",
"[",
"0",
"]",
",",
"bounds",
"[",
"1",
"]",
"]",
",",
"[",
"bounds",
"[",
"2",
"]",
",",
"bounds",
"[",
"1",
"]",
"]",
",",
"[",
"bounds",
"[",
"2",
"]",
",",
"bounds",
"[",
"3",
"]",
"]",
",",
"[",
"bounds",
"[",
"0",
"]",
",",
"bounds",
"[",
"3",
"]",
"]",
",",
"]",
"]",
",",
"}",
",",
"\"type\"",
":",
"\"Feature\"",
",",
"}"
] |
Convert coordinates of a bounding box to a geojson.
|
[
"Convert",
"coordinates",
"of",
"a",
"bounding",
"box",
"to",
"a",
"geojson",
"."
] |
[
"\"\"\"Convert coordinates of a bounding box to a geojson.\n\n Args:\n bounds (list): A list of coordinates representing [left, bottom, right, top].\n\n Returns:\n dict: A geojson feature.\n \"\"\""
] |
[
{
"param": "bounds",
"type": null
}
] |
{
"returns": [
{
"docstring": "A geojson feature.",
"docstring_tokens": [
"A",
"geojson",
"feature",
"."
],
"type": "dict"
}
],
"raises": [],
"params": [
{
"identifier": "bounds",
"type": null,
"docstring": "A list of coordinates representing [left, bottom, right, top].",
"docstring_tokens": [
"A",
"list",
"of",
"coordinates",
"representing",
"[",
"left",
"bottom",
"right",
"top",
"]",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def bbox_to_geojson(bounds):
return {
"geometry": {
"type": "Polygon",
"coordinates": [
[
[bounds[0], bounds[3]],
[bounds[0], bounds[1]],
[bounds[2], bounds[1]],
[bounds[2], bounds[3]],
[bounds[0], bounds[3]],
]
],
},
"type": "Feature",
}
| 841 | 377 |
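A quick usage sketch for `bbox_to_geojson` in the record above with made-up coordinates; the polygon ring starts and ends at the top-left corner:

feature = bbox_to_geojson([-122.5, 37.7, -122.3, 37.9])
print(feature["geometry"]["coordinates"][0][0])  # [-122.5, 37.9]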
459e85deb9fba72ad4b8079a9651fd546ad39f2f
|
RavelBio/NucleoATAC
|
nucleoatac/cli.py
|
[
"MIT"
] |
Python
|
add_run_parser
|
<not_specific>
|
def add_run_parser( subparsers):
"""Add argument parsers for the run utility
"""
parser = subparsers.add_parser("run", help = "Main nucleoatac utility-- runs through occupancy determination & calling nuc positions")
group1 = parser.add_argument_group('Required', 'Necessary arguments')
group1.add_argument('--bed', metavar='bed_file' , help = 'Regions for which \
to do stuff.', required=True)
group1.add_argument('--bam', metavar='bam_file',
help = 'Accepts sorted BAM file', required=True)
group1.add_argument('--out', metavar='output_basename',
help="give output basename", required=True)
group1.add_argument('--fasta', metavar = 'genome_seq',
help = 'Indexed fasta file', required=True)
group4 = parser.add_argument_group("Bias calculation parameters","")
group4.add_argument('--pwm', metavar = 'Tn5_PWM', help = "PWM descriptor file. Default is Human.PWM.txt included in package", default = "Human")
group3 = parser.add_argument_group('General options', '')
group3.add_argument('--cores', metavar = 'num_cores',default=1,
help='Number of cores to use',type=int)
group3.add_argument('--write_all', action="store_true", default = False,
help="write all tracks")
return
|
Add argument parsers for the run utility
|
Add argument parsers for the run utility
|
[
"Add",
"argument",
"parsers",
"for",
"the",
"run",
"utility"
] |
def add_run_parser( subparsers):
parser = subparsers.add_parser("run", help = "Main nucleoatac utility-- runs through occupancy determination & calling nuc positions")
group1 = parser.add_argument_group('Required', 'Necessary arguments')
group1.add_argument('--bed', metavar='bed_file' , help = 'Regions for which \
to do stuff.', required=True)
group1.add_argument('--bam', metavar='bam_file',
help = 'Accepts sorted BAM file', required=True)
group1.add_argument('--out', metavar='output_basename',
help="give output basename", required=True)
group1.add_argument('--fasta', metavar = 'genome_seq',
help = 'Indexed fasta file', required=True)
group4 = parser.add_argument_group("Bias calculation parameters","")
group4.add_argument('--pwm', metavar = 'Tn5_PWM', help = "PWM descriptor file. Default is Human.PWM.txt included in package", default = "Human")
group3 = parser.add_argument_group('General options', '')
group3.add_argument('--cores', metavar = 'num_cores',default=1,
help='Number of cores to use',type=int)
group3.add_argument('--write_all', action="store_true", default = False,
help="write all tracks")
return
|
[
"def",
"add_run_parser",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"run\"",
",",
"help",
"=",
"\"Main nucleoatac utility-- runs through occupancy determination & calling nuc positions\"",
")",
"group1",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Required'",
",",
"'Necessary arguments'",
")",
"group1",
".",
"add_argument",
"(",
"'--bed'",
",",
"metavar",
"=",
"'bed_file'",
",",
"help",
"=",
"'Regions for which \\\n to do stuff.'",
",",
"required",
"=",
"True",
")",
"group1",
".",
"add_argument",
"(",
"'--bam'",
",",
"metavar",
"=",
"'bam_file'",
",",
"help",
"=",
"'Accepts sorted BAM file'",
",",
"required",
"=",
"True",
")",
"group1",
".",
"add_argument",
"(",
"'--out'",
",",
"metavar",
"=",
"'output_basename'",
",",
"help",
"=",
"\"give output basename\"",
",",
"required",
"=",
"True",
")",
"group1",
".",
"add_argument",
"(",
"'--fasta'",
",",
"metavar",
"=",
"'genome_seq'",
",",
"help",
"=",
"'Indexed fasta file'",
",",
"required",
"=",
"True",
")",
"group4",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Bias calculation parameters\"",
",",
"\"\"",
")",
"group4",
".",
"add_argument",
"(",
"'--pwm'",
",",
"metavar",
"=",
"'Tn5_PWM'",
",",
"help",
"=",
"\"PWM descriptor file. Default is Human.PWM.txt included in package\"",
",",
"default",
"=",
"\"Human\"",
")",
"group3",
"=",
"parser",
".",
"add_argument_group",
"(",
"'General options'",
",",
"''",
")",
"group3",
".",
"add_argument",
"(",
"'--cores'",
",",
"metavar",
"=",
"'num_cores'",
",",
"default",
"=",
"1",
",",
"help",
"=",
"'Number of cores to use'",
",",
"type",
"=",
"int",
")",
"group3",
".",
"add_argument",
"(",
"'--write_all'",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"write all tracks\"",
")",
"return"
] |
Add argument parsers for the run utility
|
[
"Add",
"argument",
"parsers",
"for",
"the",
"run",
"utility"
] |
[
"\"\"\"Add argument parsers for the run utility\n\n \"\"\""
] |
[
{
"param": "subparsers",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "subparsers",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_run_parser( subparsers):
parser = subparsers.add_parser("run", help = "Main nucleoatac utility-- runs through occupancy determination & calling nuc positions")
group1 = parser.add_argument_group('Required', 'Necessary arguments')
group1.add_argument('--bed', metavar='bed_file' , help = 'Regions for which \
to do stuff.', required=True)
group1.add_argument('--bam', metavar='bam_file',
help = 'Accepts sorted BAM file', required=True)
group1.add_argument('--out', metavar='output_basename',
help="give output basename", required=True)
group1.add_argument('--fasta', metavar = 'genome_seq',
help = 'Indexed fasta file', required=True)
group4 = parser.add_argument_group("Bias calculation parameters","")
group4.add_argument('--pwm', metavar = 'Tn5_PWM', help = "PWM descriptor file. Default is Human.PWM.txt included in package", default = "Human")
group3 = parser.add_argument_group('General options', '')
group3.add_argument('--cores', metavar = 'num_cores',default=1,
help='Number of cores to use',type=int)
group3.add_argument('--write_all', action="store_true", default = False,
help="write all tracks")
return
| 842 | 763 |
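A self-contained sketch wiring `add_run_parser` in the record above into an argparse CLI; the file names are illustrative:

import argparse
parser = argparse.ArgumentParser()
add_run_parser(parser.add_subparsers())
args = parser.parse_args(["run", "--bed", "regions.bed", "--bam", "reads.bam",
                          "--out", "result", "--fasta", "genome.fa"])
print(args.cores, args.write_all)  # 1 False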
f583fa78b44781558f68a0ab2880ef51089e6499
|
cschloer/pudl
|
src/pudl/convert/merge_datapkgs.py
|
[
"MIT"
] |
Python
|
check_etl_params
| null |
def check_etl_params(dps):
"""
Verify that datapackages to be merged have compatible ETL params.
Given that all of the input data packages come from the same ETL run, which
means they will have used the same input data, the only way they should
potentially differ is in the ETL parameters which were used to generate
them. This function pulls the data source specific ETL params which we
store in each datapackage descriptor and checks that within a given data
source (e.g. eia923, ferc1) all of the ETL parameters are identical (e.g.
the years, states, and tables loaded).
Args:
dps (iterable): A list of datapackage.Package objects, representing the
datapackages to be merged.
Returns:
None
Raises:
ValueError: If the PUDL ETL parameters associated with any given data
source are not identical across all instances of that data source
within the datapackages to be merged. Also if the ETL UUIDs for all
of the datapackages to be merged are not identical.
"""
# These are all the possible datasets right now... note that this is
# slightly different from the data *source* codes, because we have merged
# the EIA 860 and EIA 923 souces into a single dataset called EIA...
dataset_codes = ["eia", "epacems", "ferc1", "epaipm"]
# For each of the unique source codes, verify that all ETL parameters
# associated with it in any of the input data packages are identical:
for dataset_code in dataset_codes:
etl_params = []
for dp in dps:
for dataset in dp.descriptor["etl-parameters-pudl"]:
if dataset_code in dataset.keys():
etl_params.append(dataset[dataset_code])
for params in etl_params:
if not params == etl_params[0]:
raise ValueError(
f"Mismatched PUDL ETL parameters for {dataset_code}.")
|
Verify that datapackages to be merged have compatible ETL params.
Given that all of the input data packages come from the same ETL run, which
means they will have used the same input data, the only way they should
potentially differ is in the ETL parameters which were used to generate
them. This function pulls the data source specific ETL params which we
store in each datapackage descriptor and checks that within a given data
source (e.g. eia923, ferc1) all of the ETL parameters are identical (e.g.
the years, states, and tables loaded).
Args:
dps (iterable): A list of datapackage.Package objects, representing the
datapackages to be merged.
Returns:
None
Raises:
ValueError: If the PUDL ETL parameters associated with any given data
source are not identical across all instances of that data source
within the datapackages to be merged. Also if the ETL UUIDs for all
of the datapackages to be merged are not identical.
|
Verify that datapackages to be merged have compatible ETL params.
Given that all of the input data packages come from the same ETL run, which
means they will have used the same input data, the only way they should
potentially differ is in the ETL parameters which were used to generate
them. This function pulls the data source specific ETL params which we
store in each datapackage descriptor and checks that within a given data
source all of the ETL parameters are identical.
|
[
"Verify",
"that",
"datapackages",
"to",
"be",
"merged",
"have",
"compatible",
"ETL",
"params",
".",
"Given",
"that",
"all",
"of",
"the",
"input",
"data",
"packages",
"come",
"from",
"the",
"same",
"ETL",
"run",
"which",
"means",
"they",
"will",
"have",
"used",
"the",
"same",
"input",
"data",
"the",
"only",
"way",
"they",
"should",
"potentially",
"differ",
"is",
"in",
"the",
"ETL",
"parameters",
"which",
"were",
"used",
"to",
"generate",
"them",
".",
"This",
"function",
"pulls",
"the",
"data",
"source",
"specific",
"ETL",
"params",
"which",
"we",
"store",
"in",
"each",
"datapackage",
"descriptor",
"and",
"checks",
"that",
"within",
"a",
"given",
"data",
"source",
"all",
"of",
"the",
"ETL",
"parameters",
"are",
"identical",
"."
] |
def check_etl_params(dps):
dataset_codes = ["eia", "epacems", "ferc1", "epaipm"]
for dataset_code in dataset_codes:
etl_params = []
for dp in dps:
for dataset in dp.descriptor["etl-parameters-pudl"]:
if dataset_code in dataset.keys():
etl_params.append(dataset[dataset_code])
for params in etl_params:
if not params == etl_params[0]:
raise ValueError(
f"Mismatched PUDL ETL parameters for {dataset_code}.")
|
[
"def",
"check_etl_params",
"(",
"dps",
")",
":",
"dataset_codes",
"=",
"[",
"\"eia\"",
",",
"\"epacems\"",
",",
"\"ferc1\"",
",",
"\"epaipm\"",
"]",
"for",
"dataset_code",
"in",
"dataset_codes",
":",
"etl_params",
"=",
"[",
"]",
"for",
"dp",
"in",
"dps",
":",
"for",
"dataset",
"in",
"dp",
".",
"descriptor",
"[",
"\"etl-parameters-pudl\"",
"]",
":",
"if",
"dataset_code",
"in",
"dataset",
".",
"keys",
"(",
")",
":",
"etl_params",
".",
"append",
"(",
"dataset",
"[",
"dataset_code",
"]",
")",
"for",
"params",
"in",
"etl_params",
":",
"if",
"not",
"params",
"==",
"etl_params",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"f\"Mismatched PUDL ETL parameters for {dataset_code}.\"",
")"
] |
Verify that datapackages to be merged have compatible ETL params.
|
[
"Verify",
"that",
"datapackages",
"to",
"be",
"merged",
"have",
"compatible",
"ETL",
"params",
"."
] |
[
"\"\"\"\n Verify that datapackages to be merged have compatible ETL params.\n\n Given that all of the input data packages come from the same ETL run, which\n means they will have used the same input data, the only way they should\n potentially differ is in the ETL parameters which were used to generate\n them. This function pulls the data source specific ETL params which we\n store in each datapackage descriptor and checks that within a given data\n source (e.g. eia923, ferc1) all of the ETL parameters are identical (e.g.\n the years, states, and tables loaded).\n\n Args:\n dps (iterable): A list of datapackage.Package objects, representing the\n datapackages to be merged.\n\n Returns:\n None\n\n Raises:\n ValueError: If the PUDL ETL parameters associated with any given data\n source are not identical across all instances of that data source\n within the datapackages to be merged. Also if the ETL UUIDs for all\n of the datapackages to be merged are not identical.\n\n \"\"\"",
"# These are all the possible datasets right now... note that this is",
"# slightly different from the data *source* codes, because we have merged",
"# the EIA 860 and EIA 923 souces into a single dataset called EIA...",
"# For each of the unique source codes, verify that all ETL parameters",
"# associated with it in any of the input data packages are identical:"
] |
[
{
"param": "dps",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [
{
"docstring": "If the PUDL ETL parameters associated with any given data\nsource are not identical across all instances of that data source\nwithin the datapackages to be merged. Also if the ETL UUIDs for all\nof the datapackages to be merged are not identical.",
"docstring_tokens": [
"If",
"the",
"PUDL",
"ETL",
"parameters",
"associated",
"with",
"any",
"given",
"data",
"source",
"are",
"not",
"identical",
"across",
"all",
"instances",
"of",
"that",
"data",
"source",
"within",
"the",
"datapackages",
"to",
"be",
"merged",
".",
"Also",
"if",
"the",
"ETL",
"UUIDs",
"for",
"all",
"of",
"the",
"datapackages",
"to",
"be",
"merged",
"are",
"not",
"identical",
"."
],
"type": "ValueError"
}
],
"params": [
{
"identifier": "dps",
"type": null,
"docstring": "A list of datapackage.Package objects, representing the\ndatapackages to be merged.",
"docstring_tokens": [
"A",
"list",
"of",
"datapackage",
".",
"Package",
"objects",
"representing",
"the",
"datapackages",
"to",
"be",
"merged",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def check_etl_params(dps):
dataset_codes = ["eia", "epacems", "ferc1", "epaipm"]
for dataset_code in dataset_codes:
etl_params = []
for dp in dps:
for dataset in dp.descriptor["etl-parameters-pudl"]:
if dataset_code in dataset.keys():
etl_params.append(dataset[dataset_code])
for params in etl_params:
if not params == etl_params[0]:
raise ValueError(
f"Mismatched PUDL ETL parameters for {dataset_code}.")
| 843 | 527 |
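A minimal usage sketch for the check_etl_params record above, assuming its definition is in scope; FakePackage is a hypothetical stand-in exposing only the descriptor attribute the function reads (real callers would pass datapackage.Package objects):

class FakePackage:
    # hypothetical stand-in; only the descriptor attribute is consulted
    def __init__(self, etl_params):
        self.descriptor = {"etl-parameters-pudl": etl_params}

dps = [FakePackage([{"ferc1": {"years": [2019]}}]),
       FakePackage([{"ferc1": {"years": [2019]}}])]
check_etl_params(dps)  # identical ferc1 params: returns None, no exception

# Mismatched years across the packages would instead raise:
# ValueError: Mismatched PUDL ETL parameters for ferc1.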
c8d0bbd97d7038a399f367036ee9da399ef7f546
|
iplo/Chain
|
ui/gl/generate_bindings.py
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] |
Python
|
LooksLikeExtensionFunction
|
<not_specific>
|
def LooksLikeExtensionFunction(function):
"""Heuristic to see if a function name is consistent with extension function
naming."""
vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function)
return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC']
|
Heuristic to see if a function name is consistent with extension function
naming.
|
Heuristic to see if a function name is consistent with extension function
naming.
|
[
"Heuristic",
"to",
"see",
"if",
"a",
"function",
"name",
"is",
"consistent",
"with",
"extension",
"function",
"naming",
"."
] |
def LooksLikeExtensionFunction(function):
vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function)
return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC']
|
[
"def",
"LooksLikeExtensionFunction",
"(",
"function",
")",
":",
"vendor",
"=",
"re",
".",
"match",
"(",
"r'\\w+?([A-Z][A-Z]+)$'",
",",
"function",
")",
"return",
"vendor",
"is",
"not",
"None",
"and",
"not",
"vendor",
".",
"group",
"(",
"1",
")",
"in",
"[",
"'GL'",
",",
"'API'",
",",
"'DC'",
"]"
] |
Heuristic to see if a function name is consistent with extension function
naming.
|
[
"Heuristic",
"to",
"see",
"if",
"a",
"function",
"name",
"is",
"consistent",
"with",
"extension",
"function",
"naming",
"."
] |
[
"\"\"\"Heuristic to see if a function name is consistent with extension function\n naming.\"\"\""
] |
[
{
"param": "function",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "function",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def LooksLikeExtensionFunction(function):
vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function)
return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC']
| 844 | 349 |
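A few illustrative checks for LooksLikeExtensionFunction above, assuming it and the re import are in scope (the GL entry-point names are just examples):

assert LooksLikeExtensionFunction("glBindVertexArrayOES")   # OES reads as a vendor suffix
assert not LooksLikeExtensionFunction("glBindVertexArray")  # no trailing all-caps run
assert not LooksLikeExtensionFunction("glGetStringAPI")     # API is on the exclusion list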
cfa58e6253cb7e4d9fc5726078bb4bf19c998fb3
|
pusscat/refNes
|
MOS6502.py
|
[
"BSD-2-Clause"
] |
Python
|
create_carry_condition
|
<not_specific>
|
def create_carry_condition(new_val, sub_op):
"""Return boolean value whether operation creates a carry condition"""
if not sub_op:
carry_cond = new_val > 0xFF
else:
carry_cond = new_val >= 0
return carry_cond
|
Return boolean value whether operation creates a carry condition
|
Return boolean value whether operation creates a carry condition
|
[
"Return",
"boolean",
"value",
"whether",
"operation",
"creates",
"a",
"carry",
"condition"
] |
def create_carry_condition(new_val, sub_op):
if not sub_op:
carry_cond = new_val > 0xFF
else:
carry_cond = new_val >= 0
return carry_cond
|
[
"def",
"create_carry_condition",
"(",
"new_val",
",",
"sub_op",
")",
":",
"if",
"not",
"sub_op",
":",
"carry_cond",
"=",
"new_val",
">",
"0xFF",
"else",
":",
"carry_cond",
"=",
"new_val",
">=",
"0",
"return",
"carry_cond"
] |
Return boolean value whether operation creates a carry condition
|
[
"Return",
"boolean",
"value",
"whether",
"operation",
"creates",
"a",
"carry",
"condition"
] |
[
"\"\"\"Return boolean value whether operation creates a carry condition\"\"\""
] |
[
{
"param": "new_val",
"type": null
},
{
"param": "sub_op",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "new_val",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sub_op",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create_carry_condition(new_val, sub_op):
if not sub_op:
carry_cond = new_val > 0xFF
else:
carry_cond = new_val >= 0
return carry_cond
| 845 | 455 |
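Sanity checks for create_carry_condition above, with values chosen to mirror 8-bit add/subtract results:

assert create_carry_condition(0xFF + 0xFF, sub_op=False)   # 0x1FE overflows 8 bits
assert not create_carry_condition(0x42, sub_op=False)
assert create_carry_condition(0, sub_op=True)              # subtraction without borrow
assert not create_carry_condition(-1, sub_op=True)         # borrow occurred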
6be4de9498041fb332f5d01742cc59c21ec8e26b
|
Jacob-Spiegel/SmilesClickChem
|
smilesclickchem/operators/convert_files/gypsum_dl/gypsum_dl/Parallelizer.py
|
[
"Apache-2.0"
] |
Python
|
count_processors
|
<not_specific>
|
def count_processors(num_inputs, num_procs):
"""
Checks processors available and returns a safe number of them to
utilize.
:param int num_inputs: The number of inputs.
:param int num_procs: The number of desired processors.
:returns: The number of processors to use.
"""
# first, if num_procs <= 0, determine the number of processors to
    # use programmatically
if num_procs <= 0:
num_procs = multiprocessing.cpu_count()
# reduce the number of processors if too many have been specified
if num_inputs < num_procs:
num_procs = num_inputs
return num_procs
|
Checks processors available and returns a safe number of them to
utilize.
:param int num_inputs: The number of inputs.
:param int num_procs: The number of desired processors.
:returns: The number of processors to use.
|
Checks processors available and returns a safe number of them to
utilize.
|
[
"Checks",
"processors",
"available",
"and",
"returns",
"a",
"safe",
"number",
"of",
"them",
"to",
"utilize",
"."
] |
def count_processors(num_inputs, num_procs):
if num_procs <= 0:
num_procs = multiprocessing.cpu_count()
if num_inputs < num_procs:
num_procs = num_inputs
return num_procs
|
[
"def",
"count_processors",
"(",
"num_inputs",
",",
"num_procs",
")",
":",
"if",
"num_procs",
"<=",
"0",
":",
"num_procs",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"if",
"num_inputs",
"<",
"num_procs",
":",
"num_procs",
"=",
"num_inputs",
"return",
"num_procs"
] |
Checks processors available and returns a safe number of them to
utilize.
|
[
"Checks",
"processors",
"available",
"and",
"returns",
"a",
"safe",
"number",
"of",
"them",
"to",
"utilize",
"."
] |
[
"\"\"\"\n Checks processors available and returns a safe number of them to\n utilize.\n\n :param int num_inputs: The number of inputs.\n :param int num_procs: The number of desired processors.\n\n :returns: The number of processors to use.\n \"\"\"",
"# first, if num_procs <= 0, determine the number of processors to",
"# use programatically",
"# reduce the number of processors if too many have been specified"
] |
[
{
"param": "num_inputs",
"type": null
},
{
"param": "num_procs",
"type": null
}
] |
{
"returns": [
{
"docstring": "The number of processors to use.",
"docstring_tokens": [
"The",
"number",
"of",
"processors",
"to",
"use",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "num_inputs",
"type": null,
"docstring": "The number of inputs.",
"docstring_tokens": [
"The",
"number",
"of",
"inputs",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "num_procs",
"type": null,
"docstring": "The number of desired processors.",
"docstring_tokens": [
"The",
"number",
"of",
"desired",
"processors",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import multiprocessing
def count_processors(num_inputs, num_procs):
if num_procs <= 0:
num_procs = multiprocessing.cpu_count()
if num_inputs < num_procs:
num_procs = num_inputs
return num_procs
| 846 | 807 |
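Behaviour checks for count_processors above; the absolute CPU count is machine-dependent, so only the clamping logic is asserted:

import multiprocessing

assert count_processors(num_inputs=4, num_procs=16) == 4   # never more procs than inputs
assert count_processors(num_inputs=100, num_procs=8) == 8
assert count_processors(num_inputs=100, num_procs=0) <= multiprocessing.cpu_count()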
08305250b0131cc023e6769042ec149c5d80e35a
|
KateWang2016/image_dl
|
image_downloader/utils.py
|
[
"MIT"
] |
Python
|
parse_args
|
<not_specific>
|
def parse_args():
"""Get the arguments from command line"""
parser = argparse.ArgumentParser(
description="Download images from one or multiple urls"
)
parser.add_argument("url", help="A url to download from")
parser.add_argument("-d", "--dir", help="Directory to save images")
parser.add_argument(
"--formats",
nargs="*",
default=["jpg", "png", "gif", "svg", "jpeg", "webp"],
help="Seperate multiple format strings with space",
)
args = parser.parse_args()
return args
|
Get the arguments from command line
|
Get the arguments from command line
|
[
"Get",
"the",
"arguments",
"from",
"command",
"line"
] |
def parse_args():
parser = argparse.ArgumentParser(
description="Download images from one or multiple urls"
)
parser.add_argument("url", help="A url to download from")
parser.add_argument("-d", "--dir", help="Directory to save images")
parser.add_argument(
"--formats",
nargs="*",
default=["jpg", "png", "gif", "svg", "jpeg", "webp"],
help="Seperate multiple format strings with space",
)
args = parser.parse_args()
return args
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Download images from one or multiple urls\"",
")",
"parser",
".",
"add_argument",
"(",
"\"url\"",
",",
"help",
"=",
"\"A url to download from\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--dir\"",
",",
"help",
"=",
"\"Directory to save images\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--formats\"",
",",
"nargs",
"=",
"\"*\"",
",",
"default",
"=",
"[",
"\"jpg\"",
",",
"\"png\"",
",",
"\"gif\"",
",",
"\"svg\"",
",",
"\"jpeg\"",
",",
"\"webp\"",
"]",
",",
"help",
"=",
"\"Seperate multiple format strings with space\"",
",",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] |
Get the arguments from command line
|
[
"Get",
"the",
"arguments",
"from",
"command",
"line"
] |
[
"\"\"\"Get the arguments from command line\"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import argparse
def parse_args():
parser = argparse.ArgumentParser(
description="Download images from one or multiple urls"
)
parser.add_argument("url", help="A url to download from")
parser.add_argument("-d", "--dir", help="Directory to save images")
parser.add_argument(
"--formats",
nargs="*",
default=["jpg", "png", "gif", "svg", "jpeg", "webp"],
help="Seperate multiple format strings with space",
)
args = parser.parse_args()
return args
| 847 | 882 |
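A usage sketch for parse_args above; since it reads sys.argv internally, the command line is simulated here (the URL is illustrative):

import sys

sys.argv = ["image_dl", "https://example.com/gallery", "-d", "imgs"]
args = parse_args()
assert args.url == "https://example.com/gallery"
assert args.dir == "imgs"
assert "png" in args.formats  # the default format list applies when --formats is omitted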
bfc25c619cd95d1ca069cc355ea6e7c9dbdd0bc5
|
nicolasmota/pyIE
|
ie/pi.py
|
[
"MIT"
] |
Python
|
start
|
<not_specific>
|
def start(st_reg_number):
"""Checks the number valiaty for the Piaui state"""
divisor = 11
if len(st_reg_number) > 9:
return False
if len(st_reg_number) < 9:
return False
sum_total = 0
peso = 9
for i in range(len(st_reg_number)-1):
sum_total = sum_total + int(st_reg_number[i]) * peso
peso = peso - 1
rest_division = sum_total % divisor
digit = divisor - rest_division
if digit == 10 or digit == 11:
digit = 0
return digit == int(st_reg_number[len(st_reg_number)-1])
|
Checks the number validity for the Piaui state
|
Checks the number validity for the Piaui state
|
[
"Checks",
"the",
"number",
"valiaty",
"for",
"the",
"Piaui",
"state"
] |
def start(st_reg_number):
divisor = 11
if len(st_reg_number) > 9:
return False
if len(st_reg_number) < 9:
return False
sum_total = 0
peso = 9
for i in range(len(st_reg_number)-1):
sum_total = sum_total + int(st_reg_number[i]) * peso
peso = peso - 1
rest_division = sum_total % divisor
digit = divisor - rest_division
if digit == 10 or digit == 11:
digit = 0
return digit == int(st_reg_number[len(st_reg_number)-1])
|
[
"def",
"start",
"(",
"st_reg_number",
")",
":",
"divisor",
"=",
"11",
"if",
"len",
"(",
"st_reg_number",
")",
">",
"9",
":",
"return",
"False",
"if",
"len",
"(",
"st_reg_number",
")",
"<",
"9",
":",
"return",
"False",
"sum_total",
"=",
"0",
"peso",
"=",
"9",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"st_reg_number",
")",
"-",
"1",
")",
":",
"sum_total",
"=",
"sum_total",
"+",
"int",
"(",
"st_reg_number",
"[",
"i",
"]",
")",
"*",
"peso",
"peso",
"=",
"peso",
"-",
"1",
"rest_division",
"=",
"sum_total",
"%",
"divisor",
"digit",
"=",
"divisor",
"-",
"rest_division",
"if",
"digit",
"==",
"10",
"or",
"digit",
"==",
"11",
":",
"digit",
"=",
"0",
"return",
"digit",
"==",
"int",
"(",
"st_reg_number",
"[",
"len",
"(",
"st_reg_number",
")",
"-",
"1",
"]",
")"
] |
Checks the number validity for the Piaui state
|
[
"Checks",
"the",
"number",
"valiaty",
"for",
"the",
"Piaui",
"state"
] |
[
"\"\"\"Checks the number valiaty for the Piaui state\"\"\""
] |
[
{
"param": "st_reg_number",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "st_reg_number",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def start(st_reg_number):
divisor = 11
if len(st_reg_number) > 9:
return False
if len(st_reg_number) < 9:
return False
sum_total = 0
peso = 9
for i in range(len(st_reg_number)-1):
sum_total = sum_total + int(st_reg_number[i]) * peso
peso = peso - 1
rest_division = sum_total % divisor
digit = divisor - rest_division
if digit == 10 or digit == 11:
digit = 0
return digit == int(st_reg_number[len(st_reg_number)-1])
| 848 | 174 |
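A worked example for the Piaui check above: weighting base digits 1..8 by 9 down to 2 gives a sum of 156, 156 % 11 == 2, and 11 - 2 == 9, so "123456789" carries a valid check digit:

assert start("123456789")      # computed check digit 9 matches the last digit
assert not start("123456780")  # wrong check digit
assert not start("12345678")   # must be exactly 9 digits long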
5398f9cbb329ed186654568f3d5b876e669fa3fa
|
BAMresearch/ctsimu-toolbox
|
ctsimu/helpers.py
|
[
"Apache-2.0"
] |
Python
|
divideAndError
|
<not_specific>
|
def divideAndError(muA, muB, errA, errB):
""" Error propagation upon division; estimation of largest error. """
value = muA / muB
err = errA/abs(muB) + errB*abs(muA/(muB**2))
return value, err
|
Error propagation upon division; estimation of largest error.
|
Error propagation upon division; estimation of largest error.
|
[
"Error",
"propagation",
"upon",
"division",
";",
"estimation",
"of",
"largest",
"error",
"."
] |
def divideAndError(muA, muB, errA, errB):
value = muA / muB
err = errA/abs(muB) + errB*abs(muA/(muB**2))
return value, err
|
[
"def",
"divideAndError",
"(",
"muA",
",",
"muB",
",",
"errA",
",",
"errB",
")",
":",
"value",
"=",
"muA",
"/",
"muB",
"err",
"=",
"errA",
"/",
"abs",
"(",
"muB",
")",
"+",
"errB",
"*",
"abs",
"(",
"muA",
"/",
"(",
"muB",
"**",
"2",
")",
")",
"return",
"value",
",",
"err"
] |
Error propagation upon division; estimation of largest error.
|
[
"Error",
"propagation",
"upon",
"division",
";",
"estimation",
"of",
"largest",
"error",
"."
] |
[
"\"\"\" Error propagation upon division; estimation of largest error. \"\"\""
] |
[
{
"param": "muA",
"type": null
},
{
"param": "muB",
"type": null
},
{
"param": "errA",
"type": null
},
{
"param": "errB",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "muA",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "muB",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "errA",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "errB",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def divideAndError(muA, muB, errA, errB):
value = muA / muB
err = errA/abs(muB) + errB*abs(muA/(muB**2))
return value, err
| 849 | 845 |
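A numeric check for divideAndError above: with muA=10, muB=2, errA=0.1, errB=0.2 the quotient is 5.0 and the worst-case error is 0.1/2 + 0.2*10/2**2 = 0.55:

value, err = divideAndError(10.0, 2.0, 0.1, 0.2)
assert value == 5.0
assert abs(err - 0.55) < 1e-12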
1328c721d906ed5a2c51301f23c44a8db7ccd7ee
|
xinan-jiang/iree
|
integrations/tensorflow/bindings/python/pyiree/tf/support/module_utils.py
|
[
"Apache-2.0"
] |
Python
|
create_from_class
| null |
def create_from_class(cls,
module_class: Type[tf.Module],
backend_info: "BackendInfo",
exported_names: Sequence[str] = (),
artifacts_dir: str = None):
"""Compile a tf.Module subclass to the target backend in backend_info.
Args:
module_class: The tf.Module subclass to compile.
backend_info: BackendInfo with the details for compiling this module.
exported_names: Optional sequence representing the exported names to keep.
artifacts_dir: An optional string pointing to where compilation artifacts
should be saved. No compilation artifacts will be saved if this is not
provided.
"""
raise NotImplementedError()
|
Compile a tf.Module subclass to the target backend in backend_info.
Args:
module_class: The tf.Module subclass to compile.
backend_info: BackendInfo with the details for compiling this module.
exported_names: Optional sequence representing the exported names to keep.
artifacts_dir: An optional string pointing to where compilation artifacts
should be saved. No compilation artifacts will be saved if this is not
provided.
|
Compile a tf.Module subclass to the target backend in backend_info.
|
[
"Compile",
"a",
"tf",
".",
"Module",
"subclass",
"to",
"the",
"target",
"backend",
"in",
"backend_info",
"."
] |
def create_from_class(cls,
module_class: Type[tf.Module],
backend_info: "BackendInfo",
exported_names: Sequence[str] = (),
artifacts_dir: str = None):
raise NotImplementedError()
|
[
"def",
"create_from_class",
"(",
"cls",
",",
"module_class",
":",
"Type",
"[",
"tf",
".",
"Module",
"]",
",",
"backend_info",
":",
"\"BackendInfo\"",
",",
"exported_names",
":",
"Sequence",
"[",
"str",
"]",
"=",
"(",
")",
",",
"artifacts_dir",
":",
"str",
"=",
"None",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
Compile a tf.Module subclass to the target backend in backend_info.
|
[
"Compile",
"a",
"tf",
".",
"Module",
"subclass",
"to",
"the",
"target",
"backend",
"in",
"backend_info",
"."
] |
[
"\"\"\"Compile a tf.Module subclass to the target backend in backend_info.\n\n Args:\n module_class: The tf.Module subclass to compile.\n backend_info: BackendInfo with the details for compiling this module.\n exported_names: Optional sequence representing the exported names to keep.\n artifacts_dir: An optional string pointing to where compilation artifacts\n should be saved. No compilation artifacts will be saved if this is not\n provided.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "module_class",
"type": "Type[tf.Module]"
},
{
"param": "backend_info",
"type": "\"BackendInfo\""
},
{
"param": "exported_names",
"type": "Sequence[str]"
},
{
"param": "artifacts_dir",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "module_class",
"type": "Type[tf.Module]",
"docstring": "The tf.Module subclass to compile.",
"docstring_tokens": [
"The",
"tf",
".",
"Module",
"subclass",
"to",
"compile",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "backend_info",
"type": "\"BackendInfo\"",
"docstring": "BackendInfo with the details for compiling this module.",
"docstring_tokens": [
"BackendInfo",
"with",
"the",
"details",
"for",
"compiling",
"this",
"module",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "exported_names",
"type": "Sequence[str]",
"docstring": "Optional sequence representing the exported names to keep.",
"docstring_tokens": [
"Optional",
"sequence",
"representing",
"the",
"exported",
"names",
"to",
"keep",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "artifacts_dir",
"type": "str",
"docstring": "An optional string pointing to where compilation artifacts\nshould be saved. No compilation artifacts will be saved if this is not\nprovided.",
"docstring_tokens": [
"An",
"optional",
"string",
"pointing",
"to",
"where",
"compilation",
"artifacts",
"should",
"be",
"saved",
".",
"No",
"compilation",
"artifacts",
"will",
"be",
"saved",
"if",
"this",
"is",
"not",
"provided",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create_from_class(cls,
module_class: Type[tf.Module],
backend_info: "BackendInfo",
exported_names: Sequence[str] = (),
artifacts_dir: str = None):
raise NotImplementedError()
| 850 | 442 |
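Since create_from_class above is an abstract hook that always raises, a concrete backend is expected to override it; the subclass below is purely hypothetical and not part of the IREE API:

class DummyCompiledModule:
    @classmethod
    def create_from_class(cls, module_class, backend_info,
                          exported_names=(), artifacts_dir=None):
        # a real backend would compile module_class for backend_info here
        return cls()

instance = DummyCompiledModule.create_from_class(object, backend_info=None)
assert isinstance(instance, DummyCompiledModule)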
362afc4fc11735535a431d826125d46948baa159
|
cglesner/100-days-of-py
|
dotproduct.py
|
[
"MIT"
] |
Python
|
orthogonal
|
<not_specific>
|
def orthogonal(a):
"""Return an arbitrary vector orthogonal to the input vector.
:type a: tuple of floats.
:return b: tuple of floats.
"""
if len(a) == 1:
return None
# Handle the vector generation one way if any elements
# of the input vector are zero.
if not all(a):
return tuple(0 if a_i else 1 for a_i in a)
# If none of the elements of a are zero, set the first n-1
# elements of b to 1. Set the n-th element of b as follows:
return (1,) * len(a[:-1]) + (-1 * sum(a[:-1]) / a[-1],)
|
Return an arbitrary vector orthogonal to the input vector.
:type a: tuple of floats.
:return b: tuple of floats.
|
Return an arbitrary vector orthogonal to the input vector.
|
[
"Return",
"an",
"arbitrary",
"vector",
"orthogonal",
"to",
"the",
"input",
"vector",
"."
] |
def orthogonal(a):
if len(a) == 1:
return None
if not all(a):
return tuple(0 if a_i else 1 for a_i in a)
return (1,) * len(a[:-1]) + (-1 * sum(a[:-1]) / a[-1],)
|
[
"def",
"orthogonal",
"(",
"a",
")",
":",
"if",
"len",
"(",
"a",
")",
"==",
"1",
":",
"return",
"None",
"if",
"not",
"all",
"(",
"a",
")",
":",
"return",
"tuple",
"(",
"0",
"if",
"a_i",
"else",
"1",
"for",
"a_i",
"in",
"a",
")",
"return",
"(",
"1",
",",
")",
"*",
"len",
"(",
"a",
"[",
":",
"-",
"1",
"]",
")",
"+",
"(",
"-",
"1",
"*",
"sum",
"(",
"a",
"[",
":",
"-",
"1",
"]",
")",
"/",
"a",
"[",
"-",
"1",
"]",
",",
")"
] |
Return an arbitrary vector orthogonal to the input vector.
|
[
"Return",
"an",
"arbitrary",
"vector",
"orthogonal",
"to",
"the",
"input",
"vector",
"."
] |
[
"\"\"\"Return an arbitrary vector orthogonal to the input vector.\n :type a: tuple of floats.\n :return b: tuple of floats.\n \"\"\"",
"# Handle the vector generation one way if any elements",
"# of the input vector are zero.",
"# If none of the elements of a are zero, set the first n-1",
"# elements of b to 1. Set the n-th element of b as follows:"
] |
[
{
"param": "a",
"type": null
}
] |
{
"returns": [
{
"docstring": "tuple of floats.",
"docstring_tokens": [
"tuple",
"of",
"floats",
"."
],
"type": "b"
}
],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def orthogonal(a):
if len(a) == 1:
return None
if not all(a):
return tuple(0 if a_i else 1 for a_i in a)
return (1,) * len(a[:-1]) + (-1 * sum(a[:-1]) / a[-1],)
| 851 | 874 |
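Dot-product checks for orthogonal above:

a = (1.0, 2.0, 3.0)
b = orthogonal(a)                        # (1, 1, -1.0): 1*1 + 2*1 + 3*(-1) == 0
assert sum(x * y for x, y in zip(a, b)) == 0
assert orthogonal((0.0, 5.0)) == (1, 0)  # zero entries trigger the 0/1 branch
assert orthogonal((7.0,)) is None        # no orthogonal vector in one dimension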
76630bd861765d3c6d2db15e39c4a0472d7c323e
|
IAWG-CSBC-PSON/hack2022-06-viz-comp
|
solution2/analytics/two_cluster_mapping.py
|
[
"MIT"
] |
Python
|
jaccard_set
|
<not_specific>
|
def jaccard_set(list1, list2):
"""Define Jaccard Similarity function for two sets"""
intersection = len(list(set(list1).intersection(list2)))
union = (len(list1) + len(list2)) - intersection
return float(intersection) / union
|
Define Jaccard Similarity function for two sets
|
Define Jaccard Similarity function for two sets
|
[
"Define",
"Jaccard",
"Similarity",
"function",
"for",
"two",
"sets"
] |
def jaccard_set(list1, list2):
intersection = len(list(set(list1).intersection(list2)))
union = (len(list1) + len(list2)) - intersection
return float(intersection) / union
|
[
"def",
"jaccard_set",
"(",
"list1",
",",
"list2",
")",
":",
"intersection",
"=",
"len",
"(",
"list",
"(",
"set",
"(",
"list1",
")",
".",
"intersection",
"(",
"list2",
")",
")",
")",
"union",
"=",
"(",
"len",
"(",
"list1",
")",
"+",
"len",
"(",
"list2",
")",
")",
"-",
"intersection",
"return",
"float",
"(",
"intersection",
")",
"/",
"union"
] |
Define Jaccard Similarity function for two sets
|
[
"Define",
"Jaccard",
"Similarity",
"function",
"for",
"two",
"sets"
] |
[
"\"\"\"Define Jaccard Similarity function for two sets\"\"\""
] |
[
{
"param": "list1",
"type": null
},
{
"param": "list2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "list1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "list2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def jaccard_set(list1, list2):
intersection = len(list(set(list1).intersection(list2)))
union = (len(list1) + len(list2)) - intersection
return float(intersection) / union
| 852 | 189 |
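A quick check for jaccard_set above: two lists sharing two of four distinct items score 2/4; note the union term uses raw list lengths, so duplicate entries skew the score:

assert jaccard_set([1, 2, 3], [2, 3, 4]) == 0.5
assert jaccard_set([1, 2], [1, 2]) == 1.0
assert abs(jaccard_set([1, 1, 2], [1, 2]) - 2 / 3) < 1e-12  # duplicate inflates the union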
b10c7ced3ca4b7919db1bb856f3eb5afb2c0a958
|
m-salman-05/libfreenect
|
src/fwfetcher.py
|
[
"Apache-2.0"
] |
Python
|
nice_open_dir
|
<not_specific>
|
def nice_open_dir(dirname):
"""Checks if the output directory with the given name already exists,
and if so, asks for overwrite permission. This means that any file
in that directory might be overwritten.
@param dirname name of the output directory to open
@return overwrite permission
"""
if os.path.isdir(dirname):
print(dirname, "already exists, ok to overwrite files in it? (y/n)",
end=' ')
answer = input("")
return len(answer) > 0 and answer[0] in ["Y", "y"]
else:
return True
|
Checks if the output directory with the given name already exists,
and if so, asks for overwrite permission. This means that any file
in that directory might be overwritten.
@param dirname name of the output directory to open
@return overwrite permission
|
Checks if the output directory with the given name already exists,
and if so, asks for overwrite permission. This means that any file
in that directory might be overwritten.
@param dirname name of the output directory to open
@return overwrite permission
|
[
"Checks",
"if",
"the",
"output",
"directory",
"with",
"the",
"given",
"name",
"already",
"exists",
"and",
"if",
"so",
"asks",
"for",
"overwrite",
"permission",
".",
"This",
"means",
"that",
"any",
"file",
"in",
"that",
"directory",
"might",
"be",
"overwritten",
".",
"@param",
"dirname",
"name",
"of",
"the",
"output",
"directory",
"to",
"open",
"@return",
"overwrite",
"permission"
] |
def nice_open_dir(dirname):
if os.path.isdir(dirname):
print(dirname, "already exists, ok to overwrite files in it? (y/n)",
end=' ')
answer = input("")
return len(answer) > 0 and answer[0] in ["Y", "y"]
else:
return True
|
[
"def",
"nice_open_dir",
"(",
"dirname",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"print",
"(",
"dirname",
",",
"\"already exists, ok to overwrite files in it? (y/n)\"",
",",
"end",
"=",
"' '",
")",
"answer",
"=",
"input",
"(",
"\"\"",
")",
"return",
"len",
"(",
"answer",
")",
">",
"0",
"and",
"answer",
"[",
"0",
"]",
"in",
"[",
"\"Y\"",
",",
"\"y\"",
"]",
"else",
":",
"return",
"True"
] |
Checks if the output directory with the given name already exists,
and if so, asks for overwrite permission.
|
[
"Checks",
"if",
"the",
"output",
"directory",
"with",
"the",
"given",
"name",
"already",
"exists",
"and",
"if",
"so",
"asks",
"for",
"overwrite",
"permission",
"."
] |
[
"\"\"\"Checks if the output directory with the given name already exists,\n and if so, asks for overwrite permission. This means that any file\n in that directory might be overwritten.\n\n @param dirname name of the output directory to open\n @return overwrite permission\n \"\"\""
] |
[
{
"param": "dirname",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "dirname",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def nice_open_dir(dirname):
if os.path.isdir(dirname):
print(dirname, "already exists, ok to overwrite files in it? (y/n)",
end=' ')
answer = input("")
return len(answer) > 0 and answer[0] in ["Y", "y"]
else:
return True
| 853 | 322 |
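A behavioural sketch for nice_open_dir above; a path that does not exist needs no confirmation, while an existing one prompts on stdin:

assert nice_open_dir("no-such-directory-here")  # no prompt, returns True
# nice_open_dir(".") would print the overwrite question and read y/n from stdin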
e5bcbcc03599788bc85fc2793adcdca821d3465a
|
VEuPathDB/websiteconf
|
make_yaml.py
|
[
"Apache-2.0"
] |
Python
|
decomment
| null |
def decomment(csvfile):
""" simple helper function to remove comments from a file """
for row in csvfile:
raw = row.split('#')[0].strip()
if raw: yield row
|
simple helper function to remove comments from a file
|
simple helper function to remove comments from a file
|
[
"simple",
"helper",
"function",
"to",
"remove",
"comments",
"from",
"a",
"file"
] |
def decomment(csvfile):
for row in csvfile:
raw = row.split('#')[0].strip()
if raw: yield row
|
[
"def",
"decomment",
"(",
"csvfile",
")",
":",
"for",
"row",
"in",
"csvfile",
":",
"raw",
"=",
"row",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"raw",
":",
"yield",
"row"
] |
simple helper function to remove comments from a file
|
[
"simple",
"helper",
"function",
"to",
"remove",
"comments",
"from",
"a",
"file"
] |
[
"\"\"\" simple helper function to remove comments from a file \"\"\""
] |
[
{
"param": "csvfile",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "csvfile",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def decomment(csvfile):
for row in csvfile:
raw = row.split('#')[0].strip()
if raw: yield row
| 854 | 817 |
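A small check for decomment above; note it yields the original row unchanged (trailing comment included) whenever any text precedes the '#':

rows = ["a,b,c\n", "# whole-line comment\n", "x,y  # trailing comment\n", "   \n"]
assert list(decomment(rows)) == ["a,b,c\n", "x,y  # trailing comment\n"]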
1b4d7f5093e49ae6452905cde0ec670b9abd72d4
|
QCoding/qsum
|
qsum/data/to_bytes_custom.py
|
[
"MIT"
] |
Python
|
int_to_bytes
|
bytes
|
def int_to_bytes(obj: int) -> bytes:
"""Convert int's in to the most compact byte representation possible
    CURRENTLY UNUSED, while cleverly packing integers tightly it's actually ~5x slower than just calling repr
Args:
obj: integer to convert to bytes
Returns:
bytes representing integer
"""
return obj.to_bytes(1 if obj == 0 else math.floor((math.log2(abs(obj)) + 1) / 8 + 1), byteorder='big', signed=True)
|
Convert int's in to the most compact byte representation possible
CURRENTLY UNUSED, while cleverly packing integers tightly it's actually ~5x slower than just calling repr
Args:
obj: integer to convert to bytes
Returns:
bytes representing integer
|
Convert int's in to the most compact byte representation possible
CURRENTLY UNUSED, while cleverly packing integers tightly it's actually ~5x slower than just calling repr
|
[
"Convert",
"int",
"'",
"s",
"in",
"to",
"the",
"most",
"compact",
"byte",
"representation",
"possible",
"CURRENTLY",
"UNUSED",
"while",
"cleverly",
"packing",
"integers",
"tightly",
"it",
"'",
"s",
"actually",
"~5x",
"slower",
"then",
"just",
"calling",
"repr"
] |
def int_to_bytes(obj: int) -> bytes:
return obj.to_bytes(1 if obj == 0 else math.floor((math.log2(abs(obj)) + 1) / 8 + 1), byteorder='big', signed=True)
|
[
"def",
"int_to_bytes",
"(",
"obj",
":",
"int",
")",
"->",
"bytes",
":",
"return",
"obj",
".",
"to_bytes",
"(",
"1",
"if",
"obj",
"==",
"0",
"else",
"math",
".",
"floor",
"(",
"(",
"math",
".",
"log2",
"(",
"abs",
"(",
"obj",
")",
")",
"+",
"1",
")",
"/",
"8",
"+",
"1",
")",
",",
"byteorder",
"=",
"'big'",
",",
"signed",
"=",
"True",
")"
] |
Convert int's in to the most compact byte representation possible
CURRENTLY UNUSED, while cleverly packing integers tightly it's actually ~5x slower than just calling repr
|
[
"Convert",
"int",
"'",
"s",
"in",
"to",
"the",
"most",
"compact",
"byte",
"representation",
"possible",
"CURRENTLY",
"UNUSED",
"while",
"cleverly",
"packing",
"integers",
"tightly",
"it",
"'",
"s",
"actually",
"~5x",
"slower",
"then",
"just",
"calling",
"repr"
] |
[
"\"\"\"Convert int's in to the most compact byte representation possible\n\n CURRENTLY UNUSED, while cleverly packing integers tightly it's actually ~5x slower then just calling repr\n\n Args:\n obj: integer to convert to bytes\n\n Returns:\n bytes representing integer\n \"\"\""
] |
[
{
"param": "obj",
"type": "int"
}
] |
{
"returns": [
{
"docstring": "bytes representing integer",
"docstring_tokens": [
"bytes",
"representing",
"integer"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "obj",
"type": "int",
"docstring": "integer to convert to bytes",
"docstring_tokens": [
"integer",
"to",
"convert",
"to",
"bytes"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import math
def int_to_bytes(obj: int) -> bytes:
return obj.to_bytes(1 if obj == 0 else math.floor((math.log2(abs(obj)) + 1) / 8 + 1), byteorder='big', signed=True)
| 855 | 848 |
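Boundary checks for int_to_bytes above; each value packs into the fewest signed big-endian bytes:

assert int_to_bytes(0) == b"\x00"
assert int_to_bytes(127) == b"\x7f"
assert int_to_bytes(128) == b"\x00\x80"  # a second byte is needed once the sign bit is set
assert int_to_bytes(-1) == b"\xff"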
31403d4219775b4091c8d57d053acdbdf23a329d
|
BD2KGenomics/toil
|
src/toil/cwl/cwltoil.py
|
[
"Apache-2.0"
] |
Python
|
check_directory_dict_invariants
|
None
|
def check_directory_dict_invariants(
contents: DirectoryContents
) -> None:
"""
Make sure a directory structure dict makes sense. Throws an error
otherwise.
Currently just checks to make sure no empty-string keys exist.
"""
for name, item in contents.items():
if name == '':
raise RuntimeError('Found nameless entry in directory: ' + json.dumps(contents, indent=2))
if isinstance(item, dict):
check_directory_dict_invariants(item)
|
Make sure a directory structure dict makes sense. Throws an error
otherwise.
Currently just checks to make sure no empty-string keys exist.
|
Make sure a directory structure dict makes sense. Throws an error
otherwise.
Currently just checks to make sure no empty-string keys exist.
|
[
"Make",
"sure",
"a",
"directory",
"structure",
"dict",
"makes",
"sense",
".",
"Throws",
"an",
"error",
"otherwise",
".",
"Currently",
"just",
"checks",
"to",
"make",
"sure",
"no",
"empty",
"-",
"string",
"keys",
"exist",
"."
] |
def check_directory_dict_invariants(
contents: DirectoryContents
) -> None:
for name, item in contents.items():
if name == '':
raise RuntimeError('Found nameless entry in directory: ' + json.dumps(contents, indent=2))
if isinstance(item, dict):
check_directory_dict_invariants(item)
|
[
"def",
"check_directory_dict_invariants",
"(",
"contents",
":",
"DirectoryContents",
")",
"->",
"None",
":",
"for",
"name",
",",
"item",
"in",
"contents",
".",
"items",
"(",
")",
":",
"if",
"name",
"==",
"''",
":",
"raise",
"RuntimeError",
"(",
"'Found nameless entry in directory: '",
"+",
"json",
".",
"dumps",
"(",
"contents",
",",
"indent",
"=",
"2",
")",
")",
"if",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"check_directory_dict_invariants",
"(",
"item",
")"
] |
Make sure a directory structure dict makes sense.
|
[
"Make",
"sure",
"a",
"directory",
"structure",
"dict",
"makes",
"sense",
"."
] |
[
"\"\"\"\n Make sure a directory structure dict makes sense. Throws an error\n otherwise.\n\n Currently just checks to make sure no empty-string keys exist.\n \"\"\""
] |
[
{
"param": "contents",
"type": "DirectoryContents"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "contents",
"type": "DirectoryContents",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def check_directory_dict_invariants(
contents: DirectoryContents
) -> None:
for name, item in contents.items():
if name == '':
raise RuntimeError('Found nameless entry in directory: ' + json.dumps(contents, indent=2))
if isinstance(item, dict):
check_directory_dict_invariants(item)
| 856 | 921 |
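A sketch for check_directory_dict_invariants above; DirectoryContents is treated here as a nested dict of names, and the "file-ref" placeholder values are illustrative:

check_directory_dict_invariants({"a.txt": "file-ref",
                                 "sub": {"b.txt": "file-ref"}})  # passes silently

# An empty-string key anywhere, including nested, raises:
# check_directory_dict_invariants({"sub": {"": "file-ref"}})  -> RuntimeError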
3c912eba80b9d33972dd45048f100ac1af5d5b69
|
winksaville/bike-simple-simulation
|
bike-sim.py
|
[
"Unlicense"
] |
Python
|
mph
|
<not_specific>
|
def mph(mps):
"""
Meters per second to miles per hour
"""
mpsToMph = 3600.0 / (0.0254 * 12.0 * 5280.0)
return mps * mpsToMph
|
Meters per second to miles per hour
|
Meters per second to miles per hour
|
[
"Meters",
"per",
"second",
"to",
"miles",
"per",
"hour"
] |
def mph(mps):
mpsToMph = 3600.0 / (0.0254 * 12.0 * 5280.0)
return mps * mpsToMph
|
[
"def",
"mph",
"(",
"mps",
")",
":",
"mpsToMph",
"=",
"3600.0",
"/",
"(",
"0.0254",
"*",
"12.0",
"*",
"5280.0",
")",
"return",
"mps",
"*",
"mpsToMph"
] |
Meters per second to miles per hour
|
[
"Meters",
"per",
"second",
"to",
"miles",
"per",
"hour"
] |
[
"\"\"\"\n Meters per second to miles per hour\n \"\"\""
] |
[
{
"param": "mps",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "mps",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def mph(mps):
mpsToMph = 3600.0 / (0.0254 * 12.0 * 5280.0)
return mps * mpsToMph
| 857 | 373 |
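A unit check for mph above: one mile is 0.0254 * 12 * 5280 = 1609.344 m, so 1609.344 m/s converts to exactly 3600 mph:

assert abs(mph(1609.344) - 3600.0) < 1e-9
assert abs(mph(10.0) - 22.369362920544) < 1e-9  # the familiar m/s -> mph factor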
44482bb29aa717f7c5d5964428cd5b561d11d073
|
abs-tudelft/vhsnunzip
|
tests/emu/operators.py
|
[
"MIT"
] |
Python
|
writer
| null |
def writer(stream, fname):
"""Writes the serialized representation of each transfer to the given
file."""
with open(fname, 'w') as fil:
for transfer in stream:
print(transfer.serialize(), file=fil)
yield transfer
|
Writes the serialized representation of each transfer to the given
file.
|
Writes the serialized representation of each transfer to the given
file.
|
[
"Writes",
"the",
"serialized",
"representation",
"of",
"each",
"transfer",
"to",
"the",
"given",
"file",
"."
] |
def writer(stream, fname):
with open(fname, 'w') as fil:
for transfer in stream:
print(transfer.serialize(), file=fil)
yield transfer
|
[
"def",
"writer",
"(",
"stream",
",",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"fil",
":",
"for",
"transfer",
"in",
"stream",
":",
"print",
"(",
"transfer",
".",
"serialize",
"(",
")",
",",
"file",
"=",
"fil",
")",
"yield",
"transfer"
] |
Writes the serialized representation of each transfer to the given
file.
|
[
"Writes",
"the",
"serialized",
"representation",
"of",
"each",
"transfer",
"to",
"the",
"given",
"file",
"."
] |
[
"\"\"\"Writes the serialized representation of each transfer to the given\n file.\"\"\""
] |
[
{
"param": "stream",
"type": null
},
{
"param": "fname",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "stream",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fname",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def writer(stream, fname):
with open(fname, 'w') as fil:
for transfer in stream:
print(transfer.serialize(), file=fil)
yield transfer
| 858 | 11 |
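A self-contained sketch for writer above, using a hypothetical stand-in transfer object with the serialize() method the generator expects:

import os, tempfile

class Transfer:
    def serialize(self):
        return "00 11 22"

path = os.path.join(tempfile.gettempdir(), "transfers_demo.txt")
passed_through = list(writer([Transfer(), Transfer()], path))  # drives the generator
assert len(passed_through) == 2
assert open(path).read().count("00 11 22") == 2  # one serialized line per transfer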
50eca91c898560b9378fead3bc1389ce2eee4e73
|
heatherleaf/sparv-pipeline
|
sparv/modules/vw_topic_modelling/vw_topic_modelling.py
|
[
"MIT"
] |
Python
|
triangulate
| null |
def triangulate(xs):
"""
All initial segments of xs, concatenated.
>>> ''.join(triangulate('abc'))
'aababc'
>>> ''.join(triangulate('1234'))
'1121231234'
>>> list(triangulate([]))
[]
"""
for i, _ in enumerate(xs):
for x in xs[:i + 1]:
yield x
|
All initial segments of xs, concatenated.
>>> ''.join(triangulate('abc'))
'aababc'
>>> ''.join(triangulate('1234'))
'1121231234'
>>> list(triangulate([]))
[]
|
All initial segments of xs, concatenated.
|
[
"All",
"initial",
"segments",
"of",
"xs",
"concatenated",
"."
] |
def triangulate(xs):
for i, _ in enumerate(xs):
for x in xs[:i + 1]:
yield x
|
[
"def",
"triangulate",
"(",
"xs",
")",
":",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"xs",
")",
":",
"for",
"x",
"in",
"xs",
"[",
":",
"i",
"+",
"1",
"]",
":",
"yield",
"x"
] |
All initial segments of xs, concatenated.
|
[
"All",
"initial",
"segments",
"of",
"xs",
"concatenated",
"."
] |
[
"\"\"\"\n All initial segments of xs, concatenated.\n\n >>> ''.join(triangulate('abc'))\n 'aababc'\n >>> ''.join(triangulate('1234'))\n '1121231234'\n >>> list(triangulate([]))\n []\n \"\"\""
] |
[
{
"param": "xs",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "xs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def triangulate(xs):
for i, _ in enumerate(xs):
for x in xs[:i + 1]:
yield x
| 859 | 526 |
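Two more doctest-style checks for triangulate above, in the spirit of its docstring examples:

assert list(triangulate([1, 2])) == [1, 1, 2]
assert ''.join(triangulate('ab')) == 'aab'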
93e21eac2057e4a6d1d432d170f2219d2acb29df
|
beantowel/librosa
|
examples/hpss_beats.py
|
[
"ISC"
] |
Python
|
process_arguments
|
<not_specific>
|
def process_arguments(args):
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='HPSS beat-tracking example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the output file (csv of beat times)')
return vars(parser.parse_args(args))
|
Argparse function to get the program parameters
|
Argparse function to get the program parameters
|
[
"Argparse",
"function",
"to",
"get",
"the",
"program",
"parameters"
] |
def process_arguments(args):
parser = argparse.ArgumentParser(description='HPSS beat-tracking example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the output file (csv of beat times)')
return vars(parser.parse_args(args))
|
[
"def",
"process_arguments",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'HPSS beat-tracking example'",
")",
"parser",
".",
"add_argument",
"(",
"'input_file'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the input file (wav, mp3, etc)'",
")",
"parser",
".",
"add_argument",
"(",
"'output_file'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the output file (csv of beat times)'",
")",
"return",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"args",
")",
")"
] |
Argparse function to get the program parameters
|
[
"Argparse",
"function",
"to",
"get",
"the",
"program",
"parameters"
] |
[
"'''Argparse function to get the program parameters'''"
] |
[
{
"param": "args",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import argparse
def process_arguments(args):
parser = argparse.ArgumentParser(description='HPSS beat-tracking example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the output file (csv of beat times)')
return vars(parser.parse_args(args))
| 860 | 435 |
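A usage sketch for process_arguments above; note it returns a plain dict rather than an argparse Namespace (file names are illustrative):

params = process_arguments(["song.mp3", "beats.csv"])
assert params == {"input_file": "song.mp3", "output_file": "beats.csv"}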
ea88e1f54e7ff8af5f1191269fecc46695f02dce
|
InfrastructureHQ/AWS-CDK-Accelerators
|
salesforce-appflow/iac/policies/policies.py
|
[
"Apache-2.0"
] |
Python
|
aws_secrets_manager_get_secret_policy_in_json
|
<not_specific>
|
def aws_secrets_manager_get_secret_policy_in_json(secret_arn):
"""
Define an IAM policy statement for getting secret value.
:return: an IAM policy statement in json.
"""
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["secretsmanager:GetSecretValue"],
"Resource": [secret_arn],
}
],
}
|
Define an IAM policy statement for getting secret value.
:return: an IAM policy statement in json.
|
Define an IAM policy statement for getting secret value.
|
[
"Define",
"an",
"IAM",
"policy",
"statement",
"for",
"getting",
"secret",
"value",
"."
] |
def aws_secrets_manager_get_secret_policy_in_json(secret_arn):
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["secretsmanager:GetSecretValue"],
"Resource": [secret_arn],
}
],
}
|
[
"def",
"aws_secrets_manager_get_secret_policy_in_json",
"(",
"secret_arn",
")",
":",
"return",
"{",
"\"Version\"",
":",
"\"2012-10-17\"",
",",
"\"Statement\"",
":",
"[",
"{",
"\"Effect\"",
":",
"\"Allow\"",
",",
"\"Action\"",
":",
"[",
"\"secretsmanager:GetSecretValue\"",
"]",
",",
"\"Resource\"",
":",
"[",
"secret_arn",
"]",
",",
"}",
"]",
",",
"}"
] |
Define an IAM policy statement for getting secret value.
|
[
"Define",
"an",
"IAM",
"policy",
"statement",
"for",
"getting",
"secret",
"value",
"."
] |
[
"\"\"\"\n Define an IAM policy statement for getting secret value.\n :return: an IAM policy statement in json.\n \"\"\""
] |
[
{
"param": "secret_arn",
"type": null
}
] |
{
"returns": [
{
"docstring": "an IAM policy statement in json.",
"docstring_tokens": [
"an",
"IAM",
"policy",
"statement",
"in",
"json",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "secret_arn",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def aws_secrets_manager_get_secret_policy_in_json(secret_arn):
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["secretsmanager:GetSecretValue"],
"Resource": [secret_arn],
}
],
}
| 861 | 85 |
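A shape check for aws_secrets_manager_get_secret_policy_in_json above; the ARN is a made-up example:

arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:demo"
policy = aws_secrets_manager_get_secret_policy_in_json(arn)
assert policy["Statement"][0]["Action"] == ["secretsmanager:GetSecretValue"]
assert policy["Statement"][0]["Resource"] == [arn]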
866b4d9ee5b46f226d443445ae1eef8d2ff7dba7
|
jason-r-becker/dfspy
|
dfspy/train_projections.py
|
[
"MIT"
] |
Python
|
fmt_pval
|
<not_specific>
|
def fmt_pval(pvals):
"""Return *-style significance list for pvalues."""
sig = []
for pval in pvals:
if pval <= 0.001:
sig.append('*')
elif pval <= 0.01:
sig.append('**')
elif pval <= 0.05:
sig.append('***')
else:
sig.append('')
return sig
|
Return *-style significance list for pvalues.
|
Return *-style significance list for pvalues.
|
[
"Return",
"*",
"-",
"style",
"significance",
"list",
"for",
"pvalues",
"."
] |
def fmt_pval(pvals):
sig = []
for pval in pvals:
if pval <= 0.001:
sig.append('*')
elif pval <= 0.01:
sig.append('**')
elif pval <= 0.05:
sig.append('***')
else:
sig.append('')
return sig
|
[
"def",
"fmt_pval",
"(",
"pvals",
")",
":",
"sig",
"=",
"[",
"]",
"for",
"pval",
"in",
"pvals",
":",
"if",
"pval",
"<=",
"0.001",
":",
"sig",
".",
"append",
"(",
"'*'",
")",
"elif",
"pval",
"<=",
"0.01",
":",
"sig",
".",
"append",
"(",
"'**'",
")",
"elif",
"pval",
"<=",
"0.05",
":",
"sig",
".",
"append",
"(",
"'***'",
")",
"else",
":",
"sig",
".",
"append",
"(",
"''",
")",
"return",
"sig"
] |
Return *-style significance list for pvalues.
|
[
"Return",
"*",
"-",
"style",
"significance",
"list",
"for",
"pvalues",
"."
] |
[
"\"\"\"Return *-style significance list for pvalues.\"\"\""
] |
[
{
"param": "pvals",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pvals",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def fmt_pval(pvals):
sig = []
for pval in pvals:
if pval <= 0.001:
sig.append('*')
elif pval <= 0.01:
sig.append('**')
elif pval <= 0.05:
sig.append('***')
else:
sig.append('')
return sig
| 863 | 114 |
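A behaviour check for fmt_pval above; note that as written the record maps the smallest p-values to a single '*', the reverse of the common three-star convention:

assert fmt_pval([0.0005, 0.02, 0.2]) == ['*', '***', '']
assert fmt_pval([0.005]) == ['**']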
0cfc996c1f7fa2e516172473d5f77b17e198ab95
|
radug0314/pytest_func_cov
|
pytest_func_cov/tracking.py
|
[
"MIT"
] |
Python
|
import_module_from_file
|
<not_specific>
|
def import_module_from_file(module_name, file_path):
"""
Imports module from a given file path under a given module name. If the module
exists the function returns the module object from sys.modules.
Args:
module_name (str): Full qualified name of the module.
Example: mypackage.mymodule
file_path (str): Path to module, assumed to be a ".py" file
Returns:
ModuleType
"""
if module_name in sys.modules:
module = sys.modules[module_name]
else:
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
|
Imports module from a given file path under a given module name. If the module
exists the function returns the module object from sys.modules.
Args:
module_name (str): Full qualified name of the module.
Example: mypackage.mymodule
file_path (str): Path to module, assumed to be a ".py" file
Returns:
ModuleType
|
Imports module from a given file path under a given module name. If the module
exists the function returns the module object from sys.modules.
|
[
"Imports",
"module",
"from",
"a",
"given",
"file",
"path",
"under",
"a",
"given",
"module",
"name",
".",
"If",
"the",
"module",
"exists",
"the",
"function",
"returns",
"the",
"module",
"object",
"from",
"sys",
".",
"modules",
"."
] |
def import_module_from_file(module_name, file_path):
if module_name in sys.modules:
module = sys.modules[module_name]
else:
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
|
[
"def",
"import_module_from_file",
"(",
"module_name",
",",
"file_path",
")",
":",
"if",
"module_name",
"in",
"sys",
".",
"modules",
":",
"module",
"=",
"sys",
".",
"modules",
"[",
"module_name",
"]",
"else",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"module_name",
",",
"file_path",
")",
"module",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"sys",
".",
"modules",
"[",
"module_name",
"]",
"=",
"module",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"module",
")",
"return",
"module"
] |
Imports module from a given file path under a given module name.
|
[
"Imports",
"module",
"from",
"a",
"given",
"file",
"path",
"under",
"a",
"given",
"module",
"name",
"."
] |
[
"\"\"\"\n Imports module from a given file path under a given module name. If the module\n exists the function returns the module object from sys.modules.\n\n Args:\n module_name (str): Full qualified name of the module.\n Example: mypackage.mymodule\n file_path (str): Path to module, assumed to be a \".py\" file\n\n Returns:\n ModuleType\n \"\"\""
] |
[
{
"param": "module_name",
"type": null
},
{
"param": "file_path",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "module_name",
"type": null,
"docstring": "Full qualified name of the module.\nExample: mypackage.mymodule",
"docstring_tokens": [
"Full",
"qualified",
"name",
"of",
"the",
"module",
".",
"Example",
":",
"mypackage",
".",
"mymodule"
],
"default": null,
"is_optional": false
},
{
"identifier": "file_path",
"type": null,
"docstring": "Path to module, assumed to be a \".py\" file",
"docstring_tokens": [
"Path",
"to",
"module",
"assumed",
"to",
"be",
"a",
"\"",
".",
"py",
"\"",
"file"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import importlib
import sys
def import_module_from_file(module_name, file_path):
if module_name in sys.modules:
module = sys.modules[module_name]
else:
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
| 864 | 740 |
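A self-contained sketch for import_module_from_file above, writing a throwaway module to a temp directory first (module and attribute names are illustrative):

import os, tempfile

path = os.path.join(tempfile.gettempdir(), "tiny_mod_demo.py")
with open(path, "w") as f:
    f.write("ANSWER = 42\n")

mod = import_module_from_file("tiny_mod_demo", path)
assert mod.ANSWER == 42
assert import_module_from_file("tiny_mod_demo", path) is mod  # second call hits sys.modules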
5f2f16cda2237781d32f309ad923d4a58b1fbe5b
|
danielw2904/networkit
|
extrafiles/tooling/nktooling/__init__.py
|
[
"MIT"
] |
Python
|
computeAndReportDiff
| null |
def computeAndReportDiff(originalFilename, formatedFilename):
"""Compute a colorful diff between the original file and the formatted one"""
p = subprocess.Popen(["diff", "-a", "--color=always", originalFilename, formatedFilename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
output, _ = p.communicate()
print("-" * 20, "Begin of diff", "-" * 20)
print("Input file: ", originalFilename)
sys.stdout.buffer.write(output)
print("-" * 21, "End of diff", "-" * 21)
|
Compute a colorful diff between the original file and the formatted one
|
Compute a colorful diff between the original file and the formatted one
|
[
"Compute",
"a",
"colorful",
"diff",
"between",
"the",
"original",
"file",
"and",
"the",
"formatted",
"one"
] |
def computeAndReportDiff(originalFilename, formatedFilename):
p = subprocess.Popen(["diff", "-a", "--color=always", originalFilename, formatedFilename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
output, _ = p.communicate()
print("-" * 20, "Begin of diff", "-" * 20)
print("Input file: ", originalFilename)
sys.stdout.buffer.write(output)
print("-" * 21, "End of diff", "-" * 21)
|
[
"def",
"computeAndReportDiff",
"(",
"originalFilename",
",",
"formatedFilename",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"diff\"",
",",
"\"-a\"",
",",
"\"--color=always\"",
",",
"originalFilename",
",",
"formatedFilename",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"bufsize",
"=",
"-",
"1",
")",
"output",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"print",
"(",
"\"-\"",
"*",
"20",
",",
"\"Begin of diff\"",
",",
"\"-\"",
"*",
"20",
")",
"print",
"(",
"\"Input file: \"",
",",
"originalFilename",
")",
"sys",
".",
"stdout",
".",
"buffer",
".",
"write",
"(",
"output",
")",
"print",
"(",
"\"-\"",
"*",
"21",
",",
"\"End of diff\"",
",",
"\"-\"",
"*",
"21",
")"
] |
Compute a colorful diff between the original file and the formatted one
|
[
"Compute",
"a",
"colorful",
"diff",
"between",
"the",
"original",
"file",
"and",
"the",
"formatted",
"one"
] |
[
"\"\"\"Compute a colorful diff between the original file and the formatted one\"\"\""
] |
[
{
"param": "originalFilename",
"type": null
},
{
"param": "formatedFilename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "originalFilename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "formatedFilename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
import sys
def computeAndReportDiff(originalFilename, formatedFilename):
p = subprocess.Popen(["diff", "-a", "--color=always", originalFilename, formatedFilename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
output, _ = p.communicate()
print("-" * 20, "Begin of diff", "-" * 20)
print("Input file: ", originalFilename)
sys.stdout.buffer.write(output)
print("-" * 21, "End of diff", "-" * 21)
| 865 | 778 |
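A quick demo for computeAndReportDiff above; it shells out to the diff binary, so GNU diff with --color support must be on PATH (file contents are illustrative):

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as a, \
     tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as b:
    a.write("x = 1\n")
    b.write("x = 2\n")

computeAndReportDiff(a.name, b.name)  # prints the framed, colourised diff to stdout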
4314016bab1bb184cf790e1efcb61f64ccfdc429
|
zklaus/ESMValTool
|
diag_scripts/aux/catchment_analysis/catchment_analysis_tool_val.py
|
[
"Apache-2.0"
] |
Python
|
writefile
|
<not_specific>
|
def writefile(data, output, title):
"""write data to file as to be read by function readfile"""
import csv
with open(output, 'w') as csvfile:
w = csv.writer(csvfile)
w.writerow([title])
w.writerow([' '])
for key in sorted(data.keys()):
val = data[key]['data']
unit = data[key]['unit']
if not data[key].get('rel', False):
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17)])
else:
rel = data[key]['rel']
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17) +
str(' (' + '%6.2f' % (rel) + ' %)')])
return
|
write data to file as to be read by function readfile
|
write data to file as to be read by function readfile
|
[
"write",
"data",
"to",
"file",
"as",
"to",
"be",
"read",
"by",
"function",
"readfile"
] |
def writefile(data, output, title):
import csv
with open(output, 'w') as csvfile:
w = csv.writer(csvfile)
w.writerow([title])
w.writerow([' '])
for key in sorted(data.keys()):
val = data[key]['data']
unit = data[key]['unit']
if not data[key].get('rel', False):
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17)])
else:
rel = data[key]['rel']
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17) +
str(' (' + '%6.2f' % (rel) + ' %)')])
return
|
[
"def",
"writefile",
"(",
"data",
",",
"output",
",",
"title",
")",
":",
"import",
"csv",
"with",
"open",
"(",
"output",
",",
"'w'",
")",
"as",
"csvfile",
":",
"w",
"=",
"csv",
".",
"writer",
"(",
"csvfile",
")",
"w",
".",
"writerow",
"(",
"[",
"title",
"]",
")",
"w",
".",
"writerow",
"(",
"[",
"' '",
"]",
")",
"for",
"key",
"in",
"sorted",
"(",
"data",
".",
"keys",
"(",
")",
")",
":",
"val",
"=",
"data",
"[",
"key",
"]",
"[",
"'data'",
"]",
"unit",
"=",
"data",
"[",
"key",
"]",
"[",
"'unit'",
"]",
"if",
"not",
"data",
"[",
"key",
"]",
".",
"get",
"(",
"'rel'",
",",
"False",
")",
":",
"w",
".",
"writerow",
"(",
"[",
"key",
".",
"ljust",
"(",
"30",
")",
"+",
"':'",
"+",
"str",
"(",
"str",
"(",
"val",
")",
"+",
"' '",
"+",
"unit",
")",
".",
"rjust",
"(",
"17",
")",
"]",
")",
"else",
":",
"rel",
"=",
"data",
"[",
"key",
"]",
"[",
"'rel'",
"]",
"w",
".",
"writerow",
"(",
"[",
"key",
".",
"ljust",
"(",
"30",
")",
"+",
"':'",
"+",
"str",
"(",
"str",
"(",
"val",
")",
"+",
"' '",
"+",
"unit",
")",
".",
"rjust",
"(",
"17",
")",
"+",
"str",
"(",
"' ('",
"+",
"'%6.2f'",
"%",
"(",
"rel",
")",
"+",
"' %)'",
")",
"]",
")",
"return"
] |
write data to file as to be read by function readfile
|
[
"write",
"data",
"to",
"file",
"as",
"to",
"be",
"read",
"by",
"function",
"readfile"
] |
[
"\"\"\"write data to file as to be read by function readfile\"\"\""
] |
[
{
"param": "data",
"type": null
},
{
"param": "output",
"type": null
},
{
"param": "title",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "title",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import csv
def writefile(data, output, title):
import csv
with open(output, 'w') as csvfile:
w = csv.writer(csvfile)
w.writerow([title])
w.writerow([' '])
for key in sorted(data.keys()):
val = data[key]['data']
unit = data[key]['unit']
if not data[key].get('rel', False):
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17)])
else:
rel = data[key]['rel']
w.writerow(
[key.ljust(30) + ':' +
str(str(val) + ' ' + unit).rjust(17) +
str(' (' + '%6.2f' % (rel) + ' %)')])
return
| 866 | 756 |
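For illustration, a minimal call to writefile as defined in the record above; the statistics dict is invented but follows the shape the function reads (per-key 'data' and 'unit', plus an optional 'rel'):

stats = {
    "mean runoff": {"data": 12.3, "unit": "mm"},
    "runoff bias": {"data": -0.7, "unit": "mm", "rel": -5.69},
}
writefile(stats, "catchment_stats.csv", "Catchment validation")  # title row, blank row, one row per key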
493b4080a38b52d4af2d46fe63e658233a49a798
|
MKlauck/qcomp2020
|
toolpackages/PRISM/tool.py
|
[
"CC-BY-4.0"
] |
Python
|
is_benchmark_supported
|
<not_specific>
|
def is_benchmark_supported(benchmark : Benchmark):
"""returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list"""
# Check for unsupported input languages: everything but PRISM currently
if benchmark.is_prism():
# Temporarily disable pacman - very slow
# if benchmark.get_model_short_name() == "pacman":
# return False
# Check for unsupported property types: just reward bounded currently
if benchmark.is_reward_bounded_probabilistic_reachability() or benchmark.is_reward_bounded_expected_reward():
return False
# print("{},{},{},{}".format(benchmark.get_identifier(),benchmark.get_model_type(),benchmark.get_property_type(),benchmark.get_max_num_states()))
return True
else:
return False
|
returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list
|
returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list
|
[
"returns",
"True",
"if",
"the",
"provided",
"benchmark",
"is",
"supported",
"by",
"the",
"tool",
"and",
"if",
"the",
"given",
"benchmark",
"should",
"appear",
"on",
"the",
"generated",
"benchmark",
"list"
] |
def is_benchmark_supported(benchmark : Benchmark):
if benchmark.is_prism():
if benchmark.is_reward_bounded_probabilistic_reachability() or benchmark.is_reward_bounded_expected_reward():
return False
return True
else:
return False
|
[
"def",
"is_benchmark_supported",
"(",
"benchmark",
":",
"Benchmark",
")",
":",
"if",
"benchmark",
".",
"is_prism",
"(",
")",
":",
"if",
"benchmark",
".",
"is_reward_bounded_probabilistic_reachability",
"(",
")",
"or",
"benchmark",
".",
"is_reward_bounded_expected_reward",
"(",
")",
":",
"return",
"False",
"return",
"True",
"else",
":",
"return",
"False"
] |
returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list
|
[
"returns",
"True",
"if",
"the",
"provided",
"benchmark",
"is",
"supported",
"by",
"the",
"tool",
"and",
"if",
"the",
"given",
"benchmark",
"should",
"appear",
"on",
"the",
"generated",
"benchmark",
"list"
] |
[
"\"\"\"returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list\"\"\"",
"# Check for unsupported input languages: everything but PRISM currently",
"# Temporarily disable pacman - very slow",
"# if benchmark.get_model_short_name() == \"pacman\":",
"# return False",
"# Check for unsupported property types: just reward bounded currently",
"# print(\"{},{},{},{}\".format(benchmark.get_identifier(),benchmark.get_model_type(),benchmark.get_property_type(),benchmark.get_max_num_states()))"
] |
[
{
"param": "benchmark",
"type": "Benchmark"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "benchmark",
"type": "Benchmark",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_benchmark_supported(benchmark : Benchmark):
if benchmark.is_prism():
if benchmark.is_reward_bounded_probabilistic_reachability() or benchmark.is_reward_bounded_expected_reward():
return False
return True
else:
return False
| 867 | 176 |
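For illustration, how is_benchmark_supported above would typically filter a benchmark set; Benchmark instances come from the surrounding QComp tooling, so the iterable below is hypothetical:

# 'all_benchmarks' is a hypothetical iterable of Benchmark objects.
supported = [b for b in all_benchmarks if is_benchmark_supported(b)]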
a1b3b517bce890f64945f838bed5c7f56cc4cef1
|
valeriabriones/dvm-dos-tem
|
scripts/generate-html-viewer.py
|
[
"MIT"
] |
Python
|
classify
|
<not_specific>
|
def classify(filepath):
'''
    Attempts to classify a file based on the underscore separated fields.
Expects a file name something like the following:
"sometag_Vegetation_pft0.png" # calibration "suites"
"_histo_pestplot.png" # pest plots
"*-diagnostics.png" # diagnostics plots
    For the calibration suites, the category is the last field before the
extension, unless 'pft' is in the last field, in which case the category
is the second to last field.
For pest plots, the category is the last field before the extension.
For diagnostic plots, the category is the second to last field before the
extension.
Parameters
----------
filepath : str (required)
Splits the last element (basename) of a path on underscores, and
then looks for either the last element, or second to last element.
Returns
-------
c : str
The classification string.
'''
bn = os.path.basename(filepath)
sbn = os.path.splitext(bn)[0]
tokens = sbn.split('_')
#print tokens
classification = None
if 'pft' in tokens[-1]:
classification = tokens[-2]
elif 'diagnostic' in tokens[-1]:
classification = "diagnostic-" + tokens[-2]
else:
classification = tokens[-1]
#print "returning %s" % (classification)
return classification
|
Attempts to classify a file based on the underscore separated fields.
Expects a file name something like the following:
"sometag_Vegetation_pft0.png" # calibration "suites"
"_histo_pestplot.png" # pest plots
"*-diagnostics.png" # diagnostics plots
For the calibration suites, the category is the last field before the
extension, unless 'pft' is in the last field, in which case the category
is the second to last field.
For pest plots, the category is the last field before the extension.
For diagnostic plots, the category is the second to last field before the
extension.
Parameters
----------
filepath : str (required)
Splits the last element (basename) of a path on underscores, and
then looks for either the last element, or second to last element.
Returns
-------
c : str
The classification string.
|
Attempts to classify a file based on the underscore separated fields.
Expects a file name something like the following.
For the calibration suites, the category is the last field before the
extension, unless 'pft' is in the last field, in which case the category
is the second to last field.
For pest plots, the category is the last field before the extension.
For diagnostic plots, the category is the second to last field before the
extension.
Parameters
filepath : str (required)
Splits the last element (basename) of a path on underscores, and
then looks for either the last element, or second to last element.
Returns
c : str
The classification string.
|
[
    "Attempts",
    "to",
    "classify",
    "a",
    "file",
    "based",
    "on",
    "the",
    "underscore",
    "separated",
    "fields",
    ".",
    "Expects",
    "a",
    "file",
    "name",
    "something",
    "like",
    "the",
    "following",
    ".",
    "For",
    "the",
    "calibration",
    "suites",
    "the",
    "category",
    "is",
    "the",
    "last",
    "field",
"before",
"the",
"extension",
"unless",
"'",
"pft",
"'",
"is",
"in",
"the",
"last",
"field",
"in",
"which",
"case",
"the",
"category",
"is",
"the",
"second",
"to",
"last",
"field",
".",
"For",
"pest",
"plots",
"the",
"category",
"is",
"the",
"last",
"field",
"before",
"the",
"extension",
".",
"For",
"diagnostic",
"plots",
"the",
"category",
"is",
"the",
"second",
"to",
"last",
"field",
"before",
"the",
"extension",
".",
"Parameters",
"filepath",
":",
"str",
"(",
"required",
")",
"Splits",
"the",
"last",
"element",
"(",
"basename",
")",
"of",
"a",
"path",
"on",
"underscores",
"and",
"then",
"looks",
"for",
"either",
"the",
"last",
"element",
"or",
"second",
"to",
"last",
"element",
".",
"Returns",
"c",
":",
"str",
"The",
"classification",
"string",
"."
] |
def classify(filepath):
bn = os.path.basename(filepath)
sbn = os.path.splitext(bn)[0]
tokens = sbn.split('_')
classification = None
if 'pft' in tokens[-1]:
classification = tokens[-2]
elif 'diagnostic' in tokens[-1]:
classification = "diagnostic-" + tokens[-2]
else:
classification = tokens[-1]
return classification
|
[
"def",
"classify",
"(",
"filepath",
")",
":",
"bn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filepath",
")",
"sbn",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"bn",
")",
"[",
"0",
"]",
"tokens",
"=",
"sbn",
".",
"split",
"(",
"'_'",
")",
"classification",
"=",
"None",
"if",
"'pft'",
"in",
"tokens",
"[",
"-",
"1",
"]",
":",
"classification",
"=",
"tokens",
"[",
"-",
"2",
"]",
"elif",
"'diagnostic'",
"in",
"tokens",
"[",
"-",
"1",
"]",
":",
"classification",
"=",
"\"diagnostic-\"",
"+",
"tokens",
"[",
"-",
"2",
"]",
"else",
":",
"classification",
"=",
"tokens",
"[",
"-",
"1",
"]",
"return",
"classification"
] |
Attempts to classify a file based on the underscore separated fields.
|
[
    "Attempts",
    "to",
    "classify",
    "a",
    "file",
    "based",
    "on",
    "the",
    "underscore",
    "separated",
"fields",
"."
] |
[
    "'''\n    Attempts to classify a file based on the underscore separated fields.\n    Expects a file name something like the following:\n\n        \"sometag_Vegetation_pft0.png\"     # calibration \"suites\"\n        \"_histo_pestplot.png\"             # pest plots\n        \"*-diagnostics.png\"               # diagnostics plots\n\n\n    For the calibration suites, the category is the last field before the \n    extension, unless 'pft' is in the last field, in which case the category\n    is the second to last field.\n\n    For pest plots, the category is the last field before the extension.\n\n    For diagnostic plots, the category is the second to last field before the\n    extension.\n\n    Parameters\n    ----------\n    filepath : str (required)\n        Splits the last element (basename) of a path on underscores, and\n        then looks for either the last element, or second to last element.\n\n    Returns\n    -------\n    c : str\n        The classification string.\n    '''",
"#print tokens",
"#print \"returning %s\" % (classification) "
] |
[
{
"param": "filepath",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filepath",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def classify(filepath):
bn = os.path.basename(filepath)
sbn = os.path.splitext(bn)[0]
tokens = sbn.split('_')
classification = None
if 'pft' in tokens[-1]:
classification = tokens[-2]
elif 'diagnostic' in tokens[-1]:
classification = "diagnostic-" + tokens[-2]
else:
classification = tokens[-1]
return classification
| 868 | 761 |
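A few worked calls for classify above, using invented file names that follow the patterns its docstring describes:

classify("run1_Vegetation_pft0.png")   # -> 'Vegetation'       ('pft' in the last field)
classify("_histo_pestplot.png")        # -> 'pestplot'         (last field)
classify("run1_tem-diagnostics.png")   # -> 'diagnostic-run1'  ('diagnostic' in the last field)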
0cd757e3021342ed1b2f13b217b19c80e9bad499
|
GeminiDRSoftware/OCAMDR
|
scorpiodr/scorpio/recipes/sq/recipes_ARC_LS_SPECT_CCD.py
|
[
"BSD-3-Clause"
] |
Python
|
makeProcessedArc
|
<not_specific>
|
def makeProcessedArc(p):
"""
This recipe performs the standardization and corrections needed to convert
the raw input arc images into a reduced arc, with a wavelength solution
attached to it. This output processed arc is stored on disk using
storeProcessedArc and has a name equal to the name of the first input arc
image with "_arc.fits" appended.
Parameters
----------
p : Primitives object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.determineWavelengthSolution()
p.determineDistortion()
p.storeProcessedArc()
return
|
This recipe performs the standardization and corrections needed to convert
the raw input arc images into a reduced arc, with a wavelength solution
attached to it. This output processed arc is stored on disk using
storeProcessedArc and has a name equal to the name of the first input arc
image with "_arc.fits" appended.
Parameters
----------
p : Primitives object
A primitive set matching the recipe_tags.
|
This recipe performs the standardization and corrections needed to convert
the raw input arc images into a reduced arc, with a wavelength solution
attached to it. This output processed arc is stored on disk using
storeProcessedArc and has a name equal to the name of the first input arc
image with "_arc.fits" appended.
Parameters
p : Primitives object
A primitive set matching the recipe_tags.
|
[
"This",
"recipe",
"performs",
"the",
"standardization",
"and",
"corrections",
"needed",
"to",
"convert",
"the",
"raw",
"input",
"arc",
"images",
"into",
"a",
"reduced",
"arc",
"with",
"a",
"wavelength",
"solution",
"attached",
"to",
"it",
".",
"This",
"output",
"processed",
"arc",
"is",
"stored",
"on",
"disk",
"using",
"storeProcessedArc",
"and",
"has",
"a",
"name",
"equal",
"to",
"the",
"name",
"of",
"the",
"first",
"input",
"arc",
"image",
"with",
"\"",
"_arc",
".",
"fits",
"\"",
"appended",
".",
"Parameters",
"p",
":",
"Primitives",
"object",
"A",
"primitive",
"set",
"matching",
"the",
"recipe_tags",
"."
] |
def makeProcessedArc(p):
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.determineWavelengthSolution()
p.determineDistortion()
p.storeProcessedArc()
return
|
[
"def",
"makeProcessedArc",
"(",
"p",
")",
":",
"p",
".",
"prepare",
"(",
")",
"p",
".",
"addDQ",
"(",
"static_bpm",
"=",
"None",
")",
"p",
".",
"addVAR",
"(",
"read_noise",
"=",
"True",
")",
"p",
".",
"overscanCorrect",
"(",
")",
"p",
".",
"biasCorrect",
"(",
")",
"p",
".",
"ADUToElectrons",
"(",
")",
"p",
".",
"addVAR",
"(",
"poisson_noise",
"=",
"True",
")",
"p",
".",
"determineWavelengthSolution",
"(",
")",
"p",
".",
"determineDistortion",
"(",
")",
"p",
".",
"storeProcessedArc",
"(",
")",
"return"
] |
This recipe performs the standardization and corrections needed to convert
the raw input arc images into a reduced arc, with a wavelength solution
attached to it.
|
[
"This",
"recipe",
"performs",
"the",
"standardization",
"and",
"corrections",
"needed",
"to",
"convert",
"the",
"raw",
"input",
"arc",
"images",
"into",
"a",
"reduced",
"arc",
"with",
"a",
"wavelength",
"solution",
"attached",
"to",
"it",
"."
] |
[
"\"\"\"\n This recipe performs the standardization and corrections needed to convert\n the raw input arc images into a reduced arc, with a wavelength solution\n attached to it. This output processed arc is stored on disk using\n storeProcessedArc and has a name equal to the name of the first input arc\n image with \"_arc.fits\" appended.\n\n Parameters\n ----------\n p : Primitives object\n A primitive set matching the recipe_tags.\n \"\"\""
] |
[
{
"param": "p",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "p",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def makeProcessedArc(p):
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.determineWavelengthSolution()
p.determineDistortion()
p.storeProcessedArc()
return
| 869 | 453 |
2bb8090aa91e7ecb93bb17d12ab42880f60463a0
|
akanksha1304/msds692
|
hw/code/search/index_search.py
|
[
"MIT"
] |
Python
|
index_search
|
<not_specific>
|
def index_search(files, index, terms):
"""
Given an index and a list of fully-qualified filenames, return a list of
filenames whose file contents has all words in terms parameter as normalized
by your words() function. Parameter terms is a list of strings.
You can only use the index to find matching files; you cannot open the files
and look inside.
"""
first = True
common_files_index = set()
for term in terms:
if first == True:
first = False
if term not in index.keys():
continue
common_files_index = index[term]
else:
if common_files_index:
common_files_index = common_files_index.intersection(index[term])
result = []
if common_files_index:
for i in common_files_index:
result.append(files[i])
return result
|
Given an index and a list of fully-qualified filenames, return a list of
filenames whose file contents has all words in terms parameter as normalized
by your words() function. Parameter terms is a list of strings.
You can only use the index to find matching files; you cannot open the files
and look inside.
|
Given an index and a list of fully-qualified filenames, return a list of
filenames whose file contents has all words in terms parameter as normalized
by your words() function. Parameter terms is a list of strings.
You can only use the index to find matching files; you cannot open the files
and look inside.
|
[
"Given",
"an",
"index",
"and",
"a",
"list",
"of",
"fully",
"-",
"qualified",
"filenames",
"return",
"a",
"list",
"of",
"filenames",
"whose",
"file",
"contents",
"has",
"all",
"words",
"in",
"terms",
"parameter",
"as",
"normalized",
"by",
"your",
"words",
"()",
"function",
".",
"Parameter",
"terms",
"is",
"a",
"list",
"of",
"strings",
".",
"You",
"can",
"only",
"use",
"the",
"index",
"to",
"find",
"matching",
"files",
";",
"you",
"cannot",
"open",
"the",
"files",
"and",
"look",
"inside",
"."
] |
def index_search(files, index, terms):
first = True
common_files_index = set()
for term in terms:
if first == True:
first = False
if term not in index.keys():
continue
common_files_index = index[term]
else:
if common_files_index:
common_files_index = common_files_index.intersection(index[term])
result = []
if common_files_index:
for i in common_files_index:
result.append(files[i])
return result
|
[
"def",
"index_search",
"(",
"files",
",",
"index",
",",
"terms",
")",
":",
"first",
"=",
"True",
"common_files_index",
"=",
"set",
"(",
")",
"for",
"term",
"in",
"terms",
":",
"if",
"first",
"==",
"True",
":",
"first",
"=",
"False",
"if",
"term",
"not",
"in",
"index",
".",
"keys",
"(",
")",
":",
"continue",
"common_files_index",
"=",
"index",
"[",
"term",
"]",
"else",
":",
"if",
"common_files_index",
":",
"common_files_index",
"=",
"common_files_index",
".",
"intersection",
"(",
"index",
"[",
"term",
"]",
")",
"result",
"=",
"[",
"]",
"if",
"common_files_index",
":",
"for",
"i",
"in",
"common_files_index",
":",
"result",
".",
"append",
"(",
"files",
"[",
"i",
"]",
")",
"return",
"result"
] |
Given an index and a list of fully-qualified filenames, return a list of
filenames whose file contents has all words in terms parameter as normalized
by your words() function.
|
[
"Given",
"an",
"index",
"and",
"a",
"list",
"of",
"fully",
"-",
"qualified",
"filenames",
"return",
"a",
"list",
"of",
"filenames",
"whose",
"file",
"contents",
"has",
"all",
"words",
"in",
"terms",
"parameter",
"as",
"normalized",
"by",
"your",
"words",
"()",
"function",
"."
] |
[
"\"\"\"\n Given an index and a list of fully-qualified filenames, return a list of\n filenames whose file contents has all words in terms parameter as normalized\n by your words() function. Parameter terms is a list of strings.\n You can only use the index to find matching files; you cannot open the files\n and look inside.\n \"\"\""
] |
[
{
"param": "files",
"type": null
},
{
"param": "index",
"type": null
},
{
"param": "terms",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "files",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "index",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "terms",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def index_search(files, index, terms):
first = True
common_files_index = set()
for term in terms:
if first == True:
first = False
if term not in index.keys():
continue
common_files_index = index[term]
else:
if common_files_index:
common_files_index = common_files_index.intersection(index[term])
result = []
if common_files_index:
for i in common_files_index:
result.append(files[i])
return result
| 871 | 92 |
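A self-contained sketch for index_search above with a toy inverted index; the data is invented, and every queried term is assumed to appear in the index (a missing non-first term would raise KeyError in this implementation):

files = ["a.txt", "b.txt", "c.txt"]
index = {"cat": {0, 1}, "dog": {1, 2}, "bird": {2}}  # word -> positions into files
print(index_search(files, index, ["cat", "dog"]))    # ['b.txt']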
557056a2c4f4e40beabd8bf06f543d53f1229ff5
|
veryju/unsupervised_coreset_selection
|
svp/common/active.py
|
[
"MIT"
] |
Python
|
symlink_target_to_proxy
| null |
def symlink_target_to_proxy(run_dir: str):
"""
Create symbolic links from proxy files to the corresponding target files.
Parameters
----------
run_dir : str
"""
    # Symlink target directory and files to proxy because they
# are the same.
print('Proxy and target are not different.')
proxy_dir = os.path.join(run_dir, 'proxy')
target_dir = os.path.join(run_dir, 'target')
os.symlink(os.path.relpath(proxy_dir, run_dir), target_dir)
print(f'Linked {target_dir} to {proxy_dir}')
proxy_csv = os.path.join(run_dir, 'proxy.csv')
target_csv = os.path.join(run_dir, 'target.csv')
os.symlink(os.path.relpath(proxy_csv, run_dir), target_csv)
print(f'Linked {target_csv} to {proxy_csv}')
|
Create symbolic links from proxy files to the corresponding target files.
Parameters
----------
run_dir : str
|
Create symbolic links from proxy files to the corresponding target files.
Parameters
|
[
"Create",
"symbolic",
"links",
"from",
"proxy",
"files",
"to",
"the",
"corresponding",
"target",
"files",
".",
"Parameters"
] |
def symlink_target_to_proxy(run_dir: str):
print('Proxy and target are not different.')
proxy_dir = os.path.join(run_dir, 'proxy')
target_dir = os.path.join(run_dir, 'target')
os.symlink(os.path.relpath(proxy_dir, run_dir), target_dir)
print(f'Linked {target_dir} to {proxy_dir}')
proxy_csv = os.path.join(run_dir, 'proxy.csv')
target_csv = os.path.join(run_dir, 'target.csv')
os.symlink(os.path.relpath(proxy_csv, run_dir), target_csv)
print(f'Linked {target_csv} to {proxy_csv}')
|
[
"def",
"symlink_target_to_proxy",
"(",
"run_dir",
":",
"str",
")",
":",
"print",
"(",
"'Proxy and target are not different.'",
")",
"proxy_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"'proxy'",
")",
"target_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"'target'",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"proxy_dir",
",",
"run_dir",
")",
",",
"target_dir",
")",
"print",
"(",
"f'Linked {target_dir} to {proxy_dir}'",
")",
"proxy_csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"'proxy.csv'",
")",
"target_csv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"'target.csv'",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"proxy_csv",
",",
"run_dir",
")",
",",
"target_csv",
")",
"print",
"(",
"f'Linked {target_csv} to {proxy_csv}'",
")"
] |
Create symbolic links from proxy files to the corresponding target files.
|
[
"Create",
"symbolic",
"links",
"from",
"proxy",
"files",
"to",
"the",
"corresponding",
"target",
"files",
"."
] |
[
    "\"\"\"\n    Create symbolic links from proxy files to the corresponding target files.\n\n    Parameters\n    ----------\n    run_dir : str\n    \"\"\"",
    "# Symlink target directory and files to proxy because they",
"# are the same."
] |
[
{
"param": "run_dir",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "run_dir",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def symlink_target_to_proxy(run_dir: str):
print('Proxy and target are not different.')
proxy_dir = os.path.join(run_dir, 'proxy')
target_dir = os.path.join(run_dir, 'target')
os.symlink(os.path.relpath(proxy_dir, run_dir), target_dir)
print(f'Linked {target_dir} to {proxy_dir}')
proxy_csv = os.path.join(run_dir, 'proxy.csv')
target_csv = os.path.join(run_dir, 'target.csv')
os.symlink(os.path.relpath(proxy_csv, run_dir), target_csv)
print(f'Linked {target_csv} to {proxy_csv}')
| 872 | 263 |
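A runnable sketch for symlink_target_to_proxy above (POSIX, where symlink creation is unprivileged); the run directory is a temp dir seeded with the proxy/ directory and proxy.csv a real run would have produced:

import os, tempfile

run_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(run_dir, "proxy"))
open(os.path.join(run_dir, "proxy.csv"), "w").close()
symlink_target_to_proxy(run_dir)  # creates target/ and target.csv symlinks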
1342fdd1c29c6eed917d897a052e7cea78b0bf28
|
snadampal/tensorflow
|
tensorflow/python/ops/control_flow_util.py
|
[
"Apache-2.0"
] |
Python
|
GetContainingCondContext
|
<not_specific>
|
def GetContainingCondContext(ctxt):
"""Returns the first ancestor CondContext of `ctxt`.
Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.
Args:
ctxt: ControlFlowContext
Returns:
`ctxt` if `ctxt` is a CondContext, the most nested CondContext containing
`ctxt`, or None if `ctxt` is not in a cond.
"""
while ctxt:
if ctxt.IsCondContext(): return ctxt
ctxt = ctxt.outer_context
return None
|
Returns the first ancestor CondContext of `ctxt`.
Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.
Args:
ctxt: ControlFlowContext
Returns:
`ctxt` if `ctxt` is a CondContext, the most nested CondContext containing
`ctxt`, or None if `ctxt` is not in a cond.
|
Returns the first ancestor CondContext of `ctxt`.
|
[
"Returns",
"the",
"first",
"ancestor",
"CondContext",
"of",
"`",
"ctxt",
"`",
"."
] |
def GetContainingCondContext(ctxt):
while ctxt:
if ctxt.IsCondContext(): return ctxt
ctxt = ctxt.outer_context
return None
|
[
"def",
"GetContainingCondContext",
"(",
"ctxt",
")",
":",
"while",
"ctxt",
":",
"if",
"ctxt",
".",
"IsCondContext",
"(",
")",
":",
"return",
"ctxt",
"ctxt",
"=",
"ctxt",
".",
"outer_context",
"return",
"None"
] |
Returns the first ancestor CondContext of `ctxt`.
|
[
"Returns",
"the",
"first",
"ancestor",
"CondContext",
"of",
"`",
"ctxt",
"`",
"."
] |
[
"\"\"\"Returns the first ancestor CondContext of `ctxt`.\n\n Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.\n\n Args:\n ctxt: ControlFlowContext\n\n Returns:\n `ctxt` if `ctxt` is a CondContext, the most nested CondContext containing\n `ctxt`, or None if `ctxt` is not in a cond.\n \"\"\""
] |
[
{
"param": "ctxt",
"type": null
}
] |
{
"returns": [
{
"docstring": "`ctxt` if `ctxt` is a CondContext, the most nested CondContext containing\n`ctxt`, or None if `ctxt` is not in a cond.",
"docstring_tokens": [
"`",
"ctxt",
"`",
"if",
"`",
"ctxt",
"`",
"is",
"a",
"CondContext",
"the",
"most",
"nested",
"CondContext",
"containing",
"`",
"ctxt",
"`",
"or",
"None",
"if",
"`",
"ctxt",
"`",
"is",
"not",
"in",
"a",
"cond",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "ctxt",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def GetContainingCondContext(ctxt):
while ctxt:
if ctxt.IsCondContext(): return ctxt
ctxt = ctxt.outer_context
return None
| 873 | 180 |
84fd01944ce1dde3df55987a06164d748d9b41b4
|
magnusross/gpcm
|
gpcm/gprvm.py
|
[
"MIT"
] |
Python
|
psd_matern_12
|
<not_specific>
|
def psd_matern_12(omega, lam, lam_t):
"""Spectral density of Matern-1/2 process.
Args:
omega (tensor): Frequency.
lam (tensor): Decay.
lam_t (tensor): Scale.
Returns:
tensor: Spectral density.
"""
return 2 * lam_t * lam / (lam ** 2 + omega ** 2)
|
Spectral density of Matern-1/2 process.
Args:
omega (tensor): Frequency.
lam (tensor): Decay.
lam_t (tensor): Scale.
Returns:
tensor: Spectral density.
|
Spectral density of Matern-1/2 process.
|
[
"Spectral",
"density",
"of",
"Matern",
"-",
"1",
"/",
"2",
"process",
"."
] |
def psd_matern_12(omega, lam, lam_t):
return 2 * lam_t * lam / (lam ** 2 + omega ** 2)
|
[
"def",
"psd_matern_12",
"(",
"omega",
",",
"lam",
",",
"lam_t",
")",
":",
"return",
"2",
"*",
"lam_t",
"*",
"lam",
"/",
"(",
"lam",
"**",
"2",
"+",
"omega",
"**",
"2",
")"
] |
Spectral density of Matern-1/2 process.
|
[
"Spectral",
"density",
"of",
"Matern",
"-",
"1",
"/",
"2",
"process",
"."
] |
[
"\"\"\"Spectral density of Matern-1/2 process.\n\n Args:\n omega (tensor): Frequency.\n lam (tensor): Decay.\n lam_t (tensor): Scale.\n\n Returns:\n tensor: Spectral density.\n \"\"\""
] |
[
{
"param": "omega",
"type": null
},
{
"param": "lam",
"type": null
},
{
"param": "lam_t",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "tensor"
}
],
"raises": [],
"params": [
{
"identifier": "omega",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "lam",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "lam_t",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def psd_matern_12(omega, lam, lam_t):
return 2 * lam_t * lam / (lam ** 2 + omega ** 2)
| 874 | 297 |
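Two worked evaluations of psd_matern_12 above; the function is plain arithmetic, so Python floats work as well as tensors, and the parameter values are arbitrary:

print(psd_matern_12(omega=0.0, lam=2.0, lam_t=1.0))  # 2*1*2 / (4 + 0) = 1.0
print(psd_matern_12(omega=2.0, lam=2.0, lam_t=1.0))  # 2*1*2 / (4 + 4) = 0.5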
ad90e4882b41e059e2805a01c4e3e763c3874f40
|
salayhin/talkofacta
|
eggs/PasteScript-1.7.5-py2.7.egg/paste/script/serve.py
|
[
"MIT"
] |
Python
|
_turn_sigterm_into_systemexit
|
<not_specific>
|
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
|
Attempts to turn a SIGTERM exception into a SystemExit exception.
|
Attempts to turn a SIGTERM exception into a SystemExit exception.
|
[
"Attempts",
"to",
"turn",
"a",
"SIGTERM",
"exception",
"into",
"a",
"SystemExit",
"exception",
"."
] |
def _turn_sigterm_into_systemexit():
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
|
[
"def",
"_turn_sigterm_into_systemexit",
"(",
")",
":",
"try",
":",
"import",
"signal",
"except",
"ImportError",
":",
"return",
"def",
"handle_term",
"(",
"signo",
",",
"frame",
")",
":",
"raise",
"SystemExit",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"handle_term",
")"
] |
Attempts to turn a SIGTERM exception into a SystemExit exception.
|
[
"Attempts",
"to",
"turn",
"a",
"SIGTERM",
"exception",
"into",
"a",
"SystemExit",
"exception",
"."
] |
[
"\"\"\"\n Attempts to turn a SIGTERM exception into a SystemExit exception.\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import signal
def _turn_sigterm_into_systemexit():
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
| 875 | 68 |
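A sketch of the handler above in action; this assumes a POSIX system where a process can deliver SIGTERM to itself:

import os, signal

_turn_sigterm_into_systemexit()
try:
    os.kill(os.getpid(), signal.SIGTERM)  # deliver SIGTERM to this process
except SystemExit:
    print("SIGTERM arrived as SystemExit")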
23c9304061c03a5c7b7dd2c9b972a702492481be
|
HenryKobin/sentinelhub-py
|
sentinelhub/data_collections.py
|
[
"MIT"
] |
Python
|
_raise_invalid_id
| null |
def _raise_invalid_id(collection_id):
""" Checks if a given collection ID conforms to an expected pattern and raises an error if it doesn't
"""
collection_id_pattern = '.{8}-.{4}-.{4}-.{4}-.{12}'
if not re.compile(collection_id_pattern).match(collection_id):
raise ValueError("Given collection id does not match the expected format {}".format(collection_id_pattern))
|
Checks if a given collection ID conforms to an expected pattern and raises an error if it doesn't
|
Checks if a given collection ID conforms to an expected pattern and raises an error if it doesn't
|
[
"Checks",
"if",
"a",
"given",
"collection",
"ID",
"conforms",
"to",
"an",
"expected",
"pattern",
"and",
"raises",
"an",
"error",
"if",
"it",
"doesn",
"'",
"t"
] |
def _raise_invalid_id(collection_id):
collection_id_pattern = '.{8}-.{4}-.{4}-.{4}-.{12}'
if not re.compile(collection_id_pattern).match(collection_id):
raise ValueError("Given collection id does not match the expected format {}".format(collection_id_pattern))
|
[
"def",
"_raise_invalid_id",
"(",
"collection_id",
")",
":",
"collection_id_pattern",
"=",
"'.{8}-.{4}-.{4}-.{4}-.{12}'",
"if",
"not",
"re",
".",
"compile",
"(",
"collection_id_pattern",
")",
".",
"match",
"(",
"collection_id",
")",
":",
"raise",
"ValueError",
"(",
"\"Given collection id does not match the expected format {}\"",
".",
"format",
"(",
"collection_id_pattern",
")",
")"
] |
Checks if a given collection ID conforms to an expected pattern and raises an error if it doesn't
|
[
"Checks",
"if",
"a",
"given",
"collection",
"ID",
"conforms",
"to",
"an",
"expected",
"pattern",
"and",
"raises",
"an",
"error",
"if",
"it",
"doesn",
"'",
"t"
] |
[
"\"\"\" Checks if a given collection ID conforms to an expected pattern and raises an error if it doesn't\n \"\"\""
] |
[
{
"param": "collection_id",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "collection_id",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def _raise_invalid_id(collection_id):
collection_id_pattern = '.{8}-.{4}-.{4}-.{4}-.{12}'
if not re.compile(collection_id_pattern).match(collection_id):
raise ValueError("Given collection id does not match the expected format {}".format(collection_id_pattern))
| 876 | 923 |
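Two illustrative calls for _raise_invalid_id above with made-up IDs; note the pattern only checks the 8-4-4-4-12 shape, since '.' matches any character and re.match anchors at the start only:

_raise_invalid_id("12345678-abcd-ef01-2345-6789abcdef01")  # passes silently
_raise_invalid_id("not-a-collection-id")                   # raises ValueError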
142f5adce110b02163de6d1569ff259385301975
|
lrt512/emol
|
emol/emol/initialize/jinja.py
|
[
"MIT"
] |
Python
|
yes_no_auth
|
<not_specific>
|
def yes_no_auth(card, authorization):
"""Return HTML for yes or no values.
A green FontAwesome checkmark or a red FontAwesome
X depending on the result of check_authorization
"""
if card.has_authorization(authorization):
return '<i class="fa fa-check fa-lg green yes-no-icon"></i>'
return '<i class="fa fa-close fa-lg red yes-no-icon"></i>'
|
Return HTML for yes or no values.
A green FontAwesome checkmark or a red FontAwesome
X depending on the result of check_authorization
|
Return HTML for yes or no values.
A green FontAwesome checkmark or a red FontAwesome
X depending on the result of check_authorization
|
[
"Return",
"HTML",
"for",
"yes",
"or",
"no",
"values",
".",
"A",
"green",
"FontAwesome",
"checkmark",
"or",
"a",
"red",
"FontAwesome",
"X",
"depending",
"on",
"the",
"result",
"of",
"check_authorization"
] |
def yes_no_auth(card, authorization):
if card.has_authorization(authorization):
return '<i class="fa fa-check fa-lg green yes-no-icon"></i>'
return '<i class="fa fa-close fa-lg red yes-no-icon"></i>'
|
[
"def",
"yes_no_auth",
"(",
"card",
",",
"authorization",
")",
":",
"if",
"card",
".",
"has_authorization",
"(",
"authorization",
")",
":",
"return",
"'<i class=\"fa fa-check fa-lg green yes-no-icon\"></i>'",
"return",
"'<i class=\"fa fa-close fa-lg red yes-no-icon\"></i>'"
] |
Return HTML for yes or no values.
|
[
"Return",
"HTML",
"for",
"yes",
"or",
"no",
"values",
"."
] |
[
"\"\"\"Return HTML for yes or no values.\n\n A green FontAwesome checkmark or a red FontAwesome\n X depending on the result of check_authorization\n\n \"\"\""
] |
[
{
"param": "card",
"type": null
},
{
"param": "authorization",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "card",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "authorization",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def yes_no_auth(card, authorization):
if card.has_authorization(authorization):
return '<i class="fa fa-check fa-lg green yes-no-icon"></i>'
return '<i class="fa fa-close fa-lg red yes-no-icon"></i>'
| 878 | 596 |
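A sketch for yes_no_auth above; the card argument only needs a has_authorization method, so a minimal stand-in class is used here for illustration:

class FakeCard:  # stand-in for the real card model
    def __init__(self, auths):
        self.auths = set(auths)
    def has_authorization(self, authorization):
        return authorization in self.auths

card = FakeCard({"rapier"})
yes_no_auth(card, "rapier")   # returns the green fa-check markup
yes_no_auth(card, "longbow")  # returns the red fa-close markup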
88e94fb27fb3f7d6e469a546a28ebaccd2c3278e
|
lakesterful/tara
|
bikeshare_2.py
|
[
"Unlicense"
] |
Python
|
user_stats
| null |
def user_stats(df, city):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# TO DO: Display counts of user types
counts_user = df['User Type'].value_counts()
print('Counts of user type: ',counts_user)
# TO DO: Display counts of gender. Display earliest, most recent, and most common year of birth
if city != 'washington':
counts_gender = df['Gender'].value_counts()
print('Counts of gender: ', counts_gender)
print('Earliest year of birth: ', df['Birth Year'].min())
print('Most recent year of birth: ', df['Birth Year'].max())
print('Most common year of birth: ', df['Birth Year'].mode()[0])
else:
print('The list for Washington does not contain a Gender and Birth Year column.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
|
Displays statistics on bikeshare users.
|
Displays statistics on bikeshare users.
|
[
"Displays",
"statistics",
"on",
"bikeshare",
"users",
"."
] |
def user_stats(df, city):
print('\nCalculating User Stats...\n')
start_time = time.time()
counts_user = df['User Type'].value_counts()
print('Counts of user type: ',counts_user)
if city != 'washington':
counts_gender = df['Gender'].value_counts()
print('Counts of gender: ', counts_gender)
print('Earliest year of birth: ', df['Birth Year'].min())
print('Most recent year of birth: ', df['Birth Year'].max())
print('Most common year of birth: ', df['Birth Year'].mode()[0])
else:
print('The list for Washington does not contain a Gender and Birth Year column.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
|
[
"def",
"user_stats",
"(",
"df",
",",
"city",
")",
":",
"print",
"(",
"'\\nCalculating User Stats...\\n'",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"counts_user",
"=",
"df",
"[",
"'User Type'",
"]",
".",
"value_counts",
"(",
")",
"print",
"(",
"'Counts of user type: '",
",",
"counts_user",
")",
"if",
"city",
"!=",
"'washington'",
":",
"counts_gender",
"=",
"df",
"[",
"'Gender'",
"]",
".",
"value_counts",
"(",
")",
"print",
"(",
"'Counts of gender: '",
",",
"counts_gender",
")",
"print",
"(",
"'Earliest year of birth: '",
",",
"df",
"[",
"'Birth Year'",
"]",
".",
"min",
"(",
")",
")",
"print",
"(",
"'Most recent year of birth: '",
",",
"df",
"[",
"'Birth Year'",
"]",
".",
"max",
"(",
")",
")",
"print",
"(",
"'Most common year of birth: '",
",",
"df",
"[",
"'Birth Year'",
"]",
".",
"mode",
"(",
")",
"[",
"0",
"]",
")",
"else",
":",
"print",
"(",
"'The list for Washington does not contain a Gender and Birth Year column.'",
")",
"print",
"(",
"\"\\nThis took %s seconds.\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"print",
"(",
"'-'",
"*",
"40",
")"
] |
Displays statistics on bikeshare users.
|
[
"Displays",
"statistics",
"on",
"bikeshare",
"users",
"."
] |
[
"\"\"\"Displays statistics on bikeshare users.\"\"\"",
"# TO DO: Display counts of user types",
"# TO DO: Display counts of gender. Display earliest, most recent, and most common year of birth"
] |
[
{
"param": "df",
"type": null
},
{
"param": "city",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "city",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import time
def user_stats(df, city):
print('\nCalculating User Stats...\n')
start_time = time.time()
counts_user = df['User Type'].value_counts()
print('Counts of user type: ',counts_user)
if city != 'washington':
counts_gender = df['Gender'].value_counts()
print('Counts of gender: ', counts_gender)
print('Earliest year of birth: ', df['Birth Year'].min())
print('Most recent year of birth: ', df['Birth Year'].max())
print('Most common year of birth: ', df['Birth Year'].mode()[0])
else:
print('The list for Washington does not contain a Gender and Birth Year column.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
| 879 | 125 |
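A runnable sketch for user_stats above with a tiny invented pandas frame; the column names match the ones the function indexes:

import pandas as pd

df = pd.DataFrame({
    "User Type": ["Subscriber", "Customer", "Subscriber"],
    "Gender": ["Male", "Female", "Female"],
    "Birth Year": [1985, 1992, 1985],
})
user_stats(df, "chicago")  # prints user-type counts plus gender and birth-year stats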
9b0b58656627217d303d6168087a96015492b937
|
goelakash/sagemaker-rl-container
|
src/vw-serving/src/vw_serving/sagemaker/integration.py
|
[
"Apache-2.0"
] |
Python
|
pretty_string_validator
|
<not_specific>
|
def pretty_string_validator(validator):
"""Returns a human friendly description of the string validator or None if not applicable"""
allowed_keys = {'type', 'pattern'}
if validator['type'] == 'string' and set(validator.keys()).issubset(allowed_keys):
message = "a string"
if 'pattern' in validator:
message += " which matches the pattern '{}'".format(validator['pattern'])
return message
else:
return None
|
Returns a human friendly description of the string validator or None if not applicable
|
Returns a human friendly description of the string validator or None if not applicable
|
[
"Returns",
"a",
"human",
"friendly",
"description",
"of",
"the",
"string",
"validator",
"or",
"None",
"if",
"not",
"applicable"
] |
def pretty_string_validator(validator):
allowed_keys = {'type', 'pattern'}
if validator['type'] == 'string' and set(validator.keys()).issubset(allowed_keys):
message = "a string"
if 'pattern' in validator:
message += " which matches the pattern '{}'".format(validator['pattern'])
return message
else:
return None
|
[
"def",
"pretty_string_validator",
"(",
"validator",
")",
":",
"allowed_keys",
"=",
"{",
"'type'",
",",
"'pattern'",
"}",
"if",
"validator",
"[",
"'type'",
"]",
"==",
"'string'",
"and",
"set",
"(",
"validator",
".",
"keys",
"(",
")",
")",
".",
"issubset",
"(",
"allowed_keys",
")",
":",
"message",
"=",
"\"a string\"",
"if",
"'pattern'",
"in",
"validator",
":",
"message",
"+=",
"\" which matches the pattern '{}'\"",
".",
"format",
"(",
"validator",
"[",
"'pattern'",
"]",
")",
"return",
"message",
"else",
":",
"return",
"None"
] |
Returns a human friendly description of the string validator or None if not applicable
|
[
"Returns",
"a",
"human",
"friendly",
"description",
"of",
"the",
"string",
"validator",
"or",
"None",
"if",
"not",
"applicable"
] |
[
"\"\"\"Returns a human friendly description of the string validator or None if not applicable\"\"\""
] |
[
{
"param": "validator",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "validator",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def pretty_string_validator(validator):
allowed_keys = {'type', 'pattern'}
if validator['type'] == 'string' and set(validator.keys()).issubset(allowed_keys):
message = "a string"
if 'pattern' in validator:
message += " which matches the pattern '{}'".format(validator['pattern'])
return message
else:
return None
| 880 | 952 |
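Three illustrative inputs for pretty_string_validator above; the validator dicts are invented but follow the {'type', 'pattern'} shape the function whitelists:

pretty_string_validator({"type": "string"})                         # 'a string'
pretty_string_validator({"type": "string", "pattern": "^[a-z]+$"})  # "a string which matches the pattern '^[a-z]+$'"
pretty_string_validator({"type": "string", "minLength": 3})         # None (extra key -> not applicable)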
ddb8cfb5f3d8699806c106de358c19b1d5c31367
|
raneem11/DynamicHead
|
web_data/augmentation.py
|
[
"MIT"
] |
Python
|
policy_v0
|
<not_specific>
|
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
|
Autoaugment policy that was used in AutoAugment Detection Paper.
|
Autoaugment policy that was used in AutoAugment Detection Paper.
|
[
"Autoaugment",
"policy",
"that",
"was",
"used",
"in",
"AutoAugment",
"Detection",
"Paper",
"."
] |
def policy_v0():
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
|
[
"def",
"policy_v0",
"(",
")",
":",
"policy",
"=",
"[",
"[",
"(",
"'TranslateX_BBox'",
",",
"0.6",
",",
"4",
")",
",",
"(",
"'Equalize'",
",",
"0.8",
",",
"10",
")",
"]",
",",
"[",
"(",
"'TranslateY_Only_BBoxes'",
",",
"0.2",
",",
"2",
")",
",",
"(",
"'Cutout'",
",",
"0.8",
",",
"8",
")",
"]",
",",
"[",
"(",
"'Sharpness'",
",",
"0.0",
",",
"8",
")",
",",
"(",
"'ShearX_BBox'",
",",
"0.4",
",",
"0",
")",
"]",
",",
"[",
"(",
"'ShearY_BBox'",
",",
"1.0",
",",
"2",
")",
",",
"(",
"'TranslateY_Only_BBoxes'",
",",
"0.6",
",",
"6",
")",
"]",
",",
"[",
"(",
"'Rotate_BBox'",
",",
"0.6",
",",
"10",
")",
",",
"(",
"'Color'",
",",
"1.0",
",",
"6",
")",
"]",
",",
"]",
"return",
"policy"
] |
Autoaugment policy that was used in AutoAugment Detection Paper.
|
[
"Autoaugment",
"policy",
"that",
"was",
"used",
"in",
"AutoAugment",
"Detection",
"Paper",
"."
] |
[
"\"\"\"Autoaugment policy that was used in AutoAugment Detection Paper.\"\"\"",
"# Each tuple is an augmentation operation of the form",
"# (operation, probability, magnitude). Each element in policy is a",
"# sub-policy that will be applied sequentially on the image."
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def policy_v0():
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
| 881 | 749 |
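A sketch of how the policy returned above is typically consumed: each sub-policy is a list of (operation, probability, magnitude) tuples applied in sequence:

for i, sub_policy in enumerate(policy_v0()):
    for op, prob, magnitude in sub_policy:
        print(f"sub-policy {i}: {op} p={prob} magnitude={magnitude}")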
8f7038af4f6294245008fe2e65f87fe40be64b87
|
R3v1L/evogtk
|
evogtk/tools/__init__.py
|
[
"MIT"
] |
Python
|
openWithDefaultApp
| null |
def openWithDefaultApp(filename):
"""
Open a file in default system application
"""
if sys.platform in ['win32','cygwin']:
os.startfile(filename)
elif sys.platform == 'linux2':
os.system('/usr/bin/xdg-open "%s"' % filename)
elif sys.platform =='darwin':
os.system('open "%s"' % filename)
elif os.name == 'nt':
os.startfile(filename)
elif os.name == 'posix':
os.system('/usr/bin/xdg-open "%s"' % filename)
|
Open a file in default system application
|
Open a file in default system application
|
[
"Open",
"a",
"file",
"in",
"default",
"system",
"application"
] |
def openWithDefaultApp(filename):
if sys.platform in ['win32','cygwin']:
os.startfile(filename)
elif sys.platform == 'linux2':
os.system('/usr/bin/xdg-open "%s"' % filename)
elif sys.platform =='darwin':
os.system('open "%s"' % filename)
elif os.name == 'nt':
os.startfile(filename)
elif os.name == 'posix':
os.system('/usr/bin/xdg-open "%s"' % filename)
|
[
"def",
"openWithDefaultApp",
"(",
"filename",
")",
":",
"if",
"sys",
".",
"platform",
"in",
"[",
"'win32'",
",",
"'cygwin'",
"]",
":",
"os",
".",
"startfile",
"(",
"filename",
")",
"elif",
"sys",
".",
"platform",
"==",
"'linux2'",
":",
"os",
".",
"system",
"(",
"'/usr/bin/xdg-open \"%s\"'",
"%",
"filename",
")",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"os",
".",
"system",
"(",
"'open \"%s\"'",
"%",
"filename",
")",
"elif",
"os",
".",
"name",
"==",
"'nt'",
":",
"os",
".",
"startfile",
"(",
"filename",
")",
"elif",
"os",
".",
"name",
"==",
"'posix'",
":",
"os",
".",
"system",
"(",
"'/usr/bin/xdg-open \"%s\"'",
"%",
"filename",
")"
] |
Open a file in default system application
|
[
"Open",
"a",
"file",
"in",
"default",
"system",
"application"
] |
[
"\"\"\"\n Open a file in default system application\n \"\"\""
] |
[
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import sys
import os
def openWithDefaultApp(filename):
if sys.platform in ['win32','cygwin']:
os.startfile(filename)
elif sys.platform == 'linux2':
os.system('/usr/bin/xdg-open "%s"' % filename)
elif sys.platform =='darwin':
os.system('open "%s"' % filename)
elif os.name == 'nt':
os.startfile(filename)
elif os.name == 'posix':
os.system('/usr/bin/xdg-open "%s"' % filename)
| 882 | 480 |
cd084741f783a0ea12babbcc5ae828dfc408040e
|
akshaygupta2208/hangoutsbot
|
hangupsbot/plugins/forecast.py
|
[
"Unlicense"
] |
Python
|
_get_forcast_units
|
<not_specific>
|
def _get_forcast_units(result):
"""
    Checks to see what unit the results were passed back as and sets the display units accordingly
"""
units = {
'temperature': 'F',
'distance': 'Miles',
'percipIntensity': 'in./hr.',
'precipAccumulation': 'inches',
'windSpeed': 'mph',
'pressure': 'millibars'
}
if result['flags']:
unit = result['flags']['units']
if unit != 'us':
units['temperature'] = 'C'
units['distance'] = 'KM'
units['percipIntensity'] = 'milimeters per hour'
units['precipAccumulation'] = 'centimeters'
units['windSpeed'] = 'm/s'
units['pressure'] = 'kPa'
if unit == 'ca':
units['windSpeed'] = 'km/h'
if unit == 'uk2':
units['windSpeed'] = 'mph'
units['distance'] = 'Miles'
return units
|
Checks to see what unit the results were passed back as and sets the display units accordingly
|
Checks to see what unit the results were passed back as and sets the display units accordingly
|
[
    "Checks",
    "to",
    "see",
    "what",
    "unit",
"the",
"results",
"were",
"passed",
"back",
"as",
"and",
"sets",
"the",
"display",
"units",
"accordingly"
] |
def _get_forcast_units(result):
units = {
'temperature': 'F',
'distance': 'Miles',
'percipIntensity': 'in./hr.',
'precipAccumulation': 'inches',
'windSpeed': 'mph',
'pressure': 'millibars'
}
if result['flags']:
unit = result['flags']['units']
if unit != 'us':
units['temperature'] = 'C'
units['distance'] = 'KM'
units['percipIntensity'] = 'milimeters per hour'
units['precipAccumulation'] = 'centimeters'
units['windSpeed'] = 'm/s'
units['pressure'] = 'kPa'
if unit == 'ca':
units['windSpeed'] = 'km/h'
if unit == 'uk2':
units['windSpeed'] = 'mph'
units['distance'] = 'Miles'
return units
|
[
"def",
"_get_forcast_units",
"(",
"result",
")",
":",
"units",
"=",
"{",
"'temperature'",
":",
"'F'",
",",
"'distance'",
":",
"'Miles'",
",",
"'percipIntensity'",
":",
"'in./hr.'",
",",
"'precipAccumulation'",
":",
"'inches'",
",",
"'windSpeed'",
":",
"'mph'",
",",
"'pressure'",
":",
"'millibars'",
"}",
"if",
"result",
"[",
"'flags'",
"]",
":",
"unit",
"=",
"result",
"[",
"'flags'",
"]",
"[",
"'units'",
"]",
"if",
"unit",
"!=",
"'us'",
":",
"units",
"[",
"'temperature'",
"]",
"=",
"'C'",
"units",
"[",
"'distance'",
"]",
"=",
"'KM'",
"units",
"[",
"'percipIntensity'",
"]",
"=",
"'milimeters per hour'",
"units",
"[",
"'precipAccumulation'",
"]",
"=",
"'centimeters'",
"units",
"[",
"'windSpeed'",
"]",
"=",
"'m/s'",
"units",
"[",
"'pressure'",
"]",
"=",
"'kPa'",
"if",
"unit",
"==",
"'ca'",
":",
"units",
"[",
"'windSpeed'",
"]",
"=",
"'km/h'",
"if",
"unit",
"==",
"'uk2'",
":",
"units",
"[",
"'windSpeed'",
"]",
"=",
"'mph'",
"units",
"[",
"'distance'",
"]",
"=",
"'Miles'",
"return",
"units"
] |
Checks to see what unit the results were passed back as and sets the display units accordingly
|
[
    "Checks",
    "to",
    "see",
    "what",
    "unit",
"the",
"results",
"were",
"passed",
"back",
"as",
"and",
"sets",
"the",
"display",
"units",
"accordingly"
] |
[
    "\"\"\"\n    Checks to see what unit the results were passed back as and sets the display units accordingly\n    \"\"\""
] |
[
{
"param": "result",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "result",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _get_forcast_units(result):
units = {
'temperature': 'F',
'distance': 'Miles',
'percipIntensity': 'in./hr.',
'precipAccumulation': 'inches',
'windSpeed': 'mph',
'pressure': 'millibars'
}
if result['flags']:
unit = result['flags']['units']
if unit != 'us':
units['temperature'] = 'C'
units['distance'] = 'KM'
units['percipIntensity'] = 'milimeters per hour'
units['precipAccumulation'] = 'centimeters'
units['windSpeed'] = 'm/s'
units['pressure'] = 'kPa'
if unit == 'ca':
units['windSpeed'] = 'km/h'
if unit == 'uk2':
units['windSpeed'] = 'mph'
units['distance'] = 'Miles'
return units
| 883 | 201 |
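A sketch for _get_forcast_units above with a minimal invented response fragment; only result['flags']['units'] is consulted:

result = {"flags": {"units": "ca"}}
units = _get_forcast_units(result)
print(units["temperature"], units["windSpeed"])  # C km/h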
183542f4c4bb82b2aab92c8adb23f5cbb8d5d321
|
JonasGlobevnik/Project_GB_1718
|
src/evaluation.py
|
[
"Apache-2.0"
] |
Python
|
idx_tuple_in_df
|
<not_specific>
|
def idx_tuple_in_df(tuple_x, df):
"""
Find the first row index of tuple_x in df
"""
for i,v in enumerate(df.values):
if tuple_x == tuple(v):
res = i
break
else:
res=None
return res
|
Find the first row index of tuple_x in df
|
Find the first row index of tuple_x in df
|
[
"Find",
"the",
"first",
"row",
"index",
"of",
"tuple_x",
"in",
"df"
] |
def idx_tuple_in_df(tuple_x, df):
for i,v in enumerate(df.values):
if tuple_x == tuple(v):
res = i
break
else:
res=None
return res
|
[
"def",
"idx_tuple_in_df",
"(",
"tuple_x",
",",
"df",
")",
":",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"df",
".",
"values",
")",
":",
"if",
"tuple_x",
"==",
"tuple",
"(",
"v",
")",
":",
"res",
"=",
"i",
"break",
"else",
":",
"res",
"=",
"None",
"return",
"res"
] |
Find the first row index of tuple_x in df
|
[
"Find",
"the",
"first",
"row",
"index",
"of",
"tuple_x",
"in",
"df"
] |
[
"\"\"\"\n Find the first row index of tuple_x in df\n\n \"\"\""
] |
[
{
"param": "tuple_x",
"type": null
},
{
"param": "df",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "tuple_x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def idx_tuple_in_df(tuple_x, df):
for i,v in enumerate(df.values):
if tuple_x == tuple(v):
res = i
break
else:
res=None
return res
| 884 | 105 |
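A runnable sketch for idx_tuple_in_df above; the frame is invented, and the tuple must match an entire row across all columns, in column order:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
idx_tuple_in_df((2, 5), df)  # 1    (first matching row position)
idx_tuple_in_df((9, 9), df)  # None (no row matches)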
7b510ee49e48f24e640c2c7b3642cf5121e8770e
|
plaplant/hera_opm
|
hera_opm/mf_tools.py
|
[
"BSD-2-Clause"
] |
Python
|
clean_wrapper_scripts
|
<not_specific>
|
def clean_wrapper_scripts(work_dir):
"""Clean up wrapper scripts from work directory.
This script removes any files in the specified directory that begin with
"wrapper_", which is how the scripts are named in the
'build_makeflow_from_config' function above. It also removes files that end
in ".wrapper", which is how makeflow labels wrapper scripts for batch
processing.
Parameters
----------
work_dir : str
The full path to the work directory.
Returns
-------
None
"""
# list files in work directory
files = os.listdir(work_dir)
wrapper_files = [
fn for fn in files if fn[:8] == "wrapper_" or fn[-8:] == ".wrapper"
]
# remove files; assumes individual files (and not directories)
for fn in wrapper_files:
abspath = os.path.join(work_dir, fn)
os.remove(abspath)
return
|
Clean up wrapper scripts from work directory.
This script removes any files in the specified directory that begin with
"wrapper_", which is how the scripts are named in the
'build_makeflow_from_config' function above. It also removes files that end
in ".wrapper", which is how makeflow labels wrapper scripts for batch
processing.
Parameters
----------
work_dir : str
The full path to the work directory.
Returns
-------
None
|
Clean up wrapper scripts from work directory.
This script removes any files in the specified directory that begin with
"wrapper_", which is how the scripts are named in the
'build_makeflow_from_config' function above. It also removes files that end
in ".wrapper", which is how makeflow labels wrapper scripts for batch
processing.
Parameters
work_dir : str
The full path to the work directory.
Returns
None
|
[
"Clean",
"up",
"wrapper",
"scripts",
"from",
"work",
"directory",
".",
"This",
"script",
"removes",
"any",
"files",
"in",
"the",
"specified",
"directory",
"that",
"begin",
"with",
"\"",
"wrapper_",
"\"",
"which",
"is",
"how",
"the",
"scripts",
"are",
"named",
"in",
"the",
"'",
"build_makeflow_from_config",
"'",
"function",
"above",
".",
"It",
"also",
"removes",
"files",
"that",
"end",
"in",
"\"",
".",
"wrapper",
"\"",
"which",
"is",
"how",
"makeflow",
"labels",
"wrapper",
"scripts",
"for",
"batch",
"processing",
".",
"Parameters",
"work_dir",
":",
"str",
"The",
"full",
"path",
"to",
"the",
"work",
"directory",
".",
"Returns",
"None"
] |
def clean_wrapper_scripts(work_dir):
files = os.listdir(work_dir)
wrapper_files = [
fn for fn in files if fn[:8] == "wrapper_" or fn[-8:] == ".wrapper"
]
for fn in wrapper_files:
abspath = os.path.join(work_dir, fn)
os.remove(abspath)
return
|
[
"def",
"clean_wrapper_scripts",
"(",
"work_dir",
")",
":",
"files",
"=",
"os",
".",
"listdir",
"(",
"work_dir",
")",
"wrapper_files",
"=",
"[",
"fn",
"for",
"fn",
"in",
"files",
"if",
"fn",
"[",
":",
"8",
"]",
"==",
"\"wrapper_\"",
"or",
"fn",
"[",
"-",
"8",
":",
"]",
"==",
"\".wrapper\"",
"]",
"for",
"fn",
"in",
"wrapper_files",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"fn",
")",
"os",
".",
"remove",
"(",
"abspath",
")",
"return"
] |
Clean up wrapper scripts from work directory.
|
[
"Clean",
"up",
"wrapper",
"scripts",
"from",
"work",
"directory",
"."
] |
[
"\"\"\"Clean up wrapper scripts from work directory.\n\n This script removes any files in the specified directory that begin with\n \"wrapper_\", which is how the scripts are named in the\n 'build_makeflow_from_config' function above. It also removes files that end\n in \".wrapper\", which is how makeflow labels wrapper scripts for batch\n processing.\n\n Parameters\n ----------\n work_dir : str\n The full path to the work directory.\n\n Returns\n -------\n None\n\n \"\"\"",
"# list files in work directory",
"# remove files; assumes individual files (and not directories)"
] |
[
{
"param": "work_dir",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "work_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def clean_wrapper_scripts(work_dir):
files = os.listdir(work_dir)
wrapper_files = [
fn for fn in files if fn[:8] == "wrapper_" or fn[-8:] == ".wrapper"
]
for fn in wrapper_files:
abspath = os.path.join(work_dir, fn)
os.remove(abspath)
return
| 886 | 822 |
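A minimal usage sketch for `clean_wrapper_scripts` above; the scratch directory and file names are hypothetical:

import os, tempfile

work = tempfile.mkdtemp()
for name in ('wrapper_do_x.sh', 'job_0.wrapper', 'keep.log'):
    open(os.path.join(work, name), 'w').close()
clean_wrapper_scripts(work)
print(sorted(os.listdir(work)))  # -> ['keep.log']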
f23dc658f4c3448a7816db93093611e4c3130a08
|
monkeywithacupcake/pygorithm
|
pygorithm/sorting/insertion_sort.py
|
[
"MIT"
] |
Python
|
time_complexities
|
<not_specific>
|
def time_complexities():
"""
    Return information on function's
time complexity
:return: string
"""
return "Best Case: O(n), Average Case: O(n ^ 2), Worst Case: O(n ^ 2)"
|
Return information on function's
time complexity
:return: string
|
Return information on function's
time complexity
|
[
"Return",
"information",
"on",
"functions",
"time",
"complexity"
] |
def time_complexities():
return "Best Case: O(n), Average Case: O(n ^ 2), Worst Case: O(n ^ 2)"
|
[
"def",
"time_complexities",
"(",
")",
":",
"return",
"\"Best Case: O(n), Average Case: O(n ^ 2), Worst Case: O(n ^ 2)\""
] |
Return information on function's
time complexity
|
[
"Return",
"information",
"on",
"functions",
"time",
"complexity"
] |
[
"\"\"\"\n Return information on functions\n time complexity\n :return: string\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def time_complexities():
return "Best Case: O(n), Average Case: O(n ^ 2), Worst Case: O(n ^ 2)"
| 887 | 857 |
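A one-line usage sketch for `time_complexities` above:

print(time_complexities())  # -> Best Case: O(n), Average Case: O(n ^ 2), Worst Case: O(n ^ 2)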
72faab8d6bc25a48f492e19987ee1318423fcc8b
|
Kotti/kotti_contentgenerator
|
kotti_contentgenerator/generator.py
|
[
"BSD-2-Clause"
] |
Python
|
infinite_list
| null |
def infinite_list(list_):
""" return an infinite stream of items from a list
:param list_: a python list
"""
i = 0
while True:
yield list_[i]
i = i + 1
if i == len(list_):
i = 0
|
return an infinite stream of items from a list
:param list_: a python list
|
return an infinite stream of items from a list
|
[
"return",
"an",
"infinite",
"stream",
"of",
"items",
"from",
"a",
"list"
] |
def infinite_list(list_):
i = 0
while True:
yield list_[i]
i = i + 1
if i == len(list_):
i = 0
|
[
"def",
"infinite_list",
"(",
"list_",
")",
":",
"i",
"=",
"0",
"while",
"True",
":",
"yield",
"list_",
"[",
"i",
"]",
"i",
"=",
"i",
"+",
"1",
"if",
"i",
"==",
"len",
"(",
"list_",
")",
":",
"i",
"=",
"0"
] |
return an infinite stream of items from a list
|
[
"return",
"an",
"infinite",
"stream",
"of",
"items",
"from",
"a",
"list"
] |
[
"\"\"\" return an infinite stream of items from a list\n\n :param list_: a python list\n \"\"\""
] |
[
{
"param": "list_",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "list_",
"type": null,
"docstring": "a python list",
"docstring_tokens": [
"a",
"python",
"list"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def infinite_list(list_):
i = 0
while True:
yield list_[i]
i = i + 1
if i == len(list_):
i = 0
| 888 | 23 |
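A minimal usage sketch for `infinite_list` above; `islice` bounds the otherwise endless generator:

from itertools import islice

gen = infinite_list(['a', 'b', 'c'])
print(list(islice(gen, 7)))  # -> ['a', 'b', 'c', 'a', 'b', 'c', 'a']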
6a3e80ba0dfd14103932b2156abe82b56739d1f5
|
TomStarshak/project-euler-helper
|
euler_helper/euler_helper.py
|
[
"MIT"
] |
Python
|
is_prime
|
<not_specific>
|
def is_prime(n):
'''Returns True if n is prime, otherwise False'''
# SPECIALCASE: These are special cases that are not primes
if n % 2 == 0 and n != 2 or n <= 1:
return False
# Look for a factor
for i in range(3, int(n ** (0.5)) + 1, 2):
# If there is a factor
if n % i == 0:
# Its not a prime number
return False
return True
|
Returns True if n is prime, otherwise False
|
Returns True if n is prime, otherwise False
|
[
"Returns",
"True",
"if",
"n",
"is",
"prime",
"otherwise",
"False"
] |
def is_prime(n):
if n % 2 == 0 and n != 2 or n <= 1:
return False
for i in range(3, int(n ** (0.5)) + 1, 2):
if n % i == 0:
return False
return True
|
[
"def",
"is_prime",
"(",
"n",
")",
":",
"if",
"n",
"%",
"2",
"==",
"0",
"and",
"n",
"!=",
"2",
"or",
"n",
"<=",
"1",
":",
"return",
"False",
"for",
"i",
"in",
"range",
"(",
"3",
",",
"int",
"(",
"n",
"**",
"(",
"0.5",
")",
")",
"+",
"1",
",",
"2",
")",
":",
"if",
"n",
"%",
"i",
"==",
"0",
":",
"return",
"False",
"return",
"True"
] |
Returns True if n is prime, otherwise False
|
[
"Returns",
"True",
"if",
"n",
"is",
"prime",
"otherwise",
"False"
] |
[
"'''Returns True if n is prime, otherwise False'''",
"# SPECIALCASE: These are special cases that are not primes\r",
"# Look for a factor\r",
"# If there is a factor\r",
"# Its not a prime number\r"
] |
[
{
"param": "n",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_prime(n):
if n % 2 == 0 and n != 2 or n <= 1:
return False
for i in range(3, int(n ** (0.5)) + 1, 2):
if n % i == 0:
return False
return True
| 889 | 669 |
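A minimal usage sketch for `is_prime` above:

print([n for n in range(2, 20) if is_prime(n)])  # -> [2, 3, 5, 7, 11, 13, 17, 19]
print(is_prime(1), is_prime(9))  # -> False False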
ae9e915e91bc592f4848dc439aa05fbb51d38776
|
iacopy/coveragepy
|
tests/test_farm.py
|
[
"Apache-2.0"
] |
Python
|
contains_any
|
<not_specific>
|
def contains_any(filename, *strlist):
"""Check that the file contains at least one of a list of strings.
An assert will be raised if none of the arguments in `strlist` is in
`filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
if s in text:
return
assert False, "Missing content in %s: %r [1 of %d]" % (filename, strlist[0], len(strlist),)
|
Check that the file contains at least one of a list of strings.
An assert will be raised if none of the arguments in `strlist` is in
`filename`.
|
Check that the file contains at least one of a list of strings.
An assert will be raised if none of the arguments in `strlist` is in
`filename`.
|
[
"Check",
"that",
"the",
"file",
"contains",
"at",
"least",
"one",
"of",
"a",
"list",
"of",
"strings",
".",
"An",
"assert",
"will",
"be",
"raised",
"if",
"none",
"of",
"the",
"arguments",
"in",
"`",
"strlist",
"`",
"is",
"in",
"`",
"filename",
"`",
"."
] |
def contains_any(filename, *strlist):
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
if s in text:
return
assert False, "Missing content in %s: %r [1 of %d]" % (filename, strlist[0], len(strlist),)
|
[
"def",
"contains_any",
"(",
"filename",
",",
"*",
"strlist",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"fobj",
":",
"text",
"=",
"fobj",
".",
"read",
"(",
")",
"for",
"s",
"in",
"strlist",
":",
"if",
"s",
"in",
"text",
":",
"return",
"assert",
"False",
",",
"\"Missing content in %s: %r [1 of %d]\"",
"%",
"(",
"filename",
",",
"strlist",
"[",
"0",
"]",
",",
"len",
"(",
"strlist",
")",
",",
")"
] |
Check that the file contains at least one of a list of strings.
|
[
"Check",
"that",
"the",
"file",
"contains",
"at",
"least",
"one",
"of",
"a",
"list",
"of",
"strings",
"."
] |
[
"\"\"\"Check that the file contains at least one of a list of strings.\n\n An assert will be raised if none of the arguments in `strlist` is in\n `filename`.\n\n \"\"\""
] |
[
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def contains_any(filename, *strlist):
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
if s in text:
return
assert False, "Missing content in %s: %r [1 of %d]" % (filename, strlist[0], len(strlist),)
| 890 | 556 |
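A minimal usage sketch for `contains_any` above; the temporary file is hypothetical:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('coverage report\n')
    path = f.name
contains_any(path, 'missing', 'coverage')  # passes silently: one string matched
try:
    contains_any(path, 'absent')
except AssertionError as err:
    print(err)  # Missing content in ...: 'absent' [1 of 1]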
f978a48f82509dfe5cb910f1ab81591d1a3b8a39
|
mobergd/interfaces
|
chemkin_io/mechparser/reaction.py
|
[
"Apache-2.0"
] |
Python
|
units
|
<not_specific>
|
def units(block_str):
""" get the units for the rate parameters
"""
# print('block string')
# print(block_str)
# print(block_str.strip().splitlines()[0])
units_str = block_str.strip().splitlines()[0]
units_lst = units_str.split()
if units_lst:
ea_units = units_lst[0].lower()
a_units = units_lst[1].lower()
else:
ea_units = 'cal/mole'
a_units = 'moles'
return ea_units, a_units
|
get the units for the rate parameters
|
get the units for the rate parameters
|
[
"get",
"the",
"units",
"for",
"the",
"rate",
"parameters"
] |
def units(block_str):
units_str = block_str.strip().splitlines()[0]
units_lst = units_str.split()
if units_lst:
ea_units = units_lst[0].lower()
a_units = units_lst[1].lower()
else:
ea_units = 'cal/mole'
a_units = 'moles'
return ea_units, a_units
|
[
"def",
"units",
"(",
"block_str",
")",
":",
"units_str",
"=",
"block_str",
".",
"strip",
"(",
")",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"units_lst",
"=",
"units_str",
".",
"split",
"(",
")",
"if",
"units_lst",
":",
"ea_units",
"=",
"units_lst",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"a_units",
"=",
"units_lst",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"else",
":",
"ea_units",
"=",
"'cal/mole'",
"a_units",
"=",
"'moles'",
"return",
"ea_units",
",",
"a_units"
] |
get the units for the rate parameters
|
[
"get",
"the",
"units",
"for",
"the",
"rate",
"parameters"
] |
[
"\"\"\" get the units for the rate parameters\n \"\"\"",
"# print('block string')",
"# print(block_str)",
"# print(block_str.strip().splitlines()[0])"
] |
[
{
"param": "block_str",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "block_str",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def units(block_str):
units_str = block_str.strip().splitlines()[0]
units_lst = units_str.split()
if units_lst:
ea_units = units_lst[0].lower()
a_units = units_lst[1].lower()
else:
ea_units = 'cal/mole'
a_units = 'moles'
return ea_units, a_units
| 892 | 24 |
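A minimal usage sketch for `units` above; the CHEMKIN block whose header names the units is hypothetical:

block = 'CAL/MOLE  MOLES\nH + O2 = OH + O   1.0E14  0.0  16800\n'
print(units(block))  # -> ('cal/mole', 'moles')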
5e8e0614e6e7dadb56511ae3879bb1e4063f579f
|
ramgtv/mongo
|
buildscripts/errorcodes.py
|
[
"Apache-2.0"
] |
Python
|
isTerminated
|
<not_specific>
|
def isTerminated( lines ):
"""Given .cpp/.h source lines as text, determine if assert is terminated."""
x = " ".join(lines)
return ';' in x \
or x.count('(') - x.count(')') <= 0
|
Given .cpp/.h source lines as text, determine if assert is terminated.
|
Given .cpp/.h source lines as text, determine if assert is terminated.
|
[
"Given",
".",
"cpp",
"/",
".",
"h",
"source",
"lines",
"as",
"text",
"determine",
"if",
"assert",
"is",
"terminated",
"."
] |
def isTerminated( lines ):
x = " ".join(lines)
return ';' in x \
or x.count('(') - x.count(')') <= 0
|
[
"def",
"isTerminated",
"(",
"lines",
")",
":",
"x",
"=",
"\" \"",
".",
"join",
"(",
"lines",
")",
"return",
"';'",
"in",
"x",
"or",
"x",
".",
"count",
"(",
"'('",
")",
"-",
"x",
".",
"count",
"(",
"')'",
")",
"<=",
"0"
] |
Given .cpp/.h source lines as text, determine if assert is terminated.
|
[
"Given",
".",
"cpp",
"/",
".",
"h",
"source",
"lines",
"as",
"text",
"determine",
"if",
"assert",
"is",
"terminated",
"."
] |
[
"\"\"\"Given .cpp/.h source lines as text, determine if assert is terminated.\"\"\""
] |
[
{
"param": "lines",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "lines",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def isTerminated( lines ):
x = " ".join(lines)
return ';' in x \
or x.count('(') - x.count(')') <= 0
| 893 | 808 |
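A minimal usage sketch for `isTerminated` above; the C++ source fragments are hypothetical:

print(isTerminated(['uassert(123,', '"msg");']))  # -> True: the statement is closed
print(isTerminated(['uassert(123,', '"msg"']))    # -> False: open parenthesis, no semicolon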
7dea4bec92c507587e0f8bd81afdf0f87a72727f
|
fengggli/pegasus
|
lib/pegasus/python/Pegasus/plots_stats/utils.py
|
[
"Apache-2.0"
] |
Python
|
create_home_button
|
<not_specific>
|
def create_home_button():
"""
Utility method for creating a home button
"""
html_content ="""
<a href="../index.html" target="_self"><img src="images/home.png" width =72 height =72 border =0 title ="Home" alt ="Home"></a>
"""
return html_content
|
Utility method for creating a home button
|
Utility method for creating a home button
|
[
"Utility",
"method",
"for",
"creating",
"a",
"home",
"button"
] |
def create_home_button():
html_content ="""
<a href="../index.html" target="_self"><img src="images/home.png" width =72 height =72 border =0 title ="Home" alt ="Home"></a>
"""
return html_content
|
[
"def",
"create_home_button",
"(",
")",
":",
"html_content",
"=",
"\"\"\"\n<a href=\"../index.html\" target=\"_self\"><img src=\"images/home.png\" width =72 height =72 border =0 title =\"Home\" alt =\"Home\"></a>\n\t\"\"\"",
"return",
"html_content"
] |
Utility method for creating a home button
|
[
"Utility",
"method",
"for",
"creating",
"a",
"home",
"button"
] |
[
"\"\"\"\n\tUtility method for creating a home button\n\t\"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def create_home_button():
html_content ="""
<a href="../index.html" target="_self"><img src="images/home.png" width =72 height =72 border =0 title ="Home" alt ="Home"></a>
"""
return html_content
| 894 | 857 |
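A one-line usage sketch for `create_home_button` above; the snippet is meant to be embedded in a generated report page:

print(create_home_button())  # emits the <a href="../index.html"> home-image link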
cb3a5cfb0775a610c1c0fa736758e5113b82d364
|
gladstone-institutes/RASLseqTools
|
RASLseqTools/RASLseqAnalysis_old.py
|
[
"MIT"
] |
Python
|
observed_wellbc_extraction
|
<not_specific>
|
def observed_wellbc_extraction(line, AD1='GGAAGCCTTGGCTTTTG'):
'''
This function returns the sequence (observed wellbc) before the first adaptor sequence
ToDo, fix alignments to well bc
Parameters
----------
line: Pandas dataframe line (Series)
requires column: 'seq' possessing fastq read sequence
AD1: str, optional, default='GGAAGCCTTGGCTTTTG'
Specifies the first adaptor sequence expected in the fastq read
Returns
-------
    Fastq sequence before first occurrence of AD1
or
'0' if AD1 not in seq sequence
'''
seq = line['seq']
if AD1 in seq:
return seq[ : seq.index(AD1)]
else: return '0'
|
This function returns the sequence (observed wellbc) before the first adaptor sequence
ToDo, fix alignments to well bc
Parameters
----------
line: Pandas dataframe line (Series)
requires column: 'seq' possessing fastq read sequence
AD1: str, optional, default='GGAAGCCTTGGCTTTTG'
Specifies the first adaptor sequence expected in the fastq read
Returns
-------
Fastq sequence before first occurrence of AD1
or
'0' if AD1 not in seq sequence
|
This function returns the sequence (observed wellbc) before the first adaptor sequence
ToDo, fix alignments to well bc
Parameters
Pandas dataframe line (Series)
requires column: 'seq' possessing fastq read sequence
str, optional, default='GGAAGCCTTGGCTTTTG'
Specifies the first adaptor sequence expected in the fastq read
Returns
Fastq sequence before first occurrence of AD1
or
'0' if AD1 not in seq sequence
|
[
"This",
"function",
"returns",
"the",
"sequence",
"(",
"observed",
"wellbc",
")",
"before",
"the",
"first",
"adaptor",
"sequence",
"ToDo",
"fix",
"alignments",
"to",
"well",
"bc",
"Parameters",
"Pandas",
"dataframe",
"line",
"(",
"Series",
")",
"requires",
"column",
":",
"'",
"seq",
"'",
"possessing",
"fastq",
"read",
"sequence",
"str",
"optional",
"default",
"=",
"'",
"GGAAGCCTTGGCTTTTG",
"'",
"Specifies",
"the",
"first",
"adaptor",
"sequence",
"expected",
"in",
"the",
"fastq",
"read",
"Returns",
"Fastq",
"sequence",
"before",
"first",
"occurent",
"of",
"AD1",
"or",
"'",
"0",
"'",
"if",
"AD1",
"not",
"in",
"seq",
"sequence"
] |
def observed_wellbc_extraction(line, AD1='GGAAGCCTTGGCTTTTG'):
seq = line['seq']
if AD1 in seq:
return seq[ : seq.index(AD1)]
else: return '0'
|
[
"def",
"observed_wellbc_extraction",
"(",
"line",
",",
"AD1",
"=",
"'GGAAGCCTTGGCTTTTG'",
")",
":",
"seq",
"=",
"line",
"[",
"'seq'",
"]",
"if",
"AD1",
"in",
"seq",
":",
"return",
"seq",
"[",
":",
"seq",
".",
"index",
"(",
"AD1",
")",
"]",
"else",
":",
"return",
"'0'"
] |
This function returns the sequence (observed wellbc) before the first adaptor sequence
ToDo, fix alignments to well bc
|
[
"This",
"function",
"returns",
"the",
"sequence",
"(",
"observed",
"wellbc",
")",
"before",
"the",
"first",
"adaptor",
"sequence",
"ToDo",
"fix",
"alignments",
"to",
"well",
"bc"
] |
[
"''' \n This function returns the sequence (observed wellbc) before the first adaptor sequence \n ToDo, fix alignments to well bc\n \n Parameters\n ----------\n line: Pandas dataframe line (Series)\n requires column: 'seq' possessing fastq read sequence\n \n AD1: str, optional, default='GGAAGCCTTGGCTTTTG'\n Specifies the first adaptor sequence expected in the fastq read\n \n Returns\n -------\n Fastq sequence before first occurent of AD1\n or\n '0' if AD1 not in seq sequence\n '''"
] |
[
{
"param": "line",
"type": null
},
{
"param": "AD1",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "line",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "AD1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def observed_wellbc_extraction(line, AD1='GGAAGCCTTGGCTTTTG'):
seq = line['seq']
if AD1 in seq:
return seq[ : seq.index(AD1)]
else: return '0'
| 895 | 976 |
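A minimal usage sketch for `observed_wellbc_extraction` above; any mapping with a 'seq' key works, and the read is hypothetical:

AD1 = 'GGAAGCCTTGGCTTTTG'
line = {'seq': 'ACGTACGT' + AD1 + 'TTTT'}
print(observed_wellbc_extraction(line))             # -> 'ACGTACGT'
print(observed_wellbc_extraction({'seq': 'AAAA'}))  # -> '0' when no adaptor is found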
cb5f70c83266061e31e165a6e3ff41c686ae86e7
|
Vinesma/youtube-chat-render
|
core/clean.py
|
[
"MIT"
] |
Python
|
all
| null |
def all(frames_folder, chat_video_path):
"""Delete all non-essentials."""
try:
for file in os.listdir(frames_folder):
if file.endswith('.png'):
os.remove(os.path.join(frames_folder, file))
os.remove(chat_video_path)
os.remove('30_sec_test.mp4')
except FileNotFoundError:
pass
|
Delete all non-essentials.
|
Delete all non-essentials.
|
[
"Delete",
"all",
"non",
"-",
"essentials",
"."
] |
def all(frames_folder, chat_video_path):
try:
for file in os.listdir(frames_folder):
if file.endswith('.png'):
os.remove(os.path.join(frames_folder, file))
os.remove(chat_video_path)
os.remove('30_sec_test.mp4')
except FileNotFoundError:
pass
|
[
"def",
"all",
"(",
"frames_folder",
",",
"chat_video_path",
")",
":",
"try",
":",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"frames_folder",
")",
":",
"if",
"file",
".",
"endswith",
"(",
"'.png'",
")",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"frames_folder",
",",
"file",
")",
")",
"os",
".",
"remove",
"(",
"chat_video_path",
")",
"os",
".",
"remove",
"(",
"'30_sec_test.mp4'",
")",
"except",
"FileNotFoundError",
":",
"pass"
] |
Delete all non-essentials.
|
[
"Delete",
"all",
"non",
"-",
"essentials",
"."
] |
[
"\"\"\"Delete all non-essentials.\"\"\""
] |
[
{
"param": "frames_folder",
"type": null
},
{
"param": "chat_video_path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "frames_folder",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "chat_video_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def all(frames_folder, chat_video_path):
try:
for file in os.listdir(frames_folder):
if file.endswith('.png'):
os.remove(os.path.join(frames_folder, file))
os.remove(chat_video_path)
os.remove('30_sec_test.mp4')
except FileNotFoundError:
pass
| 896 | 107 |
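A minimal usage sketch for the `all` helper above (note it shadows the built-in `all` in its module); the paths are hypothetical, and missing files simply hit the except branch:

import os, tempfile

frames = tempfile.mkdtemp()
open(os.path.join(frames, 'frame_0001.png'), 'w').close()
all(frames, os.path.join(frames, 'chat.mp4'))
print(os.listdir(frames))  # -> [] once the pngs are removed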
bf735c164b16ee50c3cbf0f338789c1f2304653f
|
tiwarylab/Belief-Propagation
|
belief_propagation.py
|
[
"MIT"
] |
Python
|
clean_names
|
<not_specific>
|
def clean_names(names):
"""Clean factor names to remove formatting characters.
Parameters
----------
names: list of str
Names to be cleaned.
Returns
-------
list of str
List of strings without formatting characters.
"""
new_names = copy.copy(names)
for i in range(len(names)):
name = new_names[i]
name = name.replace('_','')
name = name.replace('$','')
name = name.replace('\\','')
new_names[i] = name
return new_names
|
Clean factor names to remove formatting characters.
Parameters
----------
names: list of str
Names to be cleaned.
Returns
-------
list of str
List of strings without formatting characters.
|
Clean factor names to remove formatting characters.
Parameters
list of str
Names to be cleaned.
Returns
list of str
List of strings without formatting characters.
|
[
"Clean",
"factor",
"names",
"to",
"remove",
"formatting",
"characters",
".",
"Parameters",
"list",
"of",
"str",
"Names",
"to",
"be",
"cleaned",
".",
"Returns",
"list",
"of",
"str",
"List",
"of",
"strings",
"without",
"formatting",
"characters",
"."
] |
def clean_names(names):
new_names = copy.copy(names)
for i in range(len(names)):
name = new_names[i]
name = name.replace('_','')
name = name.replace('$','')
name = name.replace('\\','')
new_names[i] = name
return new_names
|
[
"def",
"clean_names",
"(",
"names",
")",
":",
"new_names",
"=",
"copy",
".",
"copy",
"(",
"names",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"names",
")",
")",
":",
"name",
"=",
"new_names",
"[",
"i",
"]",
"name",
"=",
"name",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"'$'",
",",
"''",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"'\\\\'",
",",
"''",
")",
"new_names",
"[",
"i",
"]",
"=",
"name",
"return",
"new_names"
] |
Clean factor names to remove formatting characters.
|
[
"Clean",
"factor",
"names",
"to",
"remove",
"formatting",
"characters",
"."
] |
[
"\"\"\"Clean factor names to remove formatting characters.\n \n Parameters\n ----------\n names: list of str\n Names to be cleaned.\n \n Returns\n -------\n list of str\n List of strings without formatting characters.\n \n \"\"\""
] |
[
{
"param": "names",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "names",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import copy
def clean_names(names):
new_names = copy.copy(names)
for i in range(len(names)):
name = new_names[i]
name = name.replace('_','')
name = name.replace('$','')
name = name.replace('\\','')
new_names[i] = name
return new_names
| 897 | 707 |
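A minimal usage sketch for `clean_names` above; the shallow copy leaves the input list untouched:

labels = ['$k_{1}$', '$\\alpha$', 'plain']
print(clean_names(labels))  # -> ['k{1}', 'alpha', 'plain']
print(labels)               # original strings are unchanged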
2b44bbf0fe7be0b4d4f92e65e723b92564be70cd
|
spel-uchile/langmuir_parser
|
langmuir_parser.py
|
[
"MIT"
] |
Python
|
add_is_anomaly
|
<not_specific>
|
def add_is_anomaly(dataset, threshold):
"""
Appends a column that sorts out the particles counter
values (greater or lower than a threshold) to a dataset that contains
a "Particles counter" column.
:param dataset: DataFrame Dataset with a "Particles counter" column
:return: DataFrame with is_anomaly column added
"""
particles = dataset['Particles counter']
is_anomaly = []
for row in particles:
if row >= threshold:
is_anomaly.append(1)
else:
is_anomaly.append(0)
dataset["is_anom"] = is_anomaly
dataset = dataset[dataset["Lat"] > -50]
dataset = dataset[dataset["Lat"] < 0]
return dataset
|
Appends a column that sorts out the particles counter
values (greater or lower than a threshold) to a dataset that contains
a "Particles counter" column.
:param dataset: DataFrame Dataset with a "Particles counter" column
:return: DataFrame with is_anomaly column added
|
Appends a column that sorts out the particles counter
values (greater or lower than a threshold) to a dataset that contains
a "Particles counter" column.
|
[
"Appends",
"a",
"column",
"that",
"sorts",
"out",
"the",
"particles",
"counter",
"values",
"(",
"greater",
"or",
"lower",
"than",
"a",
"threshold",
")",
"to",
"a",
"dataset",
"that",
"contains",
"a",
"\"",
"Particles",
"counter",
"\"",
"column",
"."
] |
def add_is_anomaly(dataset, threshold):
particles = dataset['Particles counter']
is_anomaly = []
for row in particles:
if row >= threshold:
is_anomaly.append(1)
else:
is_anomaly.append(0)
dataset["is_anom"] = is_anomaly
dataset = dataset[dataset["Lat"] > -50]
dataset = dataset[dataset["Lat"] < 0]
return dataset
|
[
"def",
"add_is_anomaly",
"(",
"dataset",
",",
"threshold",
")",
":",
"particles",
"=",
"dataset",
"[",
"'Particles counter'",
"]",
"is_anomaly",
"=",
"[",
"]",
"for",
"row",
"in",
"particles",
":",
"if",
"row",
">=",
"threshold",
":",
"is_anomaly",
".",
"append",
"(",
"1",
")",
"else",
":",
"is_anomaly",
".",
"append",
"(",
"0",
")",
"dataset",
"[",
"\"is_anom\"",
"]",
"=",
"is_anomaly",
"dataset",
"=",
"dataset",
"[",
"dataset",
"[",
"\"Lat\"",
"]",
">",
"-",
"50",
"]",
"dataset",
"=",
"dataset",
"[",
"dataset",
"[",
"\"Lat\"",
"]",
"<",
"0",
"]",
"return",
"dataset"
] |
Appends a column that sorts out the particles counter
values (greater or lower than a threshold) to a dataset that contains
a "Particles counter" column.
|
[
"Appends",
"a",
"column",
"that",
"sorts",
"out",
"the",
"particles",
"counter",
"values",
"(",
"greater",
"or",
"lower",
"than",
"a",
"threshold",
")",
"to",
"a",
"dataset",
"that",
"contains",
"a",
"\"",
"Particles",
"counter",
"\"",
"column",
"."
] |
[
"\"\"\"\n Appends a column that sorts out the particles counter\n values (greater or lower than a threshold) to a dataset that contains\n a \"Particles counter\" column.\n :param dataset: DataFrame Dataset with a \"Particles counter\" column\n :return: DataFrame with is_anomaly column added\n \"\"\""
] |
[
{
"param": "dataset",
"type": null
},
{
"param": "threshold",
"type": null
}
] |
{
"returns": [
{
"docstring": "DataFrame with is_anomaly column added",
"docstring_tokens": [
"DataFrame",
"with",
"is_anomaly",
"column",
"added"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "dataset",
"type": null,
"docstring": "DataFrame Dataset with a \"Particles counter\" column",
"docstring_tokens": [
"DataFrame",
"Dataset",
"with",
"a",
"\"",
"Particles",
"counter",
"\"",
"column"
],
"default": null,
"is_optional": null
},
{
"identifier": "threshold",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_is_anomaly(dataset, threshold):
particles = dataset['Particles counter']
is_anomaly = []
for row in particles:
if row >= threshold:
is_anomaly.append(1)
else:
is_anomaly.append(0)
dataset["is_anom"] = is_anomaly
dataset = dataset[dataset["Lat"] > -50]
dataset = dataset[dataset["Lat"] < 0]
return dataset
| 899 | 12 |
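A minimal usage sketch for `add_is_anomaly` above, assuming pandas; the probe samples are hypothetical:

import pandas as pd

df = pd.DataFrame({'Particles counter': [10, 500, 80],
                   'Lat': [-30.0, -10.0, -60.0]})
out = add_is_anomaly(df, threshold=100)
print(out[['Lat', 'is_anom']])  # keeps Lat in (-50, 0); is_anom is 1 where the counter >= 100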
a69c8d1a9ca3d9de2824eb3ce8819bb45c57f2ab
|
lmmentel/chemtools
|
chemtools/basisset.py
|
[
"MIT"
] |
Python
|
ncartesian
|
<not_specific>
|
def ncartesian(l):
'''
Calculate the number of cartesian components of a function with a given
angular momentum value *l*.
'''
return int((l + 1) * (l + 2) / 2)
|
Calculate the number of cartesian components of a function with a given
angular momentum value *l*.
|
Calculate the number of cartesian components of a function with a given
angular momentum value *l*.
|
[
"Calculate",
"the",
"number",
"of",
"cartesian",
"components",
"of",
"a",
"function",
"with",
"a",
"given",
"angular",
"momentum",
"value",
"*",
"l",
"*",
"."
] |
def ncartesian(l):
return int((l + 1) * (l + 2) / 2)
|
[
"def",
"ncartesian",
"(",
"l",
")",
":",
"return",
"int",
"(",
"(",
"l",
"+",
"1",
")",
"*",
"(",
"l",
"+",
"2",
")",
"/",
"2",
")"
] |
Calculate the number of cartesian components of a function with a given
angular momentum value *l*.
|
[
"Calculate",
"the",
"number",
"of",
"cartesian",
"components",
"of",
"a",
"function",
"with",
"a",
"given",
"angular",
"momentum",
"value",
"*",
"l",
"*",
"."
] |
[
"'''\n Calculate the number of cartesian components of a function with a given\n angular momentum value *l*.\n '''"
] |
[
{
"param": "l",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "l",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def ncartesian(l):
return int((l + 1) * (l + 2) / 2)
| 900 | 58 |
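A minimal usage sketch for `ncartesian` above; s, p, d, f shells give 1, 3, 6 and 10 cartesian components:

print([ncartesian(l) for l in range(4)])  # -> [1, 3, 6, 10]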
e813403061e64970f93bf5f20ed637644bd23ad9
|
PySCeS/PyscesToolbox
|
psctb/utils/misc/_misc.py
|
[
"BSD-3-Clause"
] |
Python
|
is_linear
|
<not_specific>
|
def is_linear(scan_range):
"""
    For any 1-dimensional data structure containing numbers return True
    if the numbers follow a linear sequence.
Within the context of PySCeSToolbox this function will be called on
either a linear range or a log range. Thus, while not indicative
of log ranges, this is what a False return value indicates in this
software.
Parameters
----------
scan_range : iterable
Any iterable object containing a range of numbers.
Returns
-------
bool
A boolean indicating if `scan_range` is a linear sequence of
numbers.
"""
if scan_range[1] - scan_range[0] == scan_range[-1] - scan_range[-2]:
return True
else:
return False
|
For any 1-dimensional data structure containing numbers return True
if the numbers follow a linear sequence.
Within the context of PySCeSToolbox this function will be called on
either a linear range or a log range. Thus, while not indicative
of log ranges, this is what a False return value indicates in this
software.
Parameters
----------
scan_range : iterable
Any iterable object containing a range of numbers.
Returns
-------
bool
A boolean indicating if `scan_range` is a linear sequence of
numbers.
|
For any 1-dimensional data structure containing numbers return True
if the numbers follow a linear sequence.
Within the context of PySCeSToolbox this function will be called on
either a linear range or a log range. Thus, while not indicative
of log ranges, this is what a False return value indicates in this
software.
Parameters
scan_range : iterable
Any iterable object containing a range of numbers.
Returns
bool
A boolean indicating if `scan_range` is a linear sequence of
numbers.
|
[
"For",
"any",
"1",
"-",
"demensional",
"data",
"structure",
"containing",
"numbers",
"return",
"True",
"if",
"the",
"numbers",
"follows",
"a",
"linear",
"sequence",
".",
"Within",
"the",
"context",
"of",
"PySCeSToolbox",
"this",
"function",
"will",
"be",
"called",
"on",
"either",
"a",
"linear",
"range",
"or",
"a",
"log",
"range",
".",
"Thus",
"while",
"not",
"indicative",
"of",
"log",
"ranges",
"this",
"is",
"what",
"a",
"False",
"return",
"value",
"indicates",
"in",
"this",
"software",
".",
"Parameters",
"scan_range",
":",
"iterable",
"Any",
"iterable",
"object",
"containing",
"a",
"range",
"of",
"numbers",
".",
"Returns",
"bool",
"A",
"boolean",
"indicating",
"if",
"`",
"scan_range",
"`",
"is",
"a",
"linear",
"sequence",
"of",
"numbers",
"."
] |
def is_linear(scan_range):
if scan_range[1] - scan_range[0] == scan_range[-1] - scan_range[-2]:
return True
else:
return False
|
[
"def",
"is_linear",
"(",
"scan_range",
")",
":",
"if",
"scan_range",
"[",
"1",
"]",
"-",
"scan_range",
"[",
"0",
"]",
"==",
"scan_range",
"[",
"-",
"1",
"]",
"-",
"scan_range",
"[",
"-",
"2",
"]",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
For any 1-dimensional data structure containing numbers return True
if the numbers follow a linear sequence.
|
[
"For",
"any",
"1",
"-",
"demensional",
"data",
"structure",
"containing",
"numbers",
"return",
"True",
"if",
"the",
"numbers",
"follows",
"a",
"linear",
"sequence",
"."
] |
[
"\"\"\"\n For any 1-demensional data structure containing numbers return True\n if the numbers follows a linear sequence.\n\n Within the context of PySCeSToolbox this function will be called on\n either a linear range or a log range. Thus, while not indicative\n of log ranges, this is what a False return value indicates in this\n software.\n\n Parameters\n ----------\n scan_range : iterable\n Any iterable object containing a range of numbers.\n\n Returns\n -------\n bool\n A boolean indicating if `scan_range` is a linear sequence of\n numbers.\n \"\"\""
] |
[
{
"param": "scan_range",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "scan_range",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_linear(scan_range):
if scan_range[1] - scan_range[0] == scan_range[-1] - scan_range[-2]:
return True
else:
return False
| 902 | 706 |
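A minimal usage sketch for `is_linear` above, assuming numpy; note the test relies on exact floating-point equality of the end steps:

import numpy as np

print(is_linear(np.linspace(0, 10, 11)))  # -> True: constant step of 1.0
print(is_linear(np.logspace(0, 3, 10)))   # -> False: the step grows along the range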
91a58852945c4dc0ef2db24061ddcb313f39ee08
|
vagmcs/pylogit
|
src/pylogit/bootstrap_abc.py
|
[
"BSD-3-Clause"
] |
Python
|
calc_bias_abc
|
<not_specific>
|
def calc_bias_abc(second_order_influence):
"""
Calculates the approximate bias of the MLE estimates for use in calculating
the approximate bootstrap confidence (ABC) intervals.
Parameters
----------
second_order_influence : 2D ndarray.
Should have one row for each observation. Should have one column for
each parameter in the parameter vector being estimated. Elements should
denote the second order empirical influence of the associated
observation on the associated parameter.
Returns
-------
bias : 1D ndarray.
Contains the approximate bias of the MLE estimates for use in the ABC
confidence intervals.
References
----------
Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap.
CRC press, 1994. Section 22.6, Equation 22.35.
"""
num_obs = second_order_influence.shape[0]
constant = 2.0 * num_obs**2
bias = second_order_influence.sum(axis=0) / constant
return bias
|
Calculates the approximate bias of the MLE estimates for use in calculating
the approximate bootstrap confidence (ABC) intervals.
Parameters
----------
second_order_influence : 2D ndarray.
Should have one row for each observation. Should have one column for
each parameter in the parameter vector being estimated. Elements should
denote the second order empirical influence of the associated
observation on the associated parameter.
Returns
-------
bias : 1D ndarray.
Contains the approximate bias of the MLE estimates for use in the ABC
confidence intervals.
References
----------
Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap.
CRC press, 1994. Section 22.6, Equation 22.35.
|
Calculates the approximate bias of the MLE estimates for use in calculating
the approximate bootstrap confidence (ABC) intervals.
Parameters
second_order_influence : 2D ndarray.
Should have one row for each observation. Should have one column for
each parameter in the parameter vector being estimated. Elements should
denote the second order empirical influence of the associated
observation on the associated parameter.
Returns
bias : 1D ndarray.
Contains the approximate bias of the MLE estimates for use in the ABC
confidence intervals.
References
Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap.
|
[
"Calculates",
"the",
"approximate",
"bias",
"of",
"the",
"MLE",
"estimates",
"for",
"use",
"in",
"calculating",
"the",
"approximate",
"bootstrap",
"confidence",
"(",
"ABC",
")",
"intervals",
".",
"Parameters",
"second_order_influence",
":",
"2D",
"ndarray",
".",
"Should",
"have",
"one",
"row",
"for",
"each",
"observation",
".",
"Should",
"have",
"one",
"column",
"for",
"each",
"parameter",
"in",
"the",
"parameter",
"vector",
"being",
"estimated",
".",
"Elements",
"should",
"denote",
"the",
"second",
"order",
"empirical",
"influence",
"of",
"the",
"associated",
"observation",
"on",
"the",
"associated",
"parameter",
".",
"Returns",
"bias",
":",
"1D",
"ndarray",
".",
"Contains",
"the",
"approximate",
"bias",
"of",
"the",
"MLE",
"estimates",
"for",
"use",
"in",
"the",
"ABC",
"confidence",
"intervals",
".",
"References",
"Efron",
"Bradley",
"and",
"Robert",
"J",
".",
"Tibshirani",
".",
"An",
"Introduction",
"to",
"the",
"Bootstrap",
"."
] |
def calc_bias_abc(second_order_influence):
num_obs = second_order_influence.shape[0]
constant = 2.0 * num_obs**2
bias = second_order_influence.sum(axis=0) / constant
return bias
|
[
"def",
"calc_bias_abc",
"(",
"second_order_influence",
")",
":",
"num_obs",
"=",
"second_order_influence",
".",
"shape",
"[",
"0",
"]",
"constant",
"=",
"2.0",
"*",
"num_obs",
"**",
"2",
"bias",
"=",
"second_order_influence",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"/",
"constant",
"return",
"bias"
] |
Calculates the approximate bias of the MLE estimates for use in calculating
the approximate bootstrap confidence (ABC) intervals.
|
[
"Calculates",
"the",
"approximate",
"bias",
"of",
"the",
"MLE",
"estimates",
"for",
"use",
"in",
"calculating",
"the",
"approximate",
"bootstrap",
"confidence",
"(",
"ABC",
")",
"intervals",
"."
] |
[
"\"\"\"\n Calculates the approximate bias of the MLE estimates for use in calculating\n the approximate bootstrap confidence (ABC) intervals.\n\n Parameters\n ----------\n second_order_influence : 2D ndarray.\n Should have one row for each observation. Should have one column for\n each parameter in the parameter vector being estimated. Elements should\n denote the second order empirical influence of the associated\n observation on the associated parameter.\n\n Returns\n -------\n bias : 1D ndarray.\n Contains the approximate bias of the MLE estimates for use in the ABC\n confidence intervals.\n\n References\n ----------\n Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap.\n CRC press, 1994. Section 22.6, Equation 22.35.\n \"\"\""
] |
[
{
"param": "second_order_influence",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "second_order_influence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def calc_bias_abc(second_order_influence):
num_obs = second_order_influence.shape[0]
constant = 2.0 * num_obs**2
bias = second_order_influence.sum(axis=0) / constant
return bias
| 903 | 747 |
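A minimal usage sketch for `calc_bias_abc` above; the influence values are hypothetical (4 observations, 2 parameters):

import numpy as np

influence = np.arange(8, dtype=float).reshape(4, 2)
print(calc_bias_abc(influence))  # column sums / (2 * 4**2) -> [0.375 0.5]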
f867a013a43c3399f8b7912eb52238bc17b51802
|
iyappan24/bqcon
|
bqcon/query_processor.py
|
[
"MIT"
] |
Python
|
count_query
|
<not_specific>
|
def count_query(project,dataset,tablename,condition):
"""
Function to process query for count process
"""
if isinstance(tablename, str):
pass
else:
raise ValueError("Tablename should be a String")
if isinstance(dataset, str):
pass
else:
raise ValueError("Tablename should be a String")
if condition == None or isinstance(condition, str):
pass
else:
raise ValueError("Condition can only be either None or String")
final_tablename = "["+project+":"+dataset+"."+tablename+"]"
if condition == None:
query = "select count(*) from " + final_tablename
else:
# building the query
query = "select count(*) from " + final_tablename + " where " + condition
return query
|
Function to process query for count process
|
Function to process query for count process
|
[
"Function",
"to",
"process",
"query",
"for",
"count",
"process"
] |
def count_query(project,dataset,tablename,condition):
if isinstance(tablename, str):
pass
else:
raise ValueError("Tablename should be a String")
if isinstance(dataset, str):
pass
else:
raise ValueError("Tablename should be a String")
if condition == None or isinstance(condition, str):
pass
else:
raise ValueError("Condition can only be either None or String")
final_tablename = "["+project+":"+dataset+"."+tablename+"]"
if condition == None:
query = "select count(*) from " + final_tablename
else:
query = "select count(*) from " + final_tablename + " where " + condition
return query
|
[
"def",
"count_query",
"(",
"project",
",",
"dataset",
",",
"tablename",
",",
"condition",
")",
":",
"if",
"isinstance",
"(",
"tablename",
",",
"str",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"Tablename should be a String\"",
")",
"if",
"isinstance",
"(",
"dataset",
",",
"str",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"Tablename should be a String\"",
")",
"if",
"condition",
"==",
"None",
"or",
"isinstance",
"(",
"condition",
",",
"str",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"Condition can only be either None or String\"",
")",
"final_tablename",
"=",
"\"[\"",
"+",
"project",
"+",
"\":\"",
"+",
"dataset",
"+",
"\".\"",
"+",
"tablename",
"+",
"\"]\"",
"if",
"condition",
"==",
"None",
":",
"query",
"=",
"\"select count(*) from \"",
"+",
"final_tablename",
"else",
":",
"query",
"=",
"\"select count(*) from \"",
"+",
"final_tablename",
"+",
"\" where \"",
"+",
"condition",
"return",
"query"
] |
Function to process query for count process
|
[
"Function",
"to",
"process",
"query",
"for",
"count",
"process"
] |
[
"\"\"\"\n Function to process query for count process\n \"\"\"",
"# building the query"
] |
[
{
"param": "project",
"type": null
},
{
"param": "dataset",
"type": null
},
{
"param": "tablename",
"type": null
},
{
"param": "condition",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "project",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dataset",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "tablename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "condition",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def count_query(project,dataset,tablename,condition):
if isinstance(tablename, str):
pass
else:
raise ValueError("Tablename should be a String")
if isinstance(dataset, str):
pass
else:
raise ValueError("Tablename should be a String")
if condition == None or isinstance(condition, str):
pass
else:
raise ValueError("Condition can only be either None or String")
final_tablename = "["+project+":"+dataset+"."+tablename+"]"
if condition == None:
query = "select count(*) from " + final_tablename
else:
query = "select count(*) from " + final_tablename + " where " + condition
return query
| 904 | 243 |
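A minimal usage sketch for `count_query` above; the project, dataset and table names are hypothetical:

print(count_query('my-project', 'analytics', 'events', None))
# select count(*) from [my-project:analytics.events]
print(count_query('my-project', 'analytics', 'events', "country = 'GB'"))
# select count(*) from [my-project:analytics.events] where country = 'GB'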
c940ec2c565bf018fae115bf20d4c9fe7f922f0a
|
AhmedHani/mleus
|
mleus/utils/algorithm_utils.py
|
[
"BSD-3-Clause"
] |
Python
|
_get_ngrams
|
<not_specific>
|
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
|
Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
|
Extracts all n-grams upto a given maximum order from an input segment.
|
[
"Extracts",
"all",
"n",
"-",
"grams",
"upto",
"a",
"given",
"maximum",
"order",
"from",
"an",
"input",
"segment",
"."
] |
def _get_ngrams(segment, max_order):
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
|
[
"def",
"_get_ngrams",
"(",
"segment",
",",
"max_order",
")",
":",
"ngram_counts",
"=",
"collections",
".",
"Counter",
"(",
")",
"for",
"order",
"in",
"range",
"(",
"1",
",",
"max_order",
"+",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"segment",
")",
"-",
"order",
"+",
"1",
")",
":",
"ngram",
"=",
"tuple",
"(",
"segment",
"[",
"i",
":",
"i",
"+",
"order",
"]",
")",
"ngram_counts",
"[",
"ngram",
"]",
"+=",
"1",
"return",
"ngram_counts"
] |
Extracts all n-grams upto a given maximum order from an input segment.
|
[
"Extracts",
"all",
"n",
"-",
"grams",
"upto",
"a",
"given",
"maximum",
"order",
"from",
"an",
"input",
"segment",
"."
] |
[
"\"\"\"Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\""
] |
[
{
"param": "segment",
"type": null
},
{
"param": "max_order",
"type": null
}
] |
{
"returns": [
{
"docstring": "The Counter containing all n-grams upto max_order in segment\nwith a count of how many times each n-gram occurred.",
"docstring_tokens": [
"The",
"Counter",
"containing",
"all",
"n",
"-",
"grams",
"upto",
"max_order",
"in",
"segment",
"with",
"a",
"count",
"of",
"how",
"many",
"times",
"each",
"n",
"-",
"gram",
"occurred",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "segment",
"type": null,
"docstring": "text segment from which n-grams will be extracted.",
"docstring_tokens": [
"text",
"segment",
"from",
"which",
"n",
"-",
"grams",
"will",
"be",
"extracted",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "max_order",
"type": null,
"docstring": "maximum length in tokens of the n-grams returned by this\nmethods.",
"docstring_tokens": [
"maximum",
"length",
"in",
"tokens",
"of",
"the",
"n",
"-",
"grams",
"returned",
"by",
"this",
"methods",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import collections
def _get_ngrams(segment, max_order):
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
| 905 | 12 |
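A minimal usage sketch for `_get_ngrams` above:

tokens = ['the', 'cat', 'sat']
counts = _get_ngrams(tokens, max_order=2)
print(counts[('the',)], counts[('the', 'cat')])  # -> 1 1
print(sorted(counts))  # all unigram and bigram tuples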
9697ff0df66c17286bd1263482e822b405ce4f8a
|
IBM/networking-services-python-sdk
|
ibm_cloud_networking_services/direct_link_v1.py
|
[
"Apache-2.0"
] |
Python
|
from_dict
|
'GatewayBfdConfigActionTemplate'
|
def from_dict(cls, _dict: Dict) -> 'GatewayBfdConfigActionTemplate':
"""Initialize a GatewayBfdConfigActionTemplate object from a json dictionary."""
args = {}
if 'interval' in _dict:
args['interval'] = _dict.get('interval')
else:
raise ValueError('Required property \'interval\' not present in GatewayBfdConfigActionTemplate JSON')
if 'multiplier' in _dict:
args['multiplier'] = _dict.get('multiplier')
return cls(**args)
|
Initialize a GatewayBfdConfigActionTemplate object from a json dictionary.
|
Initialize a GatewayBfdConfigActionTemplate object from a json dictionary.
|
[
"Initialize",
"a",
"GatewayBfdConfigActionTemplate",
"object",
"from",
"a",
"json",
"dictionary",
"."
] |
def from_dict(cls, _dict: Dict) -> 'GatewayBfdConfigActionTemplate':
args = {}
if 'interval' in _dict:
args['interval'] = _dict.get('interval')
else:
raise ValueError('Required property \'interval\' not present in GatewayBfdConfigActionTemplate JSON')
if 'multiplier' in _dict:
args['multiplier'] = _dict.get('multiplier')
return cls(**args)
|
[
"def",
"from_dict",
"(",
"cls",
",",
"_dict",
":",
"Dict",
")",
"->",
"'GatewayBfdConfigActionTemplate'",
":",
"args",
"=",
"{",
"}",
"if",
"'interval'",
"in",
"_dict",
":",
"args",
"[",
"'interval'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'interval'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Required property \\'interval\\' not present in GatewayBfdConfigActionTemplate JSON'",
")",
"if",
"'multiplier'",
"in",
"_dict",
":",
"args",
"[",
"'multiplier'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'multiplier'",
")",
"return",
"cls",
"(",
"**",
"args",
")"
] |
Initialize a GatewayBfdConfigActionTemplate object from a json dictionary.
|
[
"Initialize",
"a",
"GatewayBfdConfigActionTemplate",
"object",
"from",
"a",
"json",
"dictionary",
"."
] |
[
"\"\"\"Initialize a GatewayBfdConfigActionTemplate object from a json dictionary.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "_dict",
"type": "Dict"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "_dict",
"type": "Dict",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def from_dict(cls, _dict: Dict) -> 'GatewayBfdConfigActionTemplate':
args = {}
if 'interval' in _dict:
args['interval'] = _dict.get('interval')
else:
raise ValueError('Required property \'interval\' not present in GatewayBfdConfigActionTemplate JSON')
if 'multiplier' in _dict:
args['multiplier'] = _dict.get('multiplier')
return cls(**args)
| 907 | 550 |
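`from_dict` above is a classmethod of the SDK's GatewayBfdConfigActionTemplate model; a minimal stand-in class (hypothetical, for illustration only, not the real SDK class) lets the round trip run:

from typing import Dict  # must be in scope when the from_dict above is defined

class GatewayBfdConfigActionTemplate:
    def __init__(self, interval, multiplier=None):
        self.interval = interval
        self.multiplier = multiplier

    # Reuse the module-level from_dict defined above as a classmethod.
    from_dict = classmethod(from_dict)

tmpl = GatewayBfdConfigActionTemplate.from_dict({'interval': 2000, 'multiplier': 10})
print(tmpl.interval, tmpl.multiplier)  # -> 2000 10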
fa4f30016864a562b88d7dd15f4d610da8b8e76e
|
bbc/bbc-speech-segmenter
|
recipe/local/xvector_utils.py
|
[
"Apache-2.0"
] |
Python
|
extract_timestamp
|
<not_specific>
|
def extract_timestamp(xvector_key):
'''
X-vector key format: fileid_segStart_segEnd-xvectorStart-xvectorEnd
segStart & segEng in seconds
xvectorStart & xvectorEnd in milliseconds
'''
# Split twice from right as fileid might contain underscore too
fileid, seg_start, rest = xvector_key.rsplit('_', 2)
    # Extract X-vector timing from the rest
xvector_start_ms, xvector_end_ms = rest.split('-')[-2:]
# Calculate X-vector duration in seconds
xvector_start = int(xvector_start_ms) / 1000 # ms to sec
xvector_end = int(xvector_end_ms) / 1000 # ms to sec
xvector_duration = xvector_end - xvector_start # duration in sec
start = float(seg_start) + xvector_start
end = start + xvector_duration
return {'fileid': fileid,
'start': '{:.2f}'.format(start),
'end': '{:.2f}'.format(end)}
|
X-vector key format: fileid_segStart_segEnd-xvectorStart-xvectorEnd
segStart & segEng in seconds
xvectorStart & xvectorEnd in milliseconds
|
X-vector key format: fileid_segStart_segEnd-xvectorStart-xvectorEnd
segStart & segEng in seconds
xvectorStart & xvectorEnd in milliseconds
|
[
"X",
"-",
"vector",
"key",
"format",
":",
"fileid_segStart_segEnd",
"-",
"xvectorStart",
"-",
"xvectorEnd",
"segStart",
"&",
"segEng",
"in",
"seconds",
"xvectorStart",
"&",
"xvectorEnd",
"in",
"milliseconds"
] |
def extract_timestamp(xvector_key):
fileid, seg_start, rest = xvector_key.rsplit('_', 2)
xvector_start_ms, xvector_end_ms = rest.split('-')[-2:]
xvector_start = int(xvector_start_ms) / 1000
xvector_end = int(xvector_end_ms) / 1000
xvector_duration = xvector_end - xvector_start
start = float(seg_start) + xvector_start
end = start + xvector_duration
return {'fileid': fileid,
'start': '{:.2f}'.format(start),
'end': '{:.2f}'.format(end)}
|
[
"def",
"extract_timestamp",
"(",
"xvector_key",
")",
":",
"fileid",
",",
"seg_start",
",",
"rest",
"=",
"xvector_key",
".",
"rsplit",
"(",
"'_'",
",",
"2",
")",
"xvector_start_ms",
",",
"xvector_end_ms",
"=",
"rest",
".",
"split",
"(",
"'-'",
")",
"[",
"-",
"2",
":",
"]",
"xvector_start",
"=",
"int",
"(",
"xvector_start_ms",
")",
"/",
"1000",
"xvector_end",
"=",
"int",
"(",
"xvector_end_ms",
")",
"/",
"1000",
"xvector_duration",
"=",
"xvector_end",
"-",
"xvector_start",
"start",
"=",
"float",
"(",
"seg_start",
")",
"+",
"xvector_start",
"end",
"=",
"start",
"+",
"xvector_duration",
"return",
"{",
"'fileid'",
":",
"fileid",
",",
"'start'",
":",
"'{:.2f}'",
".",
"format",
"(",
"start",
")",
",",
"'end'",
":",
"'{:.2f}'",
".",
"format",
"(",
"end",
")",
"}"
] |
X-vector key format: fileid_segStart_segEnd-xvectorStart-xvectorEnd
segStart & segEng in seconds
xvectorStart & xvectorEnd in milliseconds
|
[
"X",
"-",
"vector",
"key",
"format",
":",
"fileid_segStart_segEnd",
"-",
"xvectorStart",
"-",
"xvectorEnd",
"segStart",
"&",
"segEng",
"in",
"seconds",
"xvectorStart",
"&",
"xvectorEnd",
"in",
"milliseconds"
] |
[
"'''\n X-vector key format: fileid_segStart_segEnd-xvectorStart-xvectorEnd\n segStart & segEng in seconds\n xvectorStart & xvectorEnd in milliseconds\n '''",
"# Split twice from right as fileid might contain underscore too",
"# Extract X-vector timimng from the rest",
"# Calculate X-vector duration in seconds",
"# ms to sec",
"# ms to sec",
"# duration in sec"
] |
[
{
"param": "xvector_key",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "xvector_key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def extract_timestamp(xvector_key):
fileid, seg_start, rest = xvector_key.rsplit('_', 2)
xvector_start_ms, xvector_end_ms = rest.split('-')[-2:]
xvector_start = int(xvector_start_ms) / 1000
xvector_end = int(xvector_end_ms) / 1000
xvector_duration = xvector_end - xvector_start
start = float(seg_start) + xvector_start
end = start + xvector_duration
return {'fileid': fileid,
'start': '{:.2f}'.format(start),
'end': '{:.2f}'.format(end)}
| 908 | 177 |
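A minimal usage sketch for `extract_timestamp` above; the key is hypothetical (segment starts at 12.00 s, and the X-vector covers milliseconds 1000-2500 within it):

key = 'prog_one_12.00_34.50-1000-2500'
print(extract_timestamp(key))
# -> {'fileid': 'prog_one', 'start': '13.00', 'end': '14.50'}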
62ecc1ad18eaef5a43b7e70445c62fbfce7ccf2c
|
kotalbert/udacity-customer-churn
|
churn_script_logging_and_tests.py
|
[
"Unlicense"
] |
Python
|
_clean_dir
|
None
|
def _clean_dir(pth: str, ptrn: str = '*.png') -> None:
"""
    Utility to clear directory before testing if files are present.
    :param pth: path to dir to be cleared
    :param ptrn: optional, file pattern to be cleared, defaults to '*.png'
"""
for img in glob.glob(os.path.join(pth, ptrn)):
os.remove(img)
|
Utility to clear directory before testing if files are present.
:param pth: path to dir to be cleared
:param ptrn: optional, file pattern to be cleared, defaults to '*.png'
|
Utility to clear directory before testing if files are present.
|
[
"Utility",
"to",
"clear",
"directory",
"before",
"testing",
"if",
"files",
"are",
"presetn",
"."
] |
def _clean_dir(pth: str, ptrn: str = '*.png') -> None:
for img in glob.glob(os.path.join(pth, ptrn)):
os.remove(img)
|
[
"def",
"_clean_dir",
"(",
"pth",
":",
"str",
",",
"ptrn",
":",
"str",
"=",
"'*.png'",
")",
"->",
"None",
":",
"for",
"img",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pth",
",",
"ptrn",
")",
")",
":",
"os",
".",
"remove",
"(",
"img",
")"
] |
Utility to clear directory before testing if files are present.
|
[
"Utility",
"to",
"clear",
"directory",
"before",
"testing",
"if",
"files",
"are",
"presetn",
"."
] |
[
"\"\"\"\n Utility to clear directory before testing if files are presetn.\n\n :param pth: path to dir to be cleared\n :param ptrn: optional, fille pattern to be cleared, defaults to '*.png'\n \"\"\""
] |
[
{
"param": "pth",
"type": "str"
},
{
"param": "ptrn",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pth",
"type": "str",
"docstring": "path to dir to be cleared",
"docstring_tokens": [
"path",
"to",
"dir",
"to",
"be",
"cleared"
],
"default": null,
"is_optional": null
},
{
"identifier": "ptrn",
"type": "str",
"docstring": "optional, fille pattern to be cleared, defaults to '*.png'",
"docstring_tokens": [
"optional",
"fille",
"pattern",
"to",
"be",
"cleared",
"defaults",
"to",
"'",
"*",
".",
"png",
"'"
],
"default": "'*.png'",
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
import glob
def _clean_dir(pth: str, ptrn: str = '*.png') -> None:
for img in glob.glob(os.path.join(pth, ptrn)):
os.remove(img)
| 909 | 57 |
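A minimal usage sketch for `_clean_dir` above; the image dump is hypothetical:

import os, tempfile

d = tempfile.mkdtemp()
for name in ('churn.png', 'heatmap.png', 'report.txt'):
    open(os.path.join(d, name), 'w').close()
_clean_dir(d)
print(os.listdir(d))  # -> ['report.txt']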
a4cd43978f015bb2ce39dbf4b8c238acded430ae
|
matthiasseemoo/hexrec
|
src/hexrec/utils.py
|
[
"BSD-2-Clause"
] |
Python
|
humanize_ascii
|
str
|
def humanize_ascii(
data: Union[ByteString, Iterable[int]],
replace: str = '.',
) -> str:
r"""ASCII for human readers.
Simplifies the ASCII representation replacing all non-human-readable
characters with a generic placeholder.
Arguments:
data (bytes):
Byte data. Sequence generator supported.
replace (str):
String replacement of non-human-readable characters.
Returns:
str: ASCII representation with only human-readable characters.
Example:
>>> humanize_ascii(b'\x89PNG\r\n\x1a\n')
'.PNG....'
"""
text = ''.join(chr(b) if 0x20 <= b < 0x7F else replace for b in data)
return text
|
r"""ASCII for human readers.
Simplifies the ASCII representation replacing all non-human-readable
characters with a generic placeholder.
Arguments:
data (bytes):
Byte data. Sequence generator supported.
replace (str):
String replacement of non-human-readable characters.
Returns:
str: ASCII representation with only human-readable characters.
Example:
>>> humanize_ascii(b'\x89PNG\r\n\x1a\n')
'.PNG....'
|
r"""ASCII for human readers.
Simplifies the ASCII representation replacing all non-human-readable
characters with a generic placeholder.
|
[
"r",
"\"",
"\"",
"\"",
"ASCII",
"for",
"human",
"readers",
".",
"Simplifies",
"the",
"ASCII",
"representation",
"replacing",
"all",
"non",
"-",
"human",
"-",
"readable",
"characters",
"with",
"a",
"generic",
"placeholder",
"."
] |
def humanize_ascii(
data: Union[ByteString, Iterable[int]],
replace: str = '.',
) -> str:
text = ''.join(chr(b) if 0x20 <= b < 0x7F else replace for b in data)
return text
|
[
"def",
"humanize_ascii",
"(",
"data",
":",
"Union",
"[",
"ByteString",
",",
"Iterable",
"[",
"int",
"]",
"]",
",",
"replace",
":",
"str",
"=",
"'.'",
",",
")",
"->",
"str",
":",
"text",
"=",
"''",
".",
"join",
"(",
"chr",
"(",
"b",
")",
"if",
"0x20",
"<=",
"b",
"<",
"0x7F",
"else",
"replace",
"for",
"b",
"in",
"data",
")",
"return",
"text"
] |
r"""ASCII for human readers.
|
[
"r",
"\"",
"\"",
"\"",
"ASCII",
"for",
"human",
"readers",
"."
] |
[
"r\"\"\"ASCII for human readers.\n\n Simplifies the ASCII representation replacing all non-human-readable\n characters with a generic placeholder.\n\n Arguments:\n data (bytes):\n Byte data. Sequence generator supported.\n\n replace (str):\n String replacement of non-human-readable characters.\n\n Returns:\n str: ASCII representation with only human-readable characters.\n\n Example:\n >>> humanize_ascii(b'\\x89PNG\\r\\n\\x1a\\n')\n '.PNG....'\n \"\"\""
] |
[
{
"param": "data",
"type": "Union[ByteString, Iterable[int]]"
},
{
"param": "replace",
"type": "str"
}
] |
{
"returns": [
{
"docstring": "ASCII representation with only human-readable characters.",
"docstring_tokens": [
"ASCII",
"representation",
"with",
"only",
"human",
"-",
"readable",
"characters",
"."
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "data",
"type": "Union[ByteString, Iterable[int]]",
"docstring": "Byte data. Sequence generator supported.",
"docstring_tokens": [
"Byte",
"data",
".",
"Sequence",
"generator",
"supported",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "replace",
"type": "str",
"docstring": "String replacement of non-human-readable characters.",
"docstring_tokens": [
"String",
"replacement",
"of",
"non",
"-",
"human",
"-",
"readable",
"characters",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
}
|
def humanize_ascii(
data: Union[ByteString, Iterable[int]],
replace: str = '.',
) -> str:
text = ''.join(chr(b) if 0x20 <= b < 0x7F else replace for b in data)
return text
| 910 | 630 |
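The record's own doctest already pins down the core behavior of humanize_ascii; a slightly expanded sketch (the extra inputs are illustrative, and the typing imports are needed for the annotations in the record's code):

from typing import ByteString, Iterable, Union

print(humanize_ascii(b'\x89PNG\r\n\x1a\n'))        # '.PNG....' (from the docstring)
print(humanize_ascii(b'abc\x00def', replace='_'))  # 'abc_def' (assumed input)
print(humanize_ascii(range(0x41, 0x45)))           # 'ABCD' - any iterable of ints works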
7d544d066281f1790fe0a8097931ff7ca8af980e
|
PhyloSofS-Team/exonhomology
|
thoraxe/subexons/phylosofs.py
|
[
"MIT"
] |
Python
|
_fill_sequence_and_annotation
|
<not_specific>
|
def _fill_sequence_and_annotation(df_group, s_exon2char):
"""Create a list of sequences and s-exons (annotation)."""
s_exon_annot = []
seqs = []
for row in df_group.itertuples():
s_exon = s_exon2char[row.S_exonID]
seq = str(row.S_exon_Sequence).replace('*', '')
for _ in range(len(seq)):
s_exon_annot.append(s_exon)
seqs.append(seq)
return "".join(seqs), "".join(s_exon_annot)
|
Create a list of sequences and s-exons (annotation).
|
Create a list of sequences and s-exons (annotation).
|
[
"Create",
"a",
"list",
"of",
"sequences",
"and",
"s",
"-",
"exons",
"(",
"annotation",
")",
"."
] |
def _fill_sequence_and_annotation(df_group, s_exon2char):
s_exon_annot = []
seqs = []
for row in df_group.itertuples():
s_exon = s_exon2char[row.S_exonID]
seq = str(row.S_exon_Sequence).replace('*', '')
for _ in range(len(seq)):
s_exon_annot.append(s_exon)
seqs.append(seq)
return "".join(seqs), "".join(s_exon_annot)
|
[
"def",
"_fill_sequence_and_annotation",
"(",
"df_group",
",",
"s_exon2char",
")",
":",
"s_exon_annot",
"=",
"[",
"]",
"seqs",
"=",
"[",
"]",
"for",
"row",
"in",
"df_group",
".",
"itertuples",
"(",
")",
":",
"s_exon",
"=",
"s_exon2char",
"[",
"row",
".",
"S_exonID",
"]",
"seq",
"=",
"str",
"(",
"row",
".",
"S_exon_Sequence",
")",
".",
"replace",
"(",
"'*'",
",",
"''",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"seq",
")",
")",
":",
"s_exon_annot",
".",
"append",
"(",
"s_exon",
")",
"seqs",
".",
"append",
"(",
"seq",
")",
"return",
"\"\"",
".",
"join",
"(",
"seqs",
")",
",",
"\"\"",
".",
"join",
"(",
"s_exon_annot",
")"
] |
Create a list of sequences and s-exons (annotation).
|
[
"Create",
"a",
"list",
"of",
"sequences",
"and",
"s",
"-",
"exons",
"(",
"annotation",
")",
"."
] |
[
"\"\"\"Create a list of sequences and s-exons (annotation).\"\"\""
] |
[
{
"param": "df_group",
"type": null
},
{
"param": "s_exon2char",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df_group",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "s_exon2char",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _fill_sequence_and_annotation(df_group, s_exon2char):
s_exon_annot = []
seqs = []
for row in df_group.itertuples():
s_exon = s_exon2char[row.S_exonID]
seq = str(row.S_exon_Sequence).replace('*', '')
for _ in range(len(seq)):
s_exon_annot.append(s_exon)
seqs.append(seq)
return "".join(seqs), "".join(s_exon_annot)
| 911 | 619 |
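A minimal sketch of the _fill_sequence_and_annotation record above, assuming a pandas DataFrame with the S_exonID and S_exon_Sequence columns the code expects (the values are made up for illustration):

import pandas as pd

df = pd.DataFrame({'S_exonID': ['e1', 'e2'],
                   'S_exon_Sequence': ['MK*', 'LV']})
s_exon2char = {'e1': 'A', 'e2': 'B'}
seq, annot = _fill_sequence_and_annotation(df, s_exon2char)
# seq == 'MKLV' (stop codon '*' stripped), annot == 'AABB'
# (one annotation character per residue of the corresponding s-exon)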
20f7a0dc19c96f2ffed7670cc3b3c55def16723b
|
LSSTDESC/skyCatalogs
|
python/desc/skycatalogs/utils/translate_utils.py
|
[
"BSD-3-Clause"
] |
Python
|
check_file
|
<not_specific>
|
def check_file(path):
'''Look for a file that should not exist'''
try:
f = open(path, mode='r')
except FileNotFoundError as e:
return
raise ValueError(f'File for {path} already exists')
|
Look for a file that should not exist
|
Look for a file that should not exist
|
[
"Look",
"for",
"a",
"file",
"that",
"should",
"not",
"exist"
] |
def check_file(path):
try:
f = open(path, mode='r')
except FileNotFoundError as e:
return
raise ValueError(f'File for {path} already exists')
|
[
"def",
"check_file",
"(",
"path",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"path",
",",
"mode",
"=",
"'r'",
")",
"except",
"FileNotFoundError",
"as",
"e",
":",
"return",
"raise",
"ValueError",
"(",
"f'File for {path} already exists'",
")"
] |
Look for a file that should not exist
|
[
"Look",
"for",
"a",
"file",
"that",
"should",
"not",
"exist"
] |
[
"'''Look for a file that should not exist'''"
] |
[
{
"param": "path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def check_file(path):
try:
f = open(path, mode='r')
except FileNotFoundError as e:
return
raise ValueError(f'File for {path} already exists')
| 912 | 728 |
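A hedged usage sketch for the check_file record above (paths are illustrative). Note that when the path exists, the function raises without closing the handle it opened - a quirk preserved verbatim from the source:

check_file('/tmp/does-not-exist.txt')   # returns None silently

open('/tmp/taken.txt', 'w').close()     # create a file first
check_file('/tmp/taken.txt')            # raises ValueError: File for ... already exists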
63cb7683c15d7dddc2ff1762704552c378b478fb
|
ythlml/mindspore
|
mindspore/dataset/transforms/vision/validators.py
|
[
"Apache-2.0"
] |
Python
|
check_degrees
|
<not_specific>
|
def check_degrees(degrees):
"""Check if the degrees is legal."""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it cannot be negative.")
degrees = (-degrees, degrees)
elif isinstance(degrees, (list, tuple)):
if len(degrees) != 2:
raise TypeError("If degrees is a sequence, the length must be 2.")
else:
raise TypeError("Degrees must be a single non-negative number or a sequence")
return degrees
|
Check if the degrees is legal.
|
Check if the degrees is legal.
|
[
"Check",
"if",
"the",
"degrees",
"is",
"legal",
"."
] |
def check_degrees(degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it cannot be negative.")
degrees = (-degrees, degrees)
elif isinstance(degrees, (list, tuple)):
if len(degrees) != 2:
raise TypeError("If degrees is a sequence, the length must be 2.")
else:
raise TypeError("Degrees must be a single non-negative number or a sequence")
return degrees
|
[
"def",
"check_degrees",
"(",
"degrees",
")",
":",
"if",
"isinstance",
"(",
"degrees",
",",
"numbers",
".",
"Number",
")",
":",
"if",
"degrees",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"If degrees is a single number, it cannot be negative.\"",
")",
"degrees",
"=",
"(",
"-",
"degrees",
",",
"degrees",
")",
"elif",
"isinstance",
"(",
"degrees",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"len",
"(",
"degrees",
")",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"\"If degrees is a sequence, the length must be 2.\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Degrees must be a single non-negative number or a sequence\"",
")",
"return",
"degrees"
] |
Check if the degrees is legal.
|
[
"Check",
"if",
"the",
"degrees",
"is",
"legal",
"."
] |
[
"\"\"\"Check if the degrees is legal.\"\"\""
] |
[
{
"param": "degrees",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "degrees",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import numbers
def check_degrees(degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it cannot be negative.")
degrees = (-degrees, degrees)
elif isinstance(degrees, (list, tuple)):
if len(degrees) != 2:
raise TypeError("If degrees is a sequence, the length must be 2.")
else:
raise TypeError("Degrees must be a single non-negative number or a sequence")
return degrees
| 913 | 829 |
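A minimal sketch of the check_degrees record above (inputs are illustrative; the behavior follows the code as written):

import numbers

print(check_degrees(30))        # (-30, 30): a scalar is mirrored into a symmetric range
print(check_degrees((10, 20)))  # (10, 20): a 2-element sequence passes through unchanged
check_degrees(-5)               # raises ValueError (negative scalar)
check_degrees([1, 2, 3])        # raises TypeError (sequence length != 2)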