hexsha: string (length 40) | repo: string (length 5–121) | path: string (length 4–227) | license: list | language: string (1 class: Python) | identifier: string (length 1–107) | return_type: string (length 2–237, nullable ⌀) | original_string: string (length 75–13.4k) | original_docstring: string (length 13–12.9k) | docstring: string (length 13–2.57k) | docstring_tokens: list | code: string (length 23–1.88k) | code_tokens: list | short_docstring: string (length 1–1.32k) | short_docstring_tokens: list | comment: list | parameters: list | docstring_params: dict | code_with_imports: string (length 23–1.88k) | idxs: int64 (0–611k) | cluster: int64 (0–1.02k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0afe202022f047d3e3bee7f06d72baf720ca6e25
|
wildfoundry/dataplicity-agent
|
dataplicity/m2m/packetbase.py
|
[
"BSD-3-Clause"
] |
Python
|
create
|
<not_specific>
|
def create(cls, packet_type, *args, **kwargs):
"""Dynamically create a packet from its type and parameters."""
packet_cls = cls.registry.get(cls.process_packet_type(packet_type))
if packet_cls is None:
raise ValueError("no packet type {}".format(packet_type))
return packet_cls(*args, **kwargs)
|
Dynamically create a packet from its type and parameters.
|
Dynamically create a packet from its type and parameters.
|
[
"Dynamically",
"create",
"a",
"packet",
"from",
"its",
"type",
"and",
"parameters",
"."
] |
def create(cls, packet_type, *args, **kwargs):
packet_cls = cls.registry.get(cls.process_packet_type(packet_type))
if packet_cls is None:
raise ValueError("no packet type {}".format(packet_type))
return packet_cls(*args, **kwargs)
|
[
"def",
"create",
"(",
"cls",
",",
"packet_type",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"packet_cls",
"=",
"cls",
".",
"registry",
".",
"get",
"(",
"cls",
".",
"process_packet_type",
"(",
"packet_type",
")",
")",
"if",
"packet_cls",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"no packet type {}\"",
".",
"format",
"(",
"packet_type",
")",
")",
"return",
"packet_cls",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] |
Dynamically create a packet from its type and parameters.
|
[
"Dynamically",
"create",
"a",
"packet",
"from",
"its",
"type",
"and",
"parameters",
"."
] |
[
"\"\"\"Dynamically create a packet from its type and parameters.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "packet_type",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "packet_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create(cls, packet_type, *args, **kwargs):
packet_cls = cls.registry.get(cls.process_packet_type(packet_type))
if packet_cls is None:
raise ValueError("no packet type {}".format(packet_type))
return packet_cls(*args, **kwargs)
| 1,137 | 864 |
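The `create` classmethod above is a registry-based factory: packet subclasses are registered under a type key, and `create` normalizes the requested type, looks up the class, and instantiates it. A minimal self-contained sketch of such a registry (the `PacketBase` scaffold, `PingPacket`, and manual registration are illustrative assumptions, not the dataplicity-agent code):

class PacketBase:
    registry = {}  # maps a packet type key to the class that handles it

    @classmethod
    def process_packet_type(cls, packet_type):
        return packet_type  # assumed identity normalization; the real agent may map enums

    @classmethod
    def create(cls, packet_type, *args, **kwargs):
        packet_cls = cls.registry.get(cls.process_packet_type(packet_type))
        if packet_cls is None:
            raise ValueError("no packet type {}".format(packet_type))
        return packet_cls(*args, **kwargs)

class PingPacket(PacketBase):
    def __init__(self, token):
        self.token = token

PacketBase.registry["ping"] = PingPacket  # registration is assumed manual here
assert isinstance(PacketBase.create("ping", token=42), PingPacket)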
70c6e706b82ace37aebe4bd2915b5ad372e4dff1
|
mdozmorov/genome_runner
|
grsnp/dbcreator_util.py
|
[
"AFL-3.0"
] |
Python
|
save_minmax
| null |
def save_minmax(data,path):
    ''' Saves the dictionary of key value pairs of minmax data to a text file.
    '''
    with open(path,'w') as writer:
        for k,v in data.items():
            writer.write("{}\t{}\n".format(k,v))
|
Saves the dictionary of key value pairs of minmax data to a text file.
|
Saves the dictionary of key value pairs of minmax data to a text file.
|
[
"Saves",
"the",
"dictionary",
"of",
"key",
"value",
"pairs",
"of",
"minmax",
"data",
"to",
"a",
"text",
"file",
"."
] |
def save_minmax(data,path):
    with open(path,'w') as writer:
        for k,v in data.items():
            writer.write("{}\t{}\n".format(k,v))
|
[
"def",
"save_minmax",
"(",
"data",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"writer",
":",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"writer",
".",
"write",
"(",
"\"{}\\t{}\\n\"",
".",
"format",
"(",
"k",
",",
"v",
")",
")"
] |
Saves the dictionary of key value pairs of minmax data to a text file.
|
[
"Saves",
"the",
"dictionary",
"of",
"key",
"value",
"pairs",
"of",
"minmax",
"data",
"to",
"a",
"text",
"file",
"."
] |
[
"''' Saves the dictionary of key value pairs of minmax data to a text file.\n\t'''"
] |
[
{
"param": "data",
"type": null
},
{
"param": "path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def save_minmax(data,path):
    with open(path,'w') as writer:
        for k,v in data.items():
            writer.write("{}\t{}\n".format(k,v))
| 1,138 | 522 |
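A short usage sketch for `save_minmax` as fixed above (text mode 'w', since writing str to a 'wb' handle raises TypeError on Python 3); the dictionary contents and output path are hypothetical:

minmax = {"chr1": 0.12, "chr2": 0.98}  # hypothetical minmax data
save_minmax(minmax, "minmax.txt")      # writes one tab-separated key/value line per entry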
a24cdf3edcca5040b05acdb98f3255ab87fb540b
|
bfu4/mdis
|
parser/hex_dump_parser.py
|
[
"MIT"
] |
Python
|
split_bytes
|
<not_specific>
|
def split_bytes(line_of_bytes: str):
"""
    Split the bytes in a line into an array of pairs
:param line_of_bytes: line of bytes
:return: array of the bytes
"""
ctx = 0
append = []
while ctx < len(line_of_bytes):
append.insert(int(ctx / 2), str(line_of_bytes[ctx:ctx + 2]))
ctx += 2
return append
|
Split the bytes in a line into an array of pairs
:param line_of_bytes: line of bytes
:return: array of the bytes
|
Split the bytes in a line into an array of pairs
|
[
"Split",
"the",
"bytes",
"in",
"a",
"line",
"into",
"an",
"array",
"for",
"pairs"
] |
def split_bytes(line_of_bytes: str):
ctx = 0
append = []
while ctx < len(line_of_bytes):
append.insert(int(ctx / 2), str(line_of_bytes[ctx:ctx + 2]))
ctx += 2
return append
|
[
"def",
"split_bytes",
"(",
"line_of_bytes",
":",
"str",
")",
":",
"ctx",
"=",
"0",
"append",
"=",
"[",
"]",
"while",
"ctx",
"<",
"len",
"(",
"line_of_bytes",
")",
":",
"append",
".",
"insert",
"(",
"int",
"(",
"ctx",
"/",
"2",
")",
",",
"str",
"(",
"line_of_bytes",
"[",
"ctx",
":",
"ctx",
"+",
"2",
"]",
")",
")",
"ctx",
"+=",
"2",
"return",
"append"
] |
Split the bytes in a line into an array of pairs
|
[
"Split",
"the",
"bytes",
"in",
"a",
"line",
"into",
"an",
"array",
"for",
"pairs"
] |
[
"\"\"\"\n Split the bytes in a line into an array for pairs\n :param line_of_bytes: line of bytes\n :return: array of the bytes\n \"\"\""
] |
[
{
"param": "line_of_bytes",
"type": "str"
}
] |
{
"returns": [
{
"docstring": "array of the bytes",
"docstring_tokens": [
"array",
"of",
"the",
"bytes"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "line_of_bytes",
"type": "str",
"docstring": "line of bytes",
"docstring_tokens": [
"line",
"of",
"bytes"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def split_bytes(line_of_bytes: str):
ctx = 0
append = []
while ctx < len(line_of_bytes):
append.insert(int(ctx / 2), str(line_of_bytes[ctx:ctx + 2]))
ctx += 2
return append
| 1,139 | 915 |
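`split_bytes` walks the input two characters at a time, so a hex-dump line decomposes into byte pairs. Assuming the function above is in scope:

assert split_bytes("deadbeef") == ["de", "ad", "be", "ef"]
assert split_bytes("abc") == ["ab", "c"]  # an odd-length tail yields a lone trailing character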
2b7973209dbc4317a6b89e33b0965b2a9e553b29
|
bpatoul/amazon-lookout-for-equipment
|
apps/cloudwatch-dashboard/layers/lookoutequipment/python/l4ecwcw.py
|
[
"MIT-0"
] |
Python
|
create_button
|
<not_specific>
|
def create_button(action,
payload,
label,
display_mode='popup',
disabled=False):
"""
This function creates an HTML button the user can interact with from
a CloudWatch custom widget.
Parameters:
action (string):
The ARN of a lambda function to call when clicking on this button
payload (string):
A JSON formatted string that will be passed as argument to the
lambda function used as endpoint for this button
label (string):
The label to use on the button
display_mode (string):
Can either be `popup` (display the result in a popup) or `widget`
(to replace the content of the current widget by the output of the
Lambda function used as endpoint). Defaults to `popup`
disabled (boolean):
Set to True to display a disabled button
Returns:
string: an HTML string with the button to display
"""
if disabled:
button_style = 'awsui-button-disabled'
disabled = 'disabled=""'
else:
button_style = 'awsui-button awsui-button-variant-primary'
disabled = ""
button = (
f'<a class="btn {button_style}" {disabled}>{label}</a>\n'
'<cwdb-action '
f'action="call" '
f'endpoint="{action}" '
f'display="{display_mode}">'
f'{json.dumps(payload)}'
'</cwdb-action>\n'
)
return button
|
This function creates an HTML button the user can interact with from
a CloudWatch custom widget.
Parameters:
action (string):
The ARN of a lambda function to call when clicking on this button
payload (string):
A JSON formatted string that will be passed as argument to the
lambda function used as endpoint for this button
label (string):
The label to use on the button
display_mode (string):
Can either be `popup` (display the result in a popup) or `widget`
(to replace the content of the current widget by the output of the
Lambda function used as endpoint). Defaults to `popup`
disabled (boolean):
Set to True to display a disabled button
Returns:
string: an HTML string with the button to display
|
This function creates an HTML button the user can interact with from
a CloudWatch custom widget.
|
[
"This",
"function",
"creates",
"an",
"HTML",
"button",
"the",
"user",
"can",
"interact",
"with",
"from",
"a",
"CloudWatch",
"custom",
"widget",
"."
] |
def create_button(action,
payload,
label,
display_mode='popup',
disabled=False):
if disabled:
button_style = 'awsui-button-disabled'
disabled = 'disabled=""'
else:
button_style = 'awsui-button awsui-button-variant-primary'
disabled = ""
button = (
f'<a class="btn {button_style}" {disabled}>{label}</a>\n'
'<cwdb-action '
f'action="call" '
f'endpoint="{action}" '
f'display="{display_mode}">'
f'{json.dumps(payload)}'
'</cwdb-action>\n'
)
return button
|
[
"def",
"create_button",
"(",
"action",
",",
"payload",
",",
"label",
",",
"display_mode",
"=",
"'popup'",
",",
"disabled",
"=",
"False",
")",
":",
"if",
"disabled",
":",
"button_style",
"=",
"'awsui-button-disabled'",
"disabled",
"=",
"'disabled=\"\"'",
"else",
":",
"button_style",
"=",
"'awsui-button awsui-button-variant-primary'",
"disabled",
"=",
"\"\"",
"button",
"=",
"(",
"f'<a class=\"btn {button_style}\" {disabled}>{label}</a>\\n'",
"'<cwdb-action '",
"f'action=\"call\" '",
"f'endpoint=\"{action}\" '",
"f'display=\"{display_mode}\">'",
"f'{json.dumps(payload)}'",
"'</cwdb-action>\\n'",
")",
"return",
"button"
] |
This function creates an HTML button the user can interact with from
a CloudWatch custom widget.
|
[
"This",
"function",
"creates",
"an",
"HTML",
"button",
"the",
"user",
"can",
"interact",
"with",
"from",
"a",
"CloudWatch",
"custom",
"widget",
"."
] |
[
"\"\"\"\n This function creates an HTML button the user can interact with from\n a CloudWatch custom widget.\n \n Parameters:\n action (string):\n The ARN of a lambda function to call when clicking on this button\n payload (string):\n A JSON formatted string that will be passed as argument to the\n lambda function used as endpoint for this button\n label (string):\n The label to use on the button\n display_mode (string):\n Can either be `popup` (display the result in a popup) or `widget` \n (to replace the content of the current widget by the output of the \n Lambda function used as endpoint). Defaults to `popup`\n disabled (boolean):\n Set to True to display a disabled button\n \n Returns:\n string: an HTML string with the button to display\n \"\"\""
] |
[
{
"param": "action",
"type": null
},
{
"param": "payload",
"type": null
},
{
"param": "label",
"type": null
},
{
"param": "display_mode",
"type": null
},
{
"param": "disabled",
"type": null
}
] |
{
"returns": [
{
"docstring": "an HTML string with the button to display",
"docstring_tokens": [
"an",
"HTML",
"string",
"with",
"the",
"button",
"to",
"display"
],
"type": "string"
}
],
"raises": [],
"params": [
{
"identifier": "action",
"type": null,
"docstring": "The ARN of a lambda function to call when clicking on this button",
"docstring_tokens": [
"The",
"ARN",
"of",
"a",
"lambda",
"function",
"to",
"call",
"when",
"clicking",
"on",
"this",
"button"
],
"default": null,
"is_optional": false
},
{
"identifier": "payload",
"type": null,
"docstring": "A JSON formatted string that will be passed as argument to the\nlambda function used as endpoint for this button",
"docstring_tokens": [
"A",
"JSON",
"formatted",
"string",
"that",
"will",
"be",
"passed",
"as",
"argument",
"to",
"the",
"lambda",
"function",
"used",
"as",
"endpoint",
"for",
"this",
"button"
],
"default": null,
"is_optional": false
},
{
"identifier": "label",
"type": null,
"docstring": "The label to use on the button",
"docstring_tokens": [
"The",
"label",
"to",
"use",
"on",
"the",
"button"
],
"default": null,
"is_optional": false
},
{
"identifier": "display_mode",
"type": null,
"docstring": "Can either be `popup` (display the result in a popup) or `widget`\n(to replace the content of the current widget by the output of the\nLambda function used as endpoint). Defaults to `popup`",
"docstring_tokens": [
"Can",
"either",
"be",
"`",
"popup",
"`",
"(",
"display",
"the",
"result",
"in",
"a",
"popup",
")",
"or",
"`",
"widget",
"`",
"(",
"to",
"replace",
"the",
"content",
"of",
"the",
"current",
"widget",
"by",
"the",
"output",
"of",
"the",
"Lambda",
"function",
"used",
"as",
"endpoint",
")",
".",
"Defaults",
"to",
"`",
"popup",
"`"
],
"default": null,
"is_optional": false
},
{
"identifier": "disabled",
"type": null,
"docstring": "Set to True to display a disabled button",
"docstring_tokens": [
"Set",
"to",
"True",
"to",
"display",
"a",
"disabled",
"button"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import json
def create_button(action,
payload,
label,
display_mode='popup',
disabled=False):
if disabled:
button_style = 'awsui-button-disabled'
disabled = 'disabled=""'
else:
button_style = 'awsui-button awsui-button-variant-primary'
disabled = ""
button = (
f'<a class="btn {button_style}" {disabled}>{label}</a>\n'
'<cwdb-action '
f'action="call" '
f'endpoint="{action}" '
f'display="{display_mode}">'
f'{json.dumps(payload)}'
'</cwdb-action>\n'
)
return button
| 1,140 | 366 |
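A usage sketch for `create_button`; the Lambda ARN is a made-up placeholder, and `json` must be imported as in the `code_with_imports` variant above:

html = create_button(
    action="arn:aws:lambda:us-east-1:123456789012:function:my-widget-handler",  # hypothetical ARN
    payload={"command": "refresh"},  # json.dumps() also accepts a dict, not only a pre-encoded string
    label="Refresh",
)
print(html)  # the <a>/<cwdb-action> markup that CloudWatch renders as a clickable button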
c54213aa145d7c09663f1d8320995cd0e71959c2
|
edwintcloud/Social-Network-Graph-Tutorial
|
Graph-Tutorial/tools.py
|
[
"MIT"
] |
Python
|
generate_graph
| null |
def generate_graph(your_name, names_file="names.txt", f_name="graph_data.txt"):
"""
Generate a graph from names and save its representation to a file.
Args:
your_name (string): Your name.
names_file (string): The file name or file path to read in
friend names from.
f_name (string): The file name or file path to save the generated
graph representation in.
"""
with open(f_name, "w") as f:
# write graph type
f.write("G\n")
# get list of friend names
with open(names_file, "r") as f2:
friends = f2.read().splitlines()
# write vertices
f.write(f"{your_name},{','.join(friends)}\n")
# iterate through friends
for friend in friends:
# edge from you to the friend
f.write(f"({your_name},{friend})\n")
# create a copy of friends without current friend
other_friends = [i for i in friends if i != friend]
# generate a random sample of 2-4 other friends
idx_sample = random.sample(
range(len(other_friends)), random.randint(2, 4))
# edge from friend to 2-4 other distinct friends
for idx in idx_sample:
f.write(f"({friend},{other_friends[idx]})\n")
|
Generate a graph from names and save its representation to a file.
Args:
your_name (string): Your name.
names_file (string): The file name or file path to read in
friend names from.
f_name (string): The file name or file path to save the generated
graph representation in.
|
Generate a graph from names and save its representation to a file.
Args:
your_name (string): Your name.
names_file (string): The file name or file path to read in
friend names from.
f_name (string): The file name or file path to save the generated
graph representation in.
|
[
"Generate",
"a",
"graph",
"from",
"names",
"and",
"save",
"its",
"representation",
"to",
"a",
"file",
".",
"Args",
":",
"your_name",
"(",
"string",
")",
":",
"Your",
"name",
".",
"names_file",
"(",
"string",
")",
":",
"The",
"file",
"name",
"or",
"file",
"path",
"to",
"read",
"in",
"friend",
"names",
"from",
".",
"f_name",
"(",
"string",
")",
":",
"The",
"file",
"name",
"or",
"file",
"path",
"to",
"save",
"the",
"generated",
"graph",
"representation",
"in",
"."
] |
def generate_graph(your_name, names_file="names.txt", f_name="graph_data.txt"):
with open(f_name, "w") as f:
f.write("G\n")
with open(names_file, "r") as f2:
friends = f2.read().splitlines()
f.write(f"{your_name},{','.join(friends)}\n")
for friend in friends:
f.write(f"({your_name},{friend})\n")
other_friends = [i for i in friends if i != friend]
idx_sample = random.sample(
range(len(other_friends)), random.randint(2, 4))
for idx in idx_sample:
f.write(f"({friend},{other_friends[idx]})\n")
|
[
"def",
"generate_graph",
"(",
"your_name",
",",
"names_file",
"=",
"\"names.txt\"",
",",
"f_name",
"=",
"\"graph_data.txt\"",
")",
":",
"with",
"open",
"(",
"f_name",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"G\\n\"",
")",
"with",
"open",
"(",
"names_file",
",",
"\"r\"",
")",
"as",
"f2",
":",
"friends",
"=",
"f2",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"f",
".",
"write",
"(",
"f\"{your_name},{','.join(friends)}\\n\"",
")",
"for",
"friend",
"in",
"friends",
":",
"f",
".",
"write",
"(",
"f\"({your_name},{friend})\\n\"",
")",
"other_friends",
"=",
"[",
"i",
"for",
"i",
"in",
"friends",
"if",
"i",
"!=",
"friend",
"]",
"idx_sample",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"len",
"(",
"other_friends",
")",
")",
",",
"random",
".",
"randint",
"(",
"2",
",",
"4",
")",
")",
"for",
"idx",
"in",
"idx_sample",
":",
"f",
".",
"write",
"(",
"f\"({friend},{other_friends[idx]})\\n\"",
")"
] |
Generate a graph from names and save its representation to a file.
|
[
"Generate",
"a",
"graph",
"from",
"names",
"and",
"save",
"its",
"representation",
"to",
"a",
"file",
"."
] |
[
"\"\"\"\n Generate a graph from names and save its representation to a file.\n\n Args:\n your_name (string): Your name.\n names_file (string): The file name or file path to read in\n friend names from.\n f_name (string): The file name or file path to save the generated\n graph representation in.\n \"\"\"",
"# write graph type",
"# get list of friend names",
"# write vertices",
"# iterate through friends",
"# edge from you to the friend",
"# create a copy of friends without current friend",
"# generate a random sample of 2-4 other friends",
"# edge from friend to 2-4 other distinct friends"
] |
[
{
"param": "your_name",
"type": null
},
{
"param": "names_file",
"type": null
},
{
"param": "f_name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "your_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "names_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "f_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import random
def generate_graph(your_name, names_file="names.txt", f_name="graph_data.txt"):
with open(f_name, "w") as f:
f.write("G\n")
with open(names_file, "r") as f2:
friends = f2.read().splitlines()
f.write(f"{your_name},{','.join(friends)}\n")
for friend in friends:
f.write(f"({your_name},{friend})\n")
other_friends = [i for i in friends if i != friend]
idx_sample = random.sample(
range(len(other_friends)), random.randint(2, 4))
for idx in idx_sample:
f.write(f"({friend},{other_friends[idx]})\n")
| 1,141 | 677 |
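`generate_graph` draws 2 to 4 other friends per friend with `random.sample`, so the names file needs at least five entries for the call to always succeed. A hypothetical run:

with open("names.txt", "w") as f:  # hypothetical input file
    f.write("Ann\nBob\nCam\nDee\nEli\n")
generate_graph("You")  # writes graph_data.txt: a "G" header, one vertex line, then edge tuples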
1a924d2a1e93dae49ef66dc6fa3570822e9b8d37
|
paaksing/Pyot
|
pyot/utils/text.py
|
[
"MIT"
] |
Python
|
camelcase
|
str
|
def camelcase(snake_str: str) -> str:
'''Convert string to json camelcase.'''
components = snake_str.split('_')
if len(components) == 1:
return components[0]
return components[0] + ''.join(x.title() for x in components[1:])
|
Convert string to json camelcase.
|
Convert string to json camelcase.
|
[
"Convert",
"string",
"to",
"json",
"camelcase",
"."
] |
def camelcase(snake_str: str) -> str:
components = snake_str.split('_')
if len(components) == 1:
return components[0]
return components[0] + ''.join(x.title() for x in components[1:])
|
[
"def",
"camelcase",
"(",
"snake_str",
":",
"str",
")",
"->",
"str",
":",
"components",
"=",
"snake_str",
".",
"split",
"(",
"'_'",
")",
"if",
"len",
"(",
"components",
")",
"==",
"1",
":",
"return",
"components",
"[",
"0",
"]",
"return",
"components",
"[",
"0",
"]",
"+",
"''",
".",
"join",
"(",
"x",
".",
"title",
"(",
")",
"for",
"x",
"in",
"components",
"[",
"1",
":",
"]",
")"
] |
Convert string to json camelcase.
|
[
"Convert",
"string",
"to",
"json",
"camelcase",
"."
] |
[
"'''Convert string to json camelcase.'''"
] |
[
{
"param": "snake_str",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "snake_str",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def camelcase(snake_str: str) -> str:
components = snake_str.split('_')
if len(components) == 1:
return components[0]
return components[0] + ''.join(x.title() for x in components[1:])
| 1,142 | 129 |
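Behavior of `camelcase` on a few inputs; note that `str.title()` lowercases everything after the first letter of each component:

assert camelcase("packet_type") == "packetType"
assert camelcase("single") == "single"  # a single component is returned unchanged
assert camelcase("max_HP") == "maxHp"   # title() normalizes the component's case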
2c2a2c811cc5740f6e5cd4f4db562077d81a4d27
|
xhdix/probe-legacy
|
ooni/utils/onion.py
|
[
"BSD-2-Clause"
] |
Python
|
transport_name
|
<not_specific>
|
def transport_name(address):
"""
    If the address of the bridge starts with a valid C identifier then
    we consider it to be a bridge.
    Returns:
        The transport_name if it's a transport.
        None if it's not an obfsproxy bridge.
"""
transport_name = address.split(' ')[0]
transport_name_chars = string.ascii_letters + string.digits
if all(c in transport_name_chars for c in transport_name):
return transport_name
return None
|
If the address of the bridge starts with a valid C identifier then
we consider it to be a bridge.
Returns:
The transport_name if it's a transport.
None if it's not an obfsproxy bridge.
|
If the address of the bridge starts with a valid C identifier then
we consider it to be a bridge.
|
[
"If",
"the",
"address",
"of",
"the",
"bridge",
"starts",
"with",
"a",
"valid",
"c",
"identifier",
"then",
"we",
"consider",
"it",
"to",
"be",
"a",
"bridge",
"."
] |
def transport_name(address):
transport_name = address.split(' ')[0]
transport_name_chars = string.ascii_letters + string.digits
if all(c in transport_name_chars for c in transport_name):
return transport_name
return None
|
[
"def",
"transport_name",
"(",
"address",
")",
":",
"transport_name",
"=",
"address",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
"transport_name_chars",
"=",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"if",
"all",
"(",
"c",
"in",
"transport_name_chars",
"for",
"c",
"in",
"transport_name",
")",
":",
"return",
"transport_name",
"return",
"None"
] |
If the address of the bridge starts with a valid C identifier then
we consider it to be a bridge.
|
[
"If",
"the",
"address",
"of",
"the",
"bridge",
"starts",
"with",
"a",
"valid",
"c",
"identifier",
"then",
"we",
"consider",
"it",
"to",
"be",
"a",
"bridge",
"."
] |
[
"\"\"\"\n If the address of the bridge starts with a valid c identifier then\n we consider it to be a bridge.\n Returns:\n The transport_name if it's a transport.\n None if it's not a obfsproxy bridge.\n \"\"\""
] |
[
{
"param": "address",
"type": null
}
] |
{
"returns": [
{
"docstring": "The transport_name if it's a transport.\nNone if it's not a obfsproxy bridge.",
"docstring_tokens": [
"The",
"transport_name",
"if",
"it",
"'",
"s",
"a",
"transport",
".",
"None",
"if",
"it",
"'",
"s",
"not",
"a",
"obfsproxy",
"bridge",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "address",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import string
def transport_name(address):
transport_name = address.split(' ')[0]
transport_name_chars = string.ascii_letters + string.digits
if all(c in transport_name_chars for c in transport_name):
return transport_name
return None
| 1,143 | 499 |
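With `string` imported as in the `code_with_imports` column, `transport_name` keeps the first space-separated token only when it is purely alphanumeric; the bridge lines below are illustrative:

assert transport_name("obfs4 192.0.2.1:443 cert=abc") == "obfs4"
assert transport_name("192.0.2.1:443 key=value") is None  # '.' and ':' are not identifier characters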
063be6d8eefb6e56fc5650ffdfeef956e840f037
|
clausserg/matrix_algebra
|
matrix_algebra/matrix_operations/mat_lu.py
|
[
"BSD-3-Clause"
] |
Python
|
swap_rows
|
<not_specific>
|
def swap_rows(mat_in, row, col):
"""
This function swaps a matrix row with another row.
    Swapping will be done if the element at the position specified by the user
    via the 'row' and 'col' variables, is zero. Swapping occurs with a row that
has a non-zero element in the same 'col'. If all rows contain a
zero at the 'row', 'col' position, the original input matrix is returned.
:param mat_in: list of lists, i.e. a matrix
:param row: int, row index in a matrix
:param col: int, column index in a matrix
:return: tuple, a list of lists and a multiplier that is either 1 if no
swapping has been done, or -1 if swapping was done.
"""
if mat_in[row][col] != 0:
# return the original matrix if the matrix element at the
# input [row][col] position is not zero
multiplier = 1
return mat_in, multiplier
else:
# find a row with non-zero matrix element in the same [col] position
        # and swap. If swapping is done, the matrix determinant should be multiplied
        # by -1, therefore a multiplier = -1 is returned together with the matrix with
# swapped rows.
for idx in range(row+1, len(mat_in)):
if mat_in[idx][col] != 0:
mat_in[row], mat_in[idx] = mat_in[idx], mat_in[row]
multiplier = -1
return mat_in, multiplier
# if the if-else block is skipped, means all rows have a zero
# matrix element along the [col], in this case we return the original matrix
multiplier = 1
return mat_in, multiplier
|
This function swaps a matrix row with another row.
Swapping will be done if the element at the position specified by the user
via the 'row' and 'col' variables, is zero. Swapping occurs with a row that
has a non-zero element in the same 'col'. If all rows contain a
zero at the 'row', 'col' position, the original input matrix is returned.
:param mat_in: list of lists, i.e. a matrix
:param row: int, row index in a matrix
:param col: int, column index in a matrix
:return: tuple, a list of lists and a multiplier that is either 1 if no
swapping has been done, or -1 if swapping was done.
|
This function swaps a matrix row with another row.
Swapping will be done if the element at the position specified by the user
via the 'row' and 'col' variables, is zero. Swapping occurs with a row that
has a non-zero element in the same 'col'. If all rows contain a
zero at the 'row', 'col' position, the original input matrix is returned.
|
[
"This",
"function",
"swaps",
"a",
"matrix",
"row",
"with",
"another",
"row",
".",
"Swaping",
"will",
"be",
"done",
"if",
"the",
"element",
"at",
"the",
"position",
"specified",
"by",
"the",
"user",
"via",
"the",
"'",
"row",
"'",
"and",
"'",
"col",
"'",
"variables",
"is",
"zero",
".",
"Swaping",
"occurs",
"with",
"a",
"row",
"that",
"has",
"a",
"non",
"-",
"zero",
"element",
"in",
"the",
"same",
"'",
"col",
"'",
".",
"If",
"all",
"rows",
"contain",
"a",
"zero",
"at",
"the",
"'",
"row",
"'",
"'",
"col",
"'",
"position",
"the",
"original",
"input",
"matrix",
"is",
"returned",
"."
] |
def swap_rows(mat_in, row, col):
if mat_in[row][col] != 0:
multiplier = 1
return mat_in, multiplier
else:
for idx in range(row+1, len(mat_in)):
if mat_in[idx][col] != 0:
mat_in[row], mat_in[idx] = mat_in[idx], mat_in[row]
multiplier = -1
return mat_in, multiplier
multiplier = 1
return mat_in, multiplier
|
[
"def",
"swap_rows",
"(",
"mat_in",
",",
"row",
",",
"col",
")",
":",
"if",
"mat_in",
"[",
"row",
"]",
"[",
"col",
"]",
"!=",
"0",
":",
"multiplier",
"=",
"1",
"return",
"mat_in",
",",
"multiplier",
"else",
":",
"for",
"idx",
"in",
"range",
"(",
"row",
"+",
"1",
",",
"len",
"(",
"mat_in",
")",
")",
":",
"if",
"mat_in",
"[",
"idx",
"]",
"[",
"col",
"]",
"!=",
"0",
":",
"mat_in",
"[",
"row",
"]",
",",
"mat_in",
"[",
"idx",
"]",
"=",
"mat_in",
"[",
"idx",
"]",
",",
"mat_in",
"[",
"row",
"]",
"multiplier",
"=",
"-",
"1",
"return",
"mat_in",
",",
"multiplier",
"multiplier",
"=",
"1",
"return",
"mat_in",
",",
"multiplier"
] |
This function swaps a matrix row with another row.
|
[
"This",
"function",
"swaps",
"a",
"matrix",
"row",
"with",
"another",
"row",
"."
] |
[
"\"\"\"\n This function swaps a matrix row with another row.\n Swaping will be done if the element at the position specified by the user\n via the 'row' and 'col' variables, is zero. Swaping occurs with a row that\n has a non-zero element in the same 'col'. If all rows contain a\n zero at the 'row', 'col' position, the original input matrix is returned.\n\n :param mat_in: list of lists, i.e. a matrix\n :param row: int, row index in a matrix\n :param col: int, column index in a matrix\n :return: tuple, a list of lists and a multiplier that is either 1 if no\n swapping has been done, or -1 if swapping was done.\n \"\"\"",
"# return the original matrix if the matrix element at the",
"# input [row][col] position is not zero",
"# find a row with non-zero matrix element in the same [col] position",
"# and swap. If swaping is done, the matrix determinant should be multiplied",
"# by -1, therefore a multiplier = -1 is returned togeteher with the matrix with",
"# swapped rows.",
"# if the if-else block is skipped, means all rows have a zero",
"# matrix element along the [col], in this case we return the original matrix"
] |
[
{
"param": "mat_in",
"type": null
},
{
"param": "row",
"type": null
},
{
"param": "col",
"type": null
}
] |
{
"returns": [
{
"docstring": "tuple, a list of lists and a multiplier that is either 1 if no\nswapping has been done, or -1 if swapping was done.",
"docstring_tokens": [
"tuple",
"a",
"list",
"of",
"lists",
"and",
"a",
"multiplier",
"that",
"is",
"either",
"1",
"if",
"no",
"swapping",
"has",
"been",
"done",
"or",
"-",
"1",
"if",
"swapping",
"was",
"done",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "mat_in",
"type": null,
"docstring": "list of lists, i.e. a matrix",
"docstring_tokens": [
"list",
"of",
"lists",
"i",
".",
"e",
".",
"a",
"matrix"
],
"default": null,
"is_optional": null
},
{
"identifier": "row",
"type": null,
"docstring": "int, row index in a matrix",
"docstring_tokens": [
"int",
"row",
"index",
"in",
"a",
"matrix"
],
"default": null,
"is_optional": null
},
{
"identifier": "col",
"type": null,
"docstring": "int, column index in a matrix",
"docstring_tokens": [
"int",
"column",
"index",
"in",
"a",
"matrix"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def swap_rows(mat_in, row, col):
if mat_in[row][col] != 0:
multiplier = 1
return mat_in, multiplier
else:
for idx in range(row+1, len(mat_in)):
if mat_in[idx][col] != 0:
mat_in[row], mat_in[idx] = mat_in[idx], mat_in[row]
multiplier = -1
return mat_in, multiplier
multiplier = 1
return mat_in, multiplier
| 1,144 | 726 |
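A worked example of `swap_rows`: a zero pivot at (0, 0) is swapped with the first lower row that has a non-zero entry in that column, and the multiplier records the determinant's sign flip:

mat = [[0, 1],
       [2, 3]]
swapped, multiplier = swap_rows(mat, 0, 0)
assert swapped == [[2, 3], [0, 1]] and multiplier == -1  # note: mat is modified in place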
d83c0d837e5ceaccaaaf789fe9df1b3ab36e05c0
|
Bougeant/exoplanets
|
exoplanets/astro_data.py
|
[
"Apache-2.0"
] |
Python
|
record_dataframe
| null |
def record_dataframe(df, filename):
""" Saves the pandas DataFrame into a .csv format if a filename is provided.
:param pandas.DataFrame df:
A pandas DataFrame to save into a .csv format.
:param str filename:
The location in which to save the DataFrame into a .csv format.
"""
if filename:
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
df.to_csv(filename, index=False)
|
Saves the pandas DataFrame into a .csv format if a filename is provided.
:param pandas.DataFrame df:
A pandas DataFrame to save into a .csv format.
:param str filename:
The location in which to save the DataFrame into a .csv format.
|
Saves the pandas DataFrame into a .csv format if a filename is provided.
|
[
"Saves",
"the",
"pandas",
"DataFrame",
"into",
"a",
".",
"csv",
"format",
"if",
"a",
"filename",
"is",
"provided",
"."
] |
def record_dataframe(df, filename):
if filename:
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
df.to_csv(filename, index=False)
|
[
"def",
"record_dataframe",
"(",
"df",
",",
"filename",
")",
":",
"if",
"filename",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")",
"df",
".",
"to_csv",
"(",
"filename",
",",
"index",
"=",
"False",
")"
] |
Saves the pandas DataFrame into a .csv format if a filename is provided.
|
[
"Saves",
"the",
"pandas",
"DataFrame",
"into",
"a",
".",
"csv",
"format",
"if",
"a",
"filename",
"is",
"provided",
"."
] |
[
"\"\"\" Saves the pandas DataFrame into a .csv format if a filename is provided.\n\n :param pandas.DataFrame df:\n A pandas DataFrame to save into a .csv format.\n :param str filename:\n The location in which to save the DataFrame into a .csv format.\n \"\"\""
] |
[
{
"param": "df",
"type": null
},
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": "A pandas DataFrame to save into a .csv format.",
"docstring_tokens": [
"A",
"pandas",
"DataFrame",
"to",
"save",
"into",
"a",
".",
"csv",
"format",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "filename",
"type": null,
"docstring": "The location in which to save the DataFrame into a .csv format.",
"docstring_tokens": [
"The",
"location",
"in",
"which",
"to",
"save",
"the",
"DataFrame",
"into",
"a",
".",
"csv",
"format",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import os
def record_dataframe(df, filename):
if filename:
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
df.to_csv(filename, index=False)
| 1,145 | 781 |
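A usage sketch for `record_dataframe` (pandas assumed available; data and path hypothetical). Because the function calls `os.makedirs` on the directory part, the filename should include a directory component; a bare name gives `os.path.dirname(...) == ''`, and `os.makedirs('')` raises:

import pandas as pd
df = pd.DataFrame({"planet": ["Kepler-22b"], "radius": [2.4]})  # hypothetical data
record_dataframe(df, "output/exoplanets.csv")  # creates output/ if missing, then writes the CSV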
2e7c81b95eb400ebcb29171d769427a4ca98877c
|
j-hugo/DLD1
|
train.py
|
[
"MIT"
] |
Python
|
makedirs
| null |
def makedirs(args):
"""create directories to save model and metric
Args:
args: arguments from the parser.
"""
os.makedirs(args.model_path, exist_ok=True)
os.makedirs(args.metric_path, exist_ok=True)
|
create directories to save model and metric
Args:
args: arguments from the parser.
|
create directories to save model and metric
|
[
"create",
"directories",
"to",
"save",
"model",
"and",
"metric"
] |
def makedirs(args):
os.makedirs(args.model_path, exist_ok=True)
os.makedirs(args.metric_path, exist_ok=True)
|
[
"def",
"makedirs",
"(",
"args",
")",
":",
"os",
".",
"makedirs",
"(",
"args",
".",
"model_path",
",",
"exist_ok",
"=",
"True",
")",
"os",
".",
"makedirs",
"(",
"args",
".",
"metric_path",
",",
"exist_ok",
"=",
"True",
")"
] |
create directories to save model and metric
|
[
"create",
"directories",
"to",
"save",
"model",
"and",
"metric"
] |
[
"\"\"\"create directories to save model and metric \n\n Args:\n args: arguments from the parser.\n \n \"\"\""
] |
[
{
"param": "args",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": "arguments from the parser.",
"docstring_tokens": [
"arguments",
"from",
"the",
"parser",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def makedirs(args):
os.makedirs(args.model_path, exist_ok=True)
os.makedirs(args.metric_path, exist_ok=True)
| 1,147 | 235 |
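`makedirs` only needs an object with `model_path` and `metric_path` attributes, so a plain namespace can stand in for the parser output when testing:

from types import SimpleNamespace
args = SimpleNamespace(model_path="runs/model", metric_path="runs/metrics")  # stand-in for argparse
makedirs(args)  # both directories now exist; exist_ok=True makes repeated calls harmless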
d6ebadacc7a2e6a9bf5116fdf9033103518b40ab
|
kylepwarren/lxdui
|
app/lib/conf.py
|
[
"Apache-2.0"
] |
Python
|
envGet
|
<not_specific>
|
def envGet():
"""
Retrieve the environment variables containing the log and conf paths.
:return: Returns a dictionary containing the file paths
"""
env = {}
for k, v in os.environ.items():
if k in ['LXDUI_LOG', 'LXDUI_CONF']:
env.update({k: os.environ.get(k)})
return env
|
Retrieve the environment variables containing the log and conf paths.
:return: Returns a dictionary containing the file paths
|
Retrieve the environment variables containing the log and conf paths.
|
[
"Retrieve",
"the",
"environment",
"variables",
"containing",
"the",
"log",
"and",
"conf",
"paths",
"."
] |
def envGet():
env = {}
for k, v in os.environ.items():
if k in ['LXDUI_LOG', 'LXDUI_CONF']:
env.update({k: os.environ.get(k)})
return env
|
[
"def",
"envGet",
"(",
")",
":",
"env",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"os",
".",
"environ",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"[",
"'LXDUI_LOG'",
",",
"'LXDUI_CONF'",
"]",
":",
"env",
".",
"update",
"(",
"{",
"k",
":",
"os",
".",
"environ",
".",
"get",
"(",
"k",
")",
"}",
")",
"return",
"env"
] |
Retrieve the environment variables containing the log and conf paths.
|
[
"Retrieve",
"the",
"environment",
"variables",
"containing",
"the",
"log",
"and",
"conf",
"paths",
"."
] |
[
"\"\"\"\n Retrieve the environment variables containing the log and conf paths.\n\n :return: Returns a dictionary containing the file paths\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": "Returns a dictionary containing the file paths",
"docstring_tokens": [
"Returns",
"a",
"dictionary",
"containing",
"the",
"file",
"paths"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import os
def envGet():
env = {}
for k, v in os.environ.items():
if k in ['LXDUI_LOG', 'LXDUI_CONF']:
env.update({k: os.environ.get(k)})
return env
| 1,148 | 757 |
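`envGet` returns only whichever of the two LXDUI variables are actually set. A hypothetical session:

import os
os.environ["LXDUI_CONF"] = "/etc/lxdui/lxdui.conf"  # hypothetical path
print(envGet())  # {'LXDUI_CONF': '/etc/lxdui/lxdui.conf'}, plus LXDUI_LOG if that is set too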
6d421e67fa1d802f8a060ed3effbf76330db7f04
|
mohitkh7/oppia
|
scripts/linters/codeowner_linter.py
|
[
"Apache-2.0"
] |
Python
|
_is_path_ignored
|
<not_specific>
|
def _is_path_ignored(path_to_check):
"""Checks whether the given path is ignored by git.
Args:
path_to_check: str. A path to a file or a dir.
Returns:
bool. Whether the given path is ignored by git.
"""
command = ['git', 'check-ignore', '-q', path_to_check]
# The "git check-ignore <path>" command returns 0 when the path is ignored
# otherwise it returns 1. subprocess.call then returns this returncode.
return subprocess.call(command) == 0
|
Checks whether the given path is ignored by git.
Args:
path_to_check: str. A path to a file or a dir.
Returns:
bool. Whether the given path is ignored by git.
|
Checks whether the given path is ignored by git.
|
[
"Checks",
"whether",
"the",
"given",
"path",
"is",
"ignored",
"by",
"git",
"."
] |
def _is_path_ignored(path_to_check):
command = ['git', 'check-ignore', '-q', path_to_check]
return subprocess.call(command) == 0
|
[
"def",
"_is_path_ignored",
"(",
"path_to_check",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'check-ignore'",
",",
"'-q'",
",",
"path_to_check",
"]",
"return",
"subprocess",
".",
"call",
"(",
"command",
")",
"==",
"0"
] |
Checks whether the given path is ignored by git.
|
[
"Checks",
"whether",
"the",
"given",
"path",
"is",
"ignored",
"by",
"git",
"."
] |
[
"\"\"\"Checks whether the given path is ignored by git.\n\n Args:\n path_to_check: str. A path to a file or a dir.\n\n Returns:\n bool. Whether the given path is ignored by git.\n \"\"\"",
"# The \"git check-ignore <path>\" command returns 0 when the path is ignored",
"# otherwise it returns 1. subprocess.call then returns this returncode."
] |
[
{
"param": "path_to_check",
"type": null
}
] |
{
"returns": [
{
"docstring": "bool. Whether the given path is ignored by git.",
"docstring_tokens": [
"bool",
".",
"Whether",
"the",
"given",
"path",
"is",
"ignored",
"by",
"git",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "path_to_check",
"type": null,
"docstring": "str. A path to a file or a dir.",
"docstring_tokens": [
"str",
".",
"A",
"path",
"to",
"a",
"file",
"or",
"a",
"dir",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
def _is_path_ignored(path_to_check):
command = ['git', 'check-ignore', '-q', path_to_check]
return subprocess.call(command) == 0
| 1,149 | 99 |
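A sketch of `_is_path_ignored` in use; it must run inside a git checkout, and the ignore rule is an assumed setup:

# Assuming the working directory is a git repo whose .gitignore contains "*.pyc":
print(_is_path_ignored("build/module.pyc"))  # True: "git check-ignore -q" exits with 0
print(_is_path_ignored("scripts/run.py"))    # False for a non-ignored path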
a277c88ec7bef7b7fc75336b3806e5b2e9367f24
|
dongxinb/naiveproxy
|
src/build/android/pylib/local/machine/local_machine_junit_test_run.py
|
[
"BSD-3-Clause"
] |
Python
|
PrintProcessesStdout
| null |
def PrintProcessesStdout(procs):
"""Prints the stdout of all the processes.
Buffers the stdout of the processes and prints it when finished.
Args:
procs: A list of subprocesses.
Returns: N/A
"""
streams = [p.stdout for p in procs]
outputs = collections.defaultdict(list)
first_fd = streams[0].fileno()
while streams:
rstreams, _, _ = select.select(streams, [], [])
for stream in rstreams:
line = stream.readline()
if line:
# Print out just one output so user can see work being done rather
# than waiting for it all at the end.
if stream.fileno() == first_fd:
sys.stdout.write(line)
else:
outputs[stream.fileno()].append(line)
else:
streams.remove(stream) # End of stream.
for p in procs:
sys.stdout.write(''.join(outputs[p.stdout.fileno()]))
|
Prints the stdout of all the processes.
Buffers the stdout of the processes and prints it when finished.
Args:
procs: A list of subprocesses.
Returns: N/A
|
Prints the stdout of all the processes.
Buffers the stdout of the processes and prints it when finished.
|
[
"Prints",
"the",
"stdout",
"of",
"all",
"the",
"processes",
".",
"Buffers",
"the",
"stdout",
"of",
"the",
"processes",
"and",
"prints",
"it",
"when",
"finished",
"."
] |
def PrintProcessesStdout(procs):
streams = [p.stdout for p in procs]
outputs = collections.defaultdict(list)
first_fd = streams[0].fileno()
while streams:
rstreams, _, _ = select.select(streams, [], [])
for stream in rstreams:
line = stream.readline()
if line:
if stream.fileno() == first_fd:
sys.stdout.write(line)
else:
outputs[stream.fileno()].append(line)
else:
streams.remove(stream)
for p in procs:
sys.stdout.write(''.join(outputs[p.stdout.fileno()]))
|
[
"def",
"PrintProcessesStdout",
"(",
"procs",
")",
":",
"streams",
"=",
"[",
"p",
".",
"stdout",
"for",
"p",
"in",
"procs",
"]",
"outputs",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"first_fd",
"=",
"streams",
"[",
"0",
"]",
".",
"fileno",
"(",
")",
"while",
"streams",
":",
"rstreams",
",",
"_",
",",
"_",
"=",
"select",
".",
"select",
"(",
"streams",
",",
"[",
"]",
",",
"[",
"]",
")",
"for",
"stream",
"in",
"rstreams",
":",
"line",
"=",
"stream",
".",
"readline",
"(",
")",
"if",
"line",
":",
"if",
"stream",
".",
"fileno",
"(",
")",
"==",
"first_fd",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
")",
"else",
":",
"outputs",
"[",
"stream",
".",
"fileno",
"(",
")",
"]",
".",
"append",
"(",
"line",
")",
"else",
":",
"streams",
".",
"remove",
"(",
"stream",
")",
"for",
"p",
"in",
"procs",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"''",
".",
"join",
"(",
"outputs",
"[",
"p",
".",
"stdout",
".",
"fileno",
"(",
")",
"]",
")",
")"
] |
Prints the stdout of all the processes.
|
[
"Prints",
"the",
"stdout",
"of",
"all",
"the",
"processes",
"."
] |
[
"\"\"\"Prints the stdout of all the processes.\n\n Buffers the stdout of the processes and prints it when finished.\n\n Args:\n procs: A list of subprocesses.\n\n Returns: N/A\n \"\"\"",
"# Print out just one output so user can see work being done rather",
"# than waiting for it all at the end.",
"# End of stream."
] |
[
{
"param": "procs",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "procs",
"type": null,
"docstring": "A list of subprocesses.",
"docstring_tokens": [
"A",
"list",
"of",
"subprocesses",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import sys
import select
import collections
def PrintProcessesStdout(procs):
streams = [p.stdout for p in procs]
outputs = collections.defaultdict(list)
first_fd = streams[0].fileno()
while streams:
rstreams, _, _ = select.select(streams, [], [])
for stream in rstreams:
line = stream.readline()
if line:
if stream.fileno() == first_fd:
sys.stdout.write(line)
else:
outputs[stream.fileno()].append(line)
else:
streams.remove(stream)
for p in procs:
sys.stdout.write(''.join(outputs[p.stdout.fileno()]))
| 1,150 | 837 |
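A usage sketch for `PrintProcessesStdout` on POSIX, where `select.select` accepts pipe file objects; the commands are illustrative:

import subprocess
procs = [subprocess.Popen(["echo", "job %d" % i], stdout=subprocess.PIPE, text=True)
         for i in range(3)]
PrintProcessesStdout(procs)  # the first process streams live; the others are buffered, then flushed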
17fc1e6e9837b0f046a030cfb476ced9a05b09c1
|
Dlin163/sc-project
|
stanCode_Projects/hangman_game/rocket.py
|
[
"MIT"
] |
Python
|
belt
| null |
def belt(a):
"""
This function creates the rocket belt.
:param a: Positive int, a >= 1
:return: no return. It prints out str
"""
ans = '+'
for i in range(2 * a):
ans += '='
ans += '+'
print(ans)
|
This function creates the rocket belt.
:param a: Positive int, a >= 1
:return: no return. It prints out str
|
This function creates the rocket belt.
|
[
"This",
"function",
"creates",
"the",
"rocket",
"belt",
"."
] |
def belt(a):
ans = '+'
for i in range(2 * a):
ans += '='
ans += '+'
print(ans)
|
[
"def",
"belt",
"(",
"a",
")",
":",
"ans",
"=",
"'+'",
"for",
"i",
"in",
"range",
"(",
"2",
"*",
"a",
")",
":",
"ans",
"+=",
"'='",
"ans",
"+=",
"'+'",
"print",
"(",
"ans",
")"
] |
This function creates the rocket belt.
|
[
"This",
"function",
"creates",
"the",
"rocket",
"belt",
"."
] |
[
"\"\"\"\r\n This function creates the rocket belt.\r\n :param a: Positive int, a >= 1\r\n :return: no return. It prints out str\r\n \"\"\""
] |
[
{
"param": "a",
"type": null
}
] |
{
"returns": [
{
"docstring": "no return. It prints out str",
"docstring_tokens": [
"no",
"return",
".",
"It",
"prints",
"out",
"str"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": "Positive int, a >= 1",
"docstring_tokens": [
"Positive",
"int",
"a",
">",
"=",
"1"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def belt(a):
ans = '+'
for i in range(2 * a):
ans += '='
ans += '+'
print(ans)
| 1,151 | 728 |
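`belt(a)` prints 2*a '=' characters between two '+' signs:

belt(3)  # prints "+======+"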
098b9d645db46c361a6e54faeffd488076ab6cdb
|
elastic/apm-pipeline-library
|
resources/scripts/pytest_otel/tests/it/utils/__init__.py
|
[
"Apache-2.0"
] |
Python
|
is_portListening
|
<not_specific>
|
def is_portListening(host, port):
"""Check a port in a host is liostening"""
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
location = (host, port)
result_of_check = a_socket.connect_ex(location)
if result_of_check == 0:
return True
else:
return False
|
Check a port in a host is listening
|
Check a port in a host is listening
|
[
"Check",
"a",
"port",
"in",
"a",
"host",
"is",
"liostening"
] |
def is_portListening(host, port):
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
location = (host, port)
result_of_check = a_socket.connect_ex(location)
if result_of_check == 0:
return True
else:
return False
|
[
"def",
"is_portListening",
"(",
"host",
",",
"port",
")",
":",
"a_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"location",
"=",
"(",
"host",
",",
"port",
")",
"result_of_check",
"=",
"a_socket",
".",
"connect_ex",
"(",
"location",
")",
"if",
"result_of_check",
"==",
"0",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check a port in a host is listening
|
[
"Check",
"a",
"port",
"in",
"a",
"host",
"is",
"liostening"
] |
[
"\"\"\"Check a port in a host is liostening\"\"\""
] |
[
{
"param": "host",
"type": null
},
{
"param": "port",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "host",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "port",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import socket
def is_portListening(host, port):
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
location = (host, port)
result_of_check = a_socket.connect_ex(location)
if result_of_check == 0:
return True
else:
return False
| 1,152 | 770 |
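A quick check with `is_portListening`; host and port are arbitrary, and note the probe socket is never closed, so heavy use would leak descriptors until garbage collection:

print(is_portListening("127.0.0.1", 8080))  # True only if something accepts connections on 8080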
8e82e9151ba47ee93b26f09b102b90aa6aa03936
|
sergej-C/dl_utils
|
preprocessing.py
|
[
"MIT"
] |
Python
|
resize_pts
|
<not_specific>
|
def resize_pts(pts, ow, oh, nw, nh, pos_w, pos_h):
"""
calculate coo of points in a resized img
in pts pair of coo (x,y)
"""
new_pts = []
for p in pts:
ox = p[0]
oy = p[1]
newx = (ox/float(ow)*nw) + pos_w
newy = (oy/float(oh)*nh) + pos_h
new_pts.append((newx, newy))
return new_pts
|
calculate coo of points in a resized img
in pts pair of coo (x,y)
|
calculate coo of points in a resized img
in pts pair of coo (x,y)
|
[
"calculate",
"coo",
"of",
"points",
"in",
"a",
"resized",
"img",
"in",
"pts",
"pair",
"of",
"coo",
"(",
"x",
"y",
")"
] |
def resize_pts(pts, ow, oh, nw, nh, pos_w, pos_h):
new_pts = []
for p in pts:
ox = p[0]
oy = p[1]
newx = (ox/float(ow)*nw) + pos_w
newy = (oy/float(oh)*nh) + pos_h
new_pts.append((newx, newy))
return new_pts
|
[
"def",
"resize_pts",
"(",
"pts",
",",
"ow",
",",
"oh",
",",
"nw",
",",
"nh",
",",
"pos_w",
",",
"pos_h",
")",
":",
"new_pts",
"=",
"[",
"]",
"for",
"p",
"in",
"pts",
":",
"ox",
"=",
"p",
"[",
"0",
"]",
"oy",
"=",
"p",
"[",
"1",
"]",
"newx",
"=",
"(",
"ox",
"/",
"float",
"(",
"ow",
")",
"*",
"nw",
")",
"+",
"pos_w",
"newy",
"=",
"(",
"oy",
"/",
"float",
"(",
"oh",
")",
"*",
"nh",
")",
"+",
"pos_h",
"new_pts",
".",
"append",
"(",
"(",
"newx",
",",
"newy",
")",
")",
"return",
"new_pts"
] |
calculate coo of points in a resized img
in pts pair of coo (x,y)
|
[
"calculate",
"coo",
"of",
"points",
"in",
"a",
"resized",
"img",
"in",
"pts",
"pair",
"of",
"coo",
"(",
"x",
"y",
")"
] |
[
"\"\"\"\n calculate coo of points in a resized img\n in pts pair of coo (x,y)\n \"\"\""
] |
[
{
"param": "pts",
"type": null
},
{
"param": "ow",
"type": null
},
{
"param": "oh",
"type": null
},
{
"param": "nw",
"type": null
},
{
"param": "nh",
"type": null
},
{
"param": "pos_w",
"type": null
},
{
"param": "pos_h",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pts",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ow",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "oh",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "nw",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "nh",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "pos_w",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "pos_h",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def resize_pts(pts, ow, oh, nw, nh, pos_w, pos_h):
new_pts = []
for p in pts:
ox = p[0]
oy = p[1]
newx = (ox/float(ow)*nw) + pos_w
newy = (oy/float(oh)*nh) + pos_h
new_pts.append((newx, newy))
return new_pts
| 1,153 | 237 |
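A worked example of `resize_pts`, mapping points from a 100x100 image onto a 50x50 region placed at offset (10, 10):

pts = [(0, 0), (50, 50), (100, 100)]
print(resize_pts(pts, 100, 100, 50, 50, 10, 10))  # [(10.0, 10.0), (35.0, 35.0), (60.0, 60.0)]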
2a49820c67cc5312531bb3eb8208d81743535c6e
|
ynsnf/apysc
|
tests/_event/test_document_mouse_wheel_interface.py
|
[
"MIT"
] |
Python
|
on_mouse_wheel_1
|
None
|
def on_mouse_wheel_1(e: ap.WheelEvent, options: Dict[str, Any]) -> None:
"""
Test handler for mouse wheel event.
Parameters
----------
e : WheelEvent
Event object.
options : dict
Optional arguments dictionary.
"""
assert options['msg'] == 'Hello!'
|
Test handler for mouse wheel event.
Parameters
----------
e : WheelEvent
Event object.
options : dict
Optional arguments dictionary.
|
Test handler for mouse wheel event.
|
[
"Test",
"handler",
"for",
"mouse",
"wheel",
"event",
"."
] |
def on_mouse_wheel_1(e: ap.WheelEvent, options: Dict[str, Any]) -> None:
assert options['msg'] == 'Hello!'
|
[
"def",
"on_mouse_wheel_1",
"(",
"e",
":",
"ap",
".",
"WheelEvent",
",",
"options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"None",
":",
"assert",
"options",
"[",
"'msg'",
"]",
"==",
"'Hello!'"
] |
Test handler for mouse wheel event.
|
[
"Test",
"handler",
"for",
"mouse",
"wheel",
"event",
"."
] |
[
"\"\"\"\r\n Test handler for mouse wheel event.\r\n\r\n Parameters\r\n ----------\r\n e : WheelEvent\r\n Event object.\r\n options : dict\r\n Optional arguments dictionary.\r\n \"\"\""
] |
[
{
"param": "e",
"type": "ap.WheelEvent"
},
{
"param": "options",
"type": "Dict[str, Any]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "e",
"type": "ap.WheelEvent",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "options",
"type": "Dict[str, Any]",
"docstring": "Optional arguments dictionary.",
"docstring_tokens": [
"Optional",
"arguments",
"dictionary",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def on_mouse_wheel_1(e: ap.WheelEvent, options: Dict[str, Any]) -> None:
assert options['msg'] == 'Hello!'
| 1,154 | 244 |
323f45fa0670810e55e531ff681ebe1cabd6a51c
|
Lab41/magichour
|
magichour/api/dist/preprocess/readLog.py
|
[
"Apache-2.0"
] |
Python
|
proc_log_line
|
<not_specific>
|
def proc_log_line(line, logFile):
'''
handles the logfile specific parsing input lines into 2 parts
ts: timestamp float
text: the rest of the message
Args:
line(string): text to process
logFile(string): hint of URI used for input
should use for switching parsing
based off different directories
Returns:
retval(list[string,string]): [ts, text]
'''
return line.strip().rstrip().split(' ', 3)[2:]
|
handles the logfile specific parsing input lines into 2 parts
ts: timestamp float
text: the rest of the message
Args:
line(string): text to process
logFile(string): hint of URI used for input
should use for switching parsing
based off different directories
Returns:
retval(list[string,string]): [ts, text]
|
handles the logfile specific parsing input lines into 2 parts
ts: timestamp float
text: the rest of the message
|
[
"handles",
"the",
"logfile",
"specific",
"parsing",
"input",
"lines",
"into",
"2",
"parts",
"ts",
":",
"timestamp",
"float",
"text",
":",
"the",
"rest",
"of",
"the",
"message"
] |
def proc_log_line(line, logFile):
return line.strip().rstrip().split(' ', 3)[2:]
|
[
"def",
"proc_log_line",
"(",
"line",
",",
"logFile",
")",
":",
"return",
"line",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"3",
")",
"[",
"2",
":",
"]"
] |
handles the logfile specific parsing input lines into 2 parts
ts: timestamp float
text: the rest of the message
|
[
"handles",
"the",
"logfile",
"specific",
"parsing",
"input",
"lines",
"into",
"2",
"parts",
"ts",
":",
"timestamp",
"float",
"text",
":",
"the",
"rest",
"of",
"the",
"message"
] |
[
"'''\n handles the logfile specific parsing input lines into 2 parts\n ts: timestamp float\n text: the rest of the message\n\n Args:\n line(string): text to process\n logFile(string): hint of URI used for input\n should use for switching parsing\n based off different directories\n\n Returns:\n retval(list[string,string]): [ts, text]\n '''"
] |
[
{
"param": "line",
"type": null
},
{
"param": "logFile",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "retval(list[string,string])"
}
],
"raises": [],
"params": [
{
"identifier": "line",
"type": null,
"docstring": "text to process",
"docstring_tokens": [
"text",
"to",
"process"
],
"default": null,
"is_optional": false
},
{
"identifier": "logFile",
"type": null,
"docstring": "hint of URI used for input\nshould use for switching parsing\nbased off different directories",
"docstring_tokens": [
"hint",
"of",
"URI",
"used",
"for",
"input",
"should",
"use",
"for",
"switching",
"parsing",
"based",
"off",
"different",
"directories"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def proc_log_line(line, logFile):
return line.strip().rstrip().split(' ', 3)[2:]
| 1,155 | 154 |
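`proc_log_line` drops the first two space-separated fields and keeps at most the next two, i.e. `[ts, text]`; the log layout below is an assumed example:

line = "2016-01-01 12:00:00 1451649600.0 kernel: something happened"
print(proc_log_line(line, "tbird.log"))  # ['1451649600.0', 'kernel: something happened']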
cc3d81debd46f50299e407b33f89938d78f72f07
|
prashg28/pyang-ct
|
pyang/statements.py
|
[
"0BSD"
] |
Python
|
search_grouping
|
<not_specific>
|
def search_grouping(stmt, name):
"""Search for a grouping in scope
First search the hierarchy, then the module and its submodules."""
while stmt is not None:
if name in stmt.i_groupings:
return stmt.i_groupings[name]
stmt = stmt.parent
return None
|
Search for a grouping in scope
First search the hierarchy, then the module and its submodules.
|
Search for a grouping in scope
First search the hierarchy, then the module and its submodules.
|
[
"Search",
"for",
"a",
"grouping",
"in",
"scope",
"First",
"search",
"the",
"hierarchy",
"then",
"the",
"module",
"and",
"its",
"submodules",
"."
] |
def search_grouping(stmt, name):
while stmt is not None:
if name in stmt.i_groupings:
return stmt.i_groupings[name]
stmt = stmt.parent
return None
|
[
"def",
"search_grouping",
"(",
"stmt",
",",
"name",
")",
":",
"while",
"stmt",
"is",
"not",
"None",
":",
"if",
"name",
"in",
"stmt",
".",
"i_groupings",
":",
"return",
"stmt",
".",
"i_groupings",
"[",
"name",
"]",
"stmt",
"=",
"stmt",
".",
"parent",
"return",
"None"
] |
Search for a grouping in scope
First search the hierarchy, then the module and its submodules.
|
[
"Search",
"for",
"a",
"grouping",
"in",
"scope",
"First",
"search",
"the",
"hierarchy",
"then",
"the",
"module",
"and",
"its",
"submodules",
"."
] |
[
"\"\"\"Search for a grouping in scope\n First search the hierarchy, then the module and its submodules.\"\"\""
] |
[
{
"param": "stmt",
"type": null
},
{
"param": "name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "stmt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def search_grouping(stmt, name):
while stmt is not None:
if name in stmt.i_groupings:
return stmt.i_groupings[name]
stmt = stmt.parent
return None
| 1,156 | 122 |
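A sketch of search_grouping with a stand-in statement node, assuming the definition above; real pyang statements carry i_groupings and parent the same way:

class Stmt:                          # hypothetical stand-in for a pyang statement
    def __init__(self, groupings, parent=None):
        self.i_groupings = groupings
        self.parent = parent

root = Stmt({"addr": "addr-grouping"})
leaf = Stmt({}, parent=root)
print(search_grouping(leaf, "addr"))   # addr-grouping (found via the parent chain)
print(search_grouping(leaf, "port"))   # None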
3b5146ab57141c38d8ce1fa483281d294e67e37a
|
gustavofariaa/Encrypt-Image-DES-
|
DES.py
|
[
"MIT"
] |
Python
|
apply_Permutation
|
<not_specific>
|
def apply_Permutation(permutation_table, sbox_32bits):
""" It takes Sboxes output and a permutation table and return 32 bit binary string"""
final_32bits = ""
for index in permutation_table:
final_32bits += sbox_32bits[index-1]
return final_32bits
|
It takes Sboxes output and a permutation table and return 32 bit binary string
|
It takes Sboxes output and a permutation table and return 32 bit binary string
|
[
"It",
"takes",
"Sboxes",
"output",
"and",
"a",
"permutation",
"table",
"and",
"return",
"32",
"bit",
"binary",
"string"
] |
def apply_Permutation(permutation_table, sbox_32bits):
final_32bits = ""
for index in permutation_table:
final_32bits += sbox_32bits[index-1]
return final_32bits
|
[
"def",
"apply_Permutation",
"(",
"permutation_table",
",",
"sbox_32bits",
")",
":",
"final_32bits",
"=",
"\"\"",
"for",
"index",
"in",
"permutation_table",
":",
"final_32bits",
"+=",
"sbox_32bits",
"[",
"index",
"-",
"1",
"]",
"return",
"final_32bits"
] |
It takes Sboxes output and a permutation table and return 32 bit binary string
|
[
"It",
"takes",
"Sboxes",
"output",
"and",
"a",
"permutation",
"table",
"and",
"return",
"32",
"bit",
"binary",
"string"
] |
[
"\"\"\" It takes Sboxes output and a permutation table and return 32 bit binary string\"\"\""
] |
[
{
"param": "permutation_table",
"type": null
},
{
"param": "sbox_32bits",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "permutation_table",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sbox_32bits",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def apply_Permutation(permutation_table, sbox_32bits):
final_32bits = ""
for index in permutation_table:
final_32bits += sbox_32bits[index-1]
return final_32bits
| 1,157 | 901 |
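A toy call of apply_Permutation, assuming the definition above; a 4-bit table stands in for the real 32-entry DES P-table, and indices are 1-based:

bits = "1010"
table = [2, 4, 1, 3]                    # hypothetical 4-entry permutation
print(apply_Permutation(table, bits))   # 0011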
b0971dbac886d8cae935d855103b9c3a46e59a3a
|
mrdragonbear/covid-tracking
|
module/tracking/geo.py
|
[
"MIT"
] |
Python
|
points_in_poly
|
<not_specific>
|
def points_in_poly(gdf, poly):
"""Return a boolean series indicating which points in `gdf` are within
shapely Polygon, `poly`.
"""
return gdf.geometry.within(poly)
|
Return a boolean series indicating which points in `gdf` are within
shapely Polygon, `poly`.
|
Return a boolean series indicating which points in `gdf` are within
shapely Polygon, `poly`.
|
[
"Return",
"a",
"boolean",
"series",
"indicating",
"which",
"points",
"in",
"`",
"gdf",
"`",
"are",
"within",
"shapely",
"Polygon",
"`",
"poly",
"`",
"."
] |
def points_in_poly(gdf, poly):
return gdf.geometry.within(poly)
|
[
"def",
"points_in_poly",
"(",
"gdf",
",",
"poly",
")",
":",
"return",
"gdf",
".",
"geometry",
".",
"within",
"(",
"poly",
")"
] |
Return a boolean series indicating which points in `gdf` are within
shapely Polygon, `poly`.
|
[
"Return",
"a",
"boolean",
"series",
"indicating",
"which",
"points",
"in",
"`",
"gdf",
"`",
"are",
"within",
"shapely",
"Polygon",
"`",
"poly",
"`",
"."
] |
[
"\"\"\"Return a boolean series indicating which points in `gdf` are within\n shapely Polygon, `poly`.\n \"\"\""
] |
[
{
"param": "gdf",
"type": null
},
{
"param": "poly",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "gdf",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "poly",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def points_in_poly(gdf, poly):
return gdf.geometry.within(poly)
| 1,158 | 925 |
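A sketch of points_in_poly, assuming geopandas and shapely are installed alongside the definition above:

import geopandas as gpd
from shapely.geometry import Point, Polygon

gdf = gpd.GeoDataFrame(geometry=[Point(0.5, 0.5), Point(2, 2)])
poly = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])     # unit square
print(points_in_poly(gdf, poly).tolist())            # [True, False]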
ddb5a69524982c2225cc326e13840346322fd845
|
cyberhck/renku-python
|
renku/core/commands/checks/migration.py
|
[
"Apache-2.0"
] |
Python
|
ensure_clean_lock
| null |
def ensure_clean_lock(client):
"""Make sure Renku lock file is not part of repository."""
lock_file = client.path / '.renku.lock'
if lock_file.exists():
lock_file.unlink()
# Add lock file to .gitignore.
gitignore = client.path / '.gitignore'
if str(lock_file.name) not in gitignore.read_text():
gitignore.open('a').write('\n{0}\n'.format(lock_file.name))
|
Make sure Renku lock file is not part of repository.
|
Make sure Renku lock file is not part of repository.
|
[
"Make",
"sure",
"Renku",
"lock",
"file",
"is",
"not",
"part",
"of",
"repository",
"."
] |
def ensure_clean_lock(client):
lock_file = client.path / '.renku.lock'
if lock_file.exists():
lock_file.unlink()
gitignore = client.path / '.gitignore'
if str(lock_file.name) not in gitignore.read_text():
gitignore.open('a').write('\n{0}\n'.format(lock_file.name))
|
[
"def",
"ensure_clean_lock",
"(",
"client",
")",
":",
"lock_file",
"=",
"client",
".",
"path",
"/",
"'.renku.lock'",
"if",
"lock_file",
".",
"exists",
"(",
")",
":",
"lock_file",
".",
"unlink",
"(",
")",
"gitignore",
"=",
"client",
".",
"path",
"/",
"'.gitignore'",
"if",
"str",
"(",
"lock_file",
".",
"name",
")",
"not",
"in",
"gitignore",
".",
"read_text",
"(",
")",
":",
"gitignore",
".",
"open",
"(",
"'a'",
")",
".",
"write",
"(",
"'\\n{0}\\n'",
".",
"format",
"(",
"lock_file",
".",
"name",
")",
")"
] |
Make sure Renku lock file is not part of repository.
|
[
"Make",
"sure",
"Renku",
"lock",
"file",
"is",
"not",
"part",
"of",
"repository",
"."
] |
[
"\"\"\"Make sure Renku lock file is not part of repository.\"\"\"",
"# Add lock file to .gitignore."
] |
[
{
"param": "client",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "client",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def ensure_clean_lock(client):
lock_file = client.path / '.renku.lock'
if lock_file.exists():
lock_file.unlink()
gitignore = client.path / '.gitignore'
if str(lock_file.name) not in gitignore.read_text():
gitignore.open('a').write('\n{0}\n'.format(lock_file.name))
| 1,159 | 578 |
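A sketch of ensure_clean_lock against a throwaway directory, assuming the definition above; a SimpleNamespace stands in for the Renku client, which only needs a pathlib .path here:

import pathlib, tempfile, types

repo = pathlib.Path(tempfile.mkdtemp())
(repo / '.renku.lock').touch()
(repo / '.gitignore').write_text('')      # the function expects this file to exist
client = types.SimpleNamespace(path=repo)
ensure_clean_lock(client)
print((repo / '.renku.lock').exists())                       # False
print('.renku.lock' in (repo / '.gitignore').read_text())    # True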
93f3610e8f7e9ae85fd8bb04ee168ff97d4f7f32
|
jimgoo/zipline
|
zipline/testing/predicates.py
|
[
"Apache-2.0"
] |
Python
|
_fmt_path
|
<not_specific>
|
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
|
Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
|
Format the path for final display.
Parameters
path : iterable of str
The path to the values that are not equal.
Returns
fmtd : str
The formatted path to put into the error message.
|
[
"Format",
"the",
"path",
"for",
"final",
"display",
".",
"Parameters",
"path",
":",
"iterable",
"of",
"str",
"The",
"path",
"to",
"the",
"values",
"that",
"are",
"not",
"equal",
".",
"Returns",
"fmtd",
":",
"str",
"The",
"formatted",
"path",
"to",
"put",
"into",
"the",
"error",
"message",
"."
] |
def _fmt_path(path):
if not path:
return ''
return 'path: _' + ''.join(path)
|
[
"def",
"_fmt_path",
"(",
"path",
")",
":",
"if",
"not",
"path",
":",
"return",
"''",
"return",
"'path: _'",
"+",
"''",
".",
"join",
"(",
"path",
")"
] |
Format the path for final display.
|
[
"Format",
"the",
"path",
"for",
"final",
"display",
"."
] |
[
"\"\"\"Format the path for final display.\n\n Parameters\n ----------\n path : iterable of str\n The path to the values that are not equal.\n\n Returns\n -------\n fmtd : str\n The formatted path to put into the error message.\n \"\"\""
] |
[
{
"param": "path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _fmt_path(path):
if not path:
return ''
return 'path: _' + ''.join(path)
| 1,160 | 71 |
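Two quick calls of _fmt_path, assuming the definition above; the path elements are pre-rendered key strings as produced elsewhere in the module:

print(repr(_fmt_path([])))                    # ''
print(_fmt_path(["['meta']", "['name']"]))    # path: _['meta']['name']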
4ef667838a9b7ab3288f467f72946d42c0569a37
|
Deee92/royal-chaos
|
chaos-ns-3/bake/bake/Utils.py
|
[
"MIT"
] |
Python
|
print_backtrace
|
<not_specific>
|
def print_backtrace():
""" Prints the full trace of the exception."""
import sys
import traceback
trace = ""
exception = ""
exceptionHandling = True
if(not sys.exc_info()[0] or not sys.exc_info()[1]):
exceptionHandling = False
if exceptionHandling:
exc_list = traceback.format_exception_only (sys.exc_info()[0],sys.exc_info()[1])
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
else:
tb_list = traceback.format_stack()
for entry in tb_list:
trace += entry
toWrite = "\n%s\n%s" % (exception, trace)
sys.stderr.write(toWrite)
return toWrite
|
Prints the full trace of the exception.
|
Prints the full trace of the exception.
|
[
"Prints",
"the",
"full",
"trace",
"of",
"the",
"exception",
"."
] |
def print_backtrace():
import sys
import traceback
trace = ""
exception = ""
exceptionHandling = True
if(not sys.exc_info()[0] or not sys.exc_info()[1]):
exceptionHandling = False
if exceptionHandling:
exc_list = traceback.format_exception_only (sys.exc_info()[0],sys.exc_info()[1])
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
else:
tb_list = traceback.format_stack()
for entry in tb_list:
trace += entry
toWrite = "\n%s\n%s" % (exception, trace)
sys.stderr.write(toWrite)
return toWrite
|
[
"def",
"print_backtrace",
"(",
")",
":",
"import",
"sys",
"import",
"traceback",
"trace",
"=",
"\"\"",
"exception",
"=",
"\"\"",
"exceptionHandling",
"=",
"True",
"if",
"(",
"not",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
"or",
"not",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
":",
"exceptionHandling",
"=",
"False",
"if",
"exceptionHandling",
":",
"exc_list",
"=",
"traceback",
".",
"format_exception_only",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"for",
"entry",
"in",
"exc_list",
":",
"exception",
"+=",
"entry",
"tb_list",
"=",
"traceback",
".",
"format_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"else",
":",
"tb_list",
"=",
"traceback",
".",
"format_stack",
"(",
")",
"for",
"entry",
"in",
"tb_list",
":",
"trace",
"+=",
"entry",
"toWrite",
"=",
"\"\\n%s\\n%s\"",
"%",
"(",
"exception",
",",
"trace",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"toWrite",
")",
"return",
"toWrite"
] |
Prints the full trace of the exception.
|
[
"Prints",
"the",
"full",
"trace",
"of",
"the",
"exception",
"."
] |
[
"\"\"\" Prints the full trace of the exception.\"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import sys
import traceback
def print_backtrace():
import sys
import traceback
trace = ""
exception = ""
exceptionHandling = True
if(not sys.exc_info()[0] or not sys.exc_info()[1]):
exceptionHandling = False
if exceptionHandling:
exc_list = traceback.format_exception_only (sys.exc_info()[0],sys.exc_info()[1])
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
else:
tb_list = traceback.format_stack()
for entry in tb_list:
trace += entry
toWrite = "\n%s\n%s" % (exception, trace)
sys.stderr.write(toWrite)
return toWrite
| 1,161 | 539 |
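A sketch of print_backtrace, assuming the definition above; called inside an except block it formats the active exception, otherwise it falls back to the current stack:

try:
    1 / 0
except ZeroDivisionError:
    msg = print_backtrace()               # also written to stderr
    print('ZeroDivisionError' in msg)     # True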
3e9734e96dd0fadf64f15bdb204255a486596525
|
ukirderohit/2017Challenges
|
challenge_2/python/sarcodian/src/challenge_2.py
|
[
"Apache-2.0"
] |
Python
|
pick_char
|
<not_specific>
|
def pick_char(array0):
'''
array0: list, the array that is entered with one unique char
returns: char, a single element string or int that was unique in the array
'''
unique = []
duplicate = []
for i in array0:
if i in duplicate:
pass
elif i in unique:
duplicate.append(i)
unique.remove(i)
else:
unique.append(i)
return unique[0]
|
array0: list, the array that is entered with one unique char
returns: char, a single element string or int that was unique in the array
|
list, the array that is entered with one unique char
returns: char, a single element string or int that was unique in the array
|
[
"list",
"the",
"array",
"that",
"is",
"entered",
"with",
"one",
"unique",
"char",
"returns",
":",
"char",
"a",
"single",
"element",
"string",
"or",
"int",
"that",
"was",
"unique",
"in",
"the",
"array"
] |
def pick_char(array0):
unique = []
duplicate = []
for i in array0:
if i in duplicate:
pass
elif i in unique:
duplicate.append(i)
unique.remove(i)
else:
unique.append(i)
return unique[0]
|
[
"def",
"pick_char",
"(",
"array0",
")",
":",
"unique",
"=",
"[",
"]",
"duplicate",
"=",
"[",
"]",
"for",
"i",
"in",
"array0",
":",
"if",
"i",
"in",
"duplicate",
":",
"pass",
"elif",
"i",
"in",
"unique",
":",
"duplicate",
".",
"append",
"(",
"i",
")",
"unique",
".",
"remove",
"(",
"i",
")",
"else",
":",
"unique",
".",
"append",
"(",
"i",
")",
"return",
"unique",
"[",
"0",
"]"
] |
array0: list, the array that is entered with one unique char
returns: char, a single element string or int that was unique in the array
|
[
"array0",
":",
"list",
"the",
"array",
"that",
"is",
"entered",
"with",
"one",
"unique",
"char",
"returns",
":",
"char",
"a",
"single",
"element",
"string",
"or",
"int",
"that",
"was",
"unique",
"in",
"the",
"array"
] |
[
"'''\n array0: list, the array that is entered with one unique char\n returns: char, a single element string or int that was unique in the array\n '''"
] |
[
{
"param": "array0",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "array0",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def pick_char(array0):
unique = []
duplicate = []
for i in array0:
if i in duplicate:
pass
elif i in unique:
duplicate.append(i)
unique.remove(i)
else:
unique.append(i)
return unique[0]
| 1,163 | 356 |
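Quick calls of pick_char, assuming the definition above; each input contains exactly one non-repeated element, per the docstring's contract:

print(pick_char([2, 7, 2, 9, 7]))    # 9
print(pick_char(list("swiss")))      # w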
7840fc8f09ec0a0a12a619a63ade812cba011ae5
|
IliasGeoSo/Asymmetron
|
wrapper_functions.py
|
[
"Apache-2.0"
] |
Python
|
output_path
|
<not_specific>
|
def output_path(fun_name, extension, *args):
"""
Creates the path to be used for saving output files. File type needs to be added manually depending on the function
:param fun_name: Function name to append to the path
:param args: Additional arguments to append to the path
:param extension: file extension for the file
:return: Path under which to save the output. No file type is selected
"""
config = configparser.ConfigParser()
config.read('config.txt')
time_stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) +"_" # To add timestamp to output file names
# Remove the time stamp if chosen by the user
if config['DEFAULT']['time_stamp'] == "False":
time_stamp = ""
if not os.path.exists("Asymmetron_output"):
os.makedirs("Asymmetron_output/")
if not os.path.exists("Asymmetron_output/"+fun_name):
os.makedirs("Asymmetron_output/" + fun_name)
return "Asymmetron_output/" + fun_name + "/" + time_stamp + fun_name + "_" + "_".join(args)+ "." + extension
|
Creates the path to be used for saving output files. File type needs to be added manually depending on the function
:param fun_name: Function name to append to the path
:param args: Additional arguments to append to the path
:param extension: file extension for the file
:return: Path under which to save the output. No file type is selected
|
Creates the path to be used for saving output files. File type needs to be added manually depending on the function
|
[
"Creates",
"the",
"path",
"to",
"be",
"used",
"for",
"saving",
"output",
"files",
".",
"File",
"type",
"needs",
"to",
"be",
"added",
"manually",
"depending",
"on",
"the",
"function"
] |
def output_path(fun_name, extension, *args):
config = configparser.ConfigParser()
config.read('config.txt')
time_stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) +"_"
if config['DEFAULT']['time_stamp'] == "False":
time_stamp = ""
if not os.path.exists("Asymmetron_output"):
os.makedirs("Asymmetron_output/")
if not os.path.exists("Asymmetron_output/"+fun_name):
os.makedirs("Asymmetron_output/" + fun_name)
return "Asymmetron_output/" + fun_name + "/" + time_stamp + fun_name + "_" + "_".join(args)+ "." + extension
|
[
"def",
"output_path",
"(",
"fun_name",
",",
"extension",
",",
"*",
"args",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"'config.txt'",
")",
"time_stamp",
"=",
"time",
".",
"strftime",
"(",
"\"%Y%m%d_%H%M%S\"",
",",
"time",
".",
"localtime",
"(",
")",
")",
"+",
"\"_\"",
"if",
"config",
"[",
"'DEFAULT'",
"]",
"[",
"'time_stamp'",
"]",
"==",
"\"False\"",
":",
"time_stamp",
"=",
"\"\"",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"\"Asymmetron_output\"",
")",
":",
"os",
".",
"makedirs",
"(",
"\"Asymmetron_output/\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"\"Asymmetron_output/\"",
"+",
"fun_name",
")",
":",
"os",
".",
"makedirs",
"(",
"\"Asymmetron_output/\"",
"+",
"fun_name",
")",
"return",
"\"Asymmetron_output/\"",
"+",
"fun_name",
"+",
"\"/\"",
"+",
"time_stamp",
"+",
"fun_name",
"+",
"\"_\"",
"+",
"\"_\"",
".",
"join",
"(",
"args",
")",
"+",
"\".\"",
"+",
"extension"
] |
Creates the path to be used for saving output files.
|
[
"Creates",
"the",
"path",
"to",
"be",
"used",
"for",
"saving",
"output",
"files",
"."
] |
[
"\"\"\"\n Creates the path to be used for saving output files. File type needs to be added manually depending on the function\n\n :param fun_name: Function name to append to the path\n :param args: Additional arguments to append to the path\n :param extension: file extension for the file\n :return: Path under which to save the output. No file type is selected\n \"\"\"",
"# To add timestamp to output file names",
"# Remove the time stamp if chosen by the user"
] |
[
{
"param": "fun_name",
"type": null
},
{
"param": "extension",
"type": null
}
] |
{
"returns": [
{
"docstring": "Path under which to save the output. No file type is selected",
"docstring_tokens": [
"Path",
"under",
"which",
"to",
"save",
"the",
"output",
".",
"No",
"file",
"type",
"is",
"selected"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "fun_name",
"type": null,
"docstring": "Function name to append to the path",
"docstring_tokens": [
"Function",
"name",
"to",
"append",
"to",
"the",
"path"
],
"default": null,
"is_optional": null
},
{
"identifier": "extension",
"type": null,
"docstring": "file extension for the file",
"docstring_tokens": [
"file",
"extension",
"for",
"the",
"file"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "args",
"type": null,
"docstring": "Additional arguments to append to the path",
"docstring_tokens": [
"Additional",
"arguments",
"to",
"append",
"to",
"the",
"path"
],
"default": null,
"is_optional": null
}
],
"others": []
}
|
import configparser
import time
import os
def output_path(fun_name, extension, *args):
config = configparser.ConfigParser()
config.read('config.txt')
time_stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) +"_"
if config['DEFAULT']['time_stamp'] == "False":
time_stamp = ""
if not os.path.exists("Asymmetron_output"):
os.makedirs("Asymmetron_output/")
if not os.path.exists("Asymmetron_output/"+fun_name):
os.makedirs("Asymmetron_output/" + fun_name)
return "Asymmetron_output/" + fun_name + "/" + time_stamp + fun_name + "_" + "_".join(args)+ "." + extension
| 1,164 | 954 |
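A sketch of output_path, assuming the definition above; the function reads config.txt from the working directory, so one is written first (the [DEFAULT] time_stamp key is what the body checks):

with open('config.txt', 'w') as f:
    f.write('[DEFAULT]\ntime_stamp = False\n')
print(output_path('motifs', 'txt', 'fileA', 'fileB'))
# Asymmetron_output/motifs/motifs_fileA_fileB.txt (directories are created as a side effect)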
4b76efe54a5008fde0687255b1d28676bb26581d
|
mrgum/aws-parallelcluster
|
awsbatch-cli/src/awsbatch/awsbkill.py
|
[
"Apache-2.0"
] |
Python
|
_get_parser
|
<not_specific>
|
def _get_parser():
"""
Parse input parameters and return the ArgumentParser object.
If the command is executed without the --cluster parameter, the command will use the default cluster_name
specified in the [main] section of the user's awsbatch-cli.cfg configuration file and will search
for the [cluster cluster-name] section, if the section doesn't exist, it will ask to CloudFormation
the required information.
If the --cluster parameter is set, the command will search for the [cluster cluster-name] section
in the user's awsbatch-cli.cfg configuration file or, if the file doesn't exist, it will ask to CloudFormation
the required information.
:return: the ArgumentParser object
"""
parser = argparse.ArgumentParser(description="Cancels/terminates jobs submitted in the cluster.")
parser.add_argument("-c", "--cluster", help="Cluster to use")
parser.add_argument(
"-r",
"--reason",
help="A message to attach to the job that explains the reason for canceling it",
default="Terminated by the user",
)
parser.add_argument("-ll", "--log-level", help=argparse.SUPPRESS, default="ERROR")
parser.add_argument("job_ids", help="A space separated list of job IDs to cancel/terminate", nargs="+")
return parser
|
Parse input parameters and return the ArgumentParser object.
If the command is executed without the --cluster parameter, the command will use the default cluster_name
specified in the [main] section of the user's awsbatch-cli.cfg configuration file and will search
for the [cluster cluster-name] section, if the section doesn't exist, it will ask to CloudFormation
the required information.
If the --cluster parameter is set, the command will search for the [cluster cluster-name] section
in the user's awsbatch-cli.cfg configuration file or, if the file doesn't exist, it will ask to CloudFormation
the required information.
:return: the ArgumentParser object
|
Parse input parameters and return the ArgumentParser object.
If the command is executed without the --cluster parameter, the command will use the default cluster_name
specified in the [main] section of the user's awsbatch-cli.cfg configuration file and will search
for the [cluster cluster-name] section, if the section doesn't exist, it will ask to CloudFormation
the required information.
If the --cluster parameter is set, the command will search for the [cluster cluster-name] section
in the user's awsbatch-cli.cfg configuration file or, if the file doesn't exist, it will ask to CloudFormation
the required information.
|
[
"Parse",
"input",
"parameters",
"and",
"return",
"the",
"ArgumentParser",
"object",
".",
"If",
"the",
"command",
"is",
"executed",
"without",
"the",
"--",
"cluster",
"parameter",
"the",
"command",
"will",
"use",
"the",
"default",
"cluster_name",
"specified",
"in",
"the",
"[",
"main",
"]",
"section",
"of",
"the",
"user",
"'",
"s",
"awsbatch",
"-",
"cli",
".",
"cfg",
"configuration",
"file",
"and",
"will",
"search",
"for",
"the",
"[",
"cluster",
"cluster",
"-",
"name",
"]",
"section",
"if",
"the",
"section",
"doesn",
"'",
"t",
"exist",
"it",
"will",
"ask",
"to",
"CloudFormation",
"the",
"required",
"information",
".",
"If",
"the",
"--",
"cluster",
"parameter",
"is",
"set",
"the",
"command",
"will",
"search",
"for",
"the",
"[",
"cluster",
"cluster",
"-",
"name",
"]",
"section",
"in",
"the",
"user",
"'",
"s",
"awsbatch",
"-",
"cli",
".",
"cfg",
"configuration",
"file",
"or",
"if",
"the",
"file",
"doesn",
"'",
"t",
"exist",
"it",
"will",
"ask",
"to",
"CloudFormation",
"the",
"required",
"information",
"."
] |
def _get_parser():
parser = argparse.ArgumentParser(description="Cancels/terminates jobs submitted in the cluster.")
parser.add_argument("-c", "--cluster", help="Cluster to use")
parser.add_argument(
"-r",
"--reason",
help="A message to attach to the job that explains the reason for canceling it",
default="Terminated by the user",
)
parser.add_argument("-ll", "--log-level", help=argparse.SUPPRESS, default="ERROR")
parser.add_argument("job_ids", help="A space separated list of job IDs to cancel/terminate", nargs="+")
return parser
|
[
"def",
"_get_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Cancels/terminates jobs submitted in the cluster.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--cluster\"",
",",
"help",
"=",
"\"Cluster to use\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--reason\"",
",",
"help",
"=",
"\"A message to attach to the job that explains the reason for canceling it\"",
",",
"default",
"=",
"\"Terminated by the user\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"-ll\"",
",",
"\"--log-level\"",
",",
"help",
"=",
"argparse",
".",
"SUPPRESS",
",",
"default",
"=",
"\"ERROR\"",
")",
"parser",
".",
"add_argument",
"(",
"\"job_ids\"",
",",
"help",
"=",
"\"A space separated list of job IDs to cancel/terminate\"",
",",
"nargs",
"=",
"\"+\"",
")",
"return",
"parser"
] |
Parse input parameters and return the ArgumentParser object.
|
[
"Parse",
"input",
"parameters",
"and",
"return",
"the",
"ArgumentParser",
"object",
"."
] |
[
"\"\"\"\n Parse input parameters and return the ArgumentParser object.\n\n If the command is executed without the --cluster parameter, the command will use the default cluster_name\n specified in the [main] section of the user's awsbatch-cli.cfg configuration file and will search\n for the [cluster cluster-name] section, if the section doesn't exist, it will ask to CloudFormation\n the required information.\n\n If the --cluster parameter is set, the command will search for the [cluster cluster-name] section\n in the user's awsbatch-cli.cfg configuration file or, if the file doesn't exist, it will ask to CloudFormation\n the required information.\n\n :return: the ArgumentParser object\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": "the ArgumentParser object",
"docstring_tokens": [
"the",
"ArgumentParser",
"object"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import argparse
def _get_parser():
parser = argparse.ArgumentParser(description="Cancels/terminates jobs submitted in the cluster.")
parser.add_argument("-c", "--cluster", help="Cluster to use")
parser.add_argument(
"-r",
"--reason",
help="A message to attach to the job that explains the reason for canceling it",
default="Terminated by the user",
)
parser.add_argument("-ll", "--log-level", help=argparse.SUPPRESS, default="ERROR")
parser.add_argument("job_ids", help="A space separated list of job IDs to cancel/terminate", nargs="+")
return parser
| 1,165 | 882 |
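A quick parse with _get_parser, assuming the definition above:

parser = _get_parser()
args = parser.parse_args(['-c', 'mycluster', 'job-1', 'job-2'])
print(args.cluster, args.job_ids)    # mycluster ['job-1', 'job-2']
print(args.reason)                   # Terminated by the user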
3739a9b90f2ace20eccab3a86f94e404e5f5a094
|
westurner/celery
|
celery/app/builtins.py
|
[
"BSD-3-Clause"
] |
Python
|
add_backend_cleanup_task
|
<not_specific>
|
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.
This task is also added do the periodic task schedule so that it is
run every day at midnight, but :program:`celerybeat` must be running
for this to be effective.
Note that not all backends do anything for this, what needs to be
done at cleanup is up to each backend, and some backends
may even clean up in realtime so that a periodic cleanup is not necessary.
"""
@app.task(name="celery.backend_cleanup")
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
|
The backend cleanup task can be used to clean up the default result
backend.
This task is also added do the periodic task schedule so that it is
run every day at midnight, but :program:`celerybeat` must be running
for this to be effective.
Note that not all backends do anything for this, what needs to be
done at cleanup is up to each backend, and some backends
may even clean up in realtime so that a periodic cleanup is not necessary.
|
The backend cleanup task can be used to clean up the default result
backend.
This task is also added do the periodic task schedule so that it is
run every day at midnight, but :program:`celerybeat` must be running
for this to be effective.
Note that not all backends do anything for this, what needs to be
done at cleanup is up to each backend, and some backends
may even clean up in realtime so that a periodic cleanup is not necessary.
|
[
"The",
"backend",
"cleanup",
"task",
"can",
"be",
"used",
"to",
"clean",
"up",
"the",
"default",
"result",
"backend",
".",
"This",
"task",
"is",
"also",
"added",
"do",
"the",
"periodic",
"task",
"schedule",
"so",
"that",
"it",
"is",
"run",
"every",
"day",
"at",
"midnight",
"but",
":",
"program",
":",
"`",
"celerybeat",
"`",
"must",
"be",
"running",
"for",
"this",
"to",
"be",
"effective",
".",
"Note",
"that",
"not",
"all",
"backends",
"do",
"anything",
"for",
"this",
"what",
"needs",
"to",
"be",
"done",
"at",
"cleanup",
"is",
"up",
"to",
"each",
"backend",
"and",
"some",
"backends",
"may",
"even",
"clean",
"up",
"in",
"realtime",
"so",
"that",
"a",
"periodic",
"cleanup",
"is",
"not",
"necessary",
"."
] |
def add_backend_cleanup_task(app):
@app.task(name="celery.backend_cleanup")
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
|
[
"def",
"add_backend_cleanup_task",
"(",
"app",
")",
":",
"@",
"app",
".",
"task",
"(",
"name",
"=",
"\"celery.backend_cleanup\"",
")",
"def",
"backend_cleanup",
"(",
")",
":",
"app",
".",
"backend",
".",
"cleanup",
"(",
")",
"return",
"backend_cleanup"
] |
The backend cleanup task can be used to clean up the default result
backend.
|
[
"The",
"backend",
"cleanup",
"task",
"can",
"be",
"used",
"to",
"clean",
"up",
"the",
"default",
"result",
"backend",
"."
] |
[
"\"\"\"The backend cleanup task can be used to clean up the default result\n backend.\n\n This task is also added do the periodic task schedule so that it is\n run every day at midnight, but :program:`celerybeat` must be running\n for this to be effective.\n\n Note that not all backends do anything for this, what needs to be\n done at cleanup is up to each backend, and some backends\n may even clean up in realtime so that a periodic cleanup is not necessary.\n\n \"\"\""
] |
[
{
"param": "app",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "app",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_backend_cleanup_task(app):
@app.task(name="celery.backend_cleanup")
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
| 1,167 | 315 |
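A sketch of add_backend_cleanup_task with a minimal stand-in app, assuming the definition above; in practice a real Celery app supplies .task and .backend:

class FakeBackend:                     # hypothetical stand-in
    def cleanup(self):
        print('backend cleaned')

class FakeApp:                         # hypothetical stand-in
    backend = FakeBackend()
    def task(self, name):
        return lambda fun: fun         # skip registration, return the function

cleanup = add_backend_cleanup_task(FakeApp())
cleanup()    # backend cleaned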
96ac96af2d40f01f7507371dbc69a663f668bf2b
|
JakubPluta/GamestonkTerminal
|
gamestonk_terminal/stocks/insider/openinsider_model.py
|
[
"MIT"
] |
Python
|
check_in_list
|
str
|
def check_in_list(
category: str, field: str, val: int, l_possible_vals: List[str]
) -> str:
"""Check value being in possible list
Parameters
----------
category : str
category of open insider screener
field : str
field from category of open insider screener
val : str
value's field of category from open insider screener
l_possible_vals : List[str]
list of possible values that should be allowed
Returns
----------
error : str
error message. If empty, no error.
"""
error = ""
if val:
if val not in l_possible_vals:
error += (
f"Invalid {category}.{field} '{val}'. "
f"Choose one of the following options: {', '.join(l_possible_vals)}.\n"
)
return error
|
Check value being in possible list
Parameters
----------
category : str
category of open insider screener
field : str
field from category of open insider screener
val : str
value's field of category from open insider screener
l_possible_vals : List[str]
list of possible values that should be allowed
Returns
----------
error : str
error message. If empty, no error.
|
Check value being in possible list
Parameters
category : str
category of open insider screener
field : str
field from category of open insider screener
val : str
value's field of category from open insider screener
l_possible_vals : List[str]
list of possible values that should be allowed
Returns
error : str
error message. If empty, no error.
|
[
"Check",
"value",
"being",
"in",
"possible",
"list",
"Parameters",
"category",
":",
"str",
"category",
"of",
"open",
"insider",
"screener",
"field",
":",
"str",
"field",
"from",
"category",
"of",
"open",
"insider",
"screener",
"val",
":",
"str",
"value",
"'",
"s",
"field",
"of",
"category",
"from",
"open",
"insider",
"screener",
"l_possible_vals",
":",
"List",
"[",
"str",
"]",
"list",
"of",
"possible",
"values",
"that",
"should",
"be",
"allowed",
"Returns",
"error",
":",
"str",
"error",
"message",
".",
"If",
"empty",
"no",
"error",
"."
] |
def check_in_list(
category: str, field: str, val: int, l_possible_vals: List[str]
) -> str:
error = ""
if val:
if val not in l_possible_vals:
error += (
f"Invalid {category}.{field} '{val}'. "
f"Choose one of the following options: {', '.join(l_possible_vals)}.\n"
)
return error
|
[
"def",
"check_in_list",
"(",
"category",
":",
"str",
",",
"field",
":",
"str",
",",
"val",
":",
"int",
",",
"l_possible_vals",
":",
"List",
"[",
"str",
"]",
")",
"->",
"str",
":",
"error",
"=",
"\"\"",
"if",
"val",
":",
"if",
"val",
"not",
"in",
"l_possible_vals",
":",
"error",
"+=",
"(",
"f\"Invalid {category}.{field} '{val}'. \"",
"f\"Choose one of the following options: {', '.join(l_possible_vals)}.\\n\"",
")",
"return",
"error"
] |
Check value being in possible list
Parameters
|
[
"Check",
"value",
"being",
"in",
"possible",
"list",
"Parameters"
] |
[
"\"\"\"Check value being in possible list\n\n Parameters\n ----------\n category : str\n category of open insider screener\n field : str\n field from category of open insider screener\n val : str\n value's field of category from open insider screener\n l_possible_vals : List[str]\n list of possible values that should be allowed\n\n Returns\n ----------\n error : str\n error message. If empty, no error.\n \"\"\""
] |
[
{
"param": "category",
"type": "str"
},
{
"param": "field",
"type": "str"
},
{
"param": "val",
"type": "int"
},
{
"param": "l_possible_vals",
"type": "List[str]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "category",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "field",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "val",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "l_possible_vals",
"type": "List[str]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def check_in_list(
category: str, field: str, val: int, l_possible_vals: List[str]
) -> str:
error = ""
if val:
if val not in l_possible_vals:
error += (
f"Invalid {category}.{field} '{val}'. "
f"Choose one of the following options: {', '.join(l_possible_vals)}.\n"
)
return error
| 1,169 | 829 |
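Two calls of check_in_list, assuming the definition above (with `from typing import List` in scope); note the body skips validation entirely when val is falsy:

print(repr(check_in_list('insider', 'side', 'B', ['B', 'S'])))   # '' (valid)
print(check_in_list('insider', 'side', 'X', ['B', 'S']))
# Invalid insider.side 'X'. Choose one of the following options: B, S.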
1b98b026e854a0e01eb662d917a86c9cf988a500
|
tingsyo/gpt2-zhtw
|
utils/create_line_sentences_poems.py
|
[
"CC0-1.0"
] |
Python
|
process_poem_file
|
<not_specific>
|
def process_poem_file(furl):
''' Process the poem data: list of json object is plain text. '''
logging.debug(furl)
# 1. Read in the list of json objects
with open(furl, 'r') as f:
raw = json.load(f)
# 2. Loop through a list of json objects
poems = [''.join(a['paragraphs']) for a in raw]
# Done
return(poems)
|
Process the poem data: list of json object is plain text.
|
Process the poem data: list of json object is plain text.
|
[
"Process",
"the",
"poem",
"data",
":",
"list",
"of",
"json",
"object",
"is",
"plain",
"text",
"."
] |
def process_poem_file(furl):
logging.debug(furl)
with open(furl, 'r') as f:
raw = json.load(f)
poems = [''.join(a['paragraphs']) for a in raw]
return(poems)
|
[
"def",
"process_poem_file",
"(",
"furl",
")",
":",
"logging",
".",
"debug",
"(",
"furl",
")",
"with",
"open",
"(",
"furl",
",",
"'r'",
")",
"as",
"f",
":",
"raw",
"=",
"json",
".",
"load",
"(",
"f",
")",
"poems",
"=",
"[",
"''",
".",
"join",
"(",
"a",
"[",
"'paragraphs'",
"]",
")",
"for",
"a",
"in",
"raw",
"]",
"return",
"(",
"poems",
")"
] |
Process the poem data: list of json object is plain text.
|
[
"Process",
"the",
"poem",
"data",
":",
"list",
"of",
"json",
"object",
"is",
"plain",
"text",
"."
] |
[
"''' Process the poem data: list of json object is plain text. '''",
"# 1. Read in the list of json objects",
"# 2. Loop through a list of json objects",
"# Done"
] |
[
{
"param": "furl",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "furl",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import logging
import json
def process_poem_file(furl):
logging.debug(furl)
with open(furl, 'r') as f:
raw = json.load(f)
poems = [''.join(a['paragraphs']) for a in raw]
return(poems)
| 1,170 | 483 |
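A sketch of process_poem_file, assuming the definition above; a small JSON file in the expected shape is written first:

import json, tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump([{"paragraphs": ["line one, ", "line two."]}], f)
print(process_poem_file(f.name))    # ['line one, line two.']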
ea82a83ad0a299676be7d39226593d027eb502e6
|
kaparna126/magellanmapper
|
magmap/io/subproc_io.py
|
[
"BSD-3-Clause"
] |
Python
|
decompress_file
| null |
def decompress_file(path_in, dir_out=None):
"""Decompress and unarchive a file.
Assumes that the file has been archived by ``tar`` and compressed
by ``zstd``, both available as shell commands.
Args:
path_in (str): Input path.
dir_out (str): Output directory path; defaults to None to output
to the current directory.
"""
tar_args = ["tar", "xvf", "-"]
if dir_out:
if not os.path.isdir(dir_out):
os.makedirs(dir_out)
tar_args.extend(["-C", dir_out])
else:
dir_out = "."
print("decompressing {} to {}".format(path_in, dir_out))
zst = subprocess.Popen(
["pzstd", "-dc", path_in], stdout=subprocess.PIPE, bufsize=0)
tar = subprocess.Popen(tar_args, stdin=zst.stdout, bufsize=0)
zst.stdout.close()
stderr = tar.communicate()[1]
if stderr:
print(stderr)
|
Decompress and unarchive a file.
Assumes that the file has been archived by ``tar`` and compressed
by ``zstd``, both available as shell commands.
Args:
path_in (str): Input path.
dir_out (str): Output directory path; defaults to None to output
to the current directory.
|
Decompress and unarchive a file.
|
[
"Decompress",
"and",
"unarchive",
"a",
"file",
"."
] |
def decompress_file(path_in, dir_out=None):
tar_args = ["tar", "xvf", "-"]
if dir_out:
if not os.path.isdir(dir_out):
os.makedirs(dir_out)
tar_args.extend(["-C", dir_out])
else:
dir_out = "."
print("decompressing {} to {}".format(path_in, dir_out))
zst = subprocess.Popen(
["pzstd", "-dc", path_in], stdout=subprocess.PIPE, bufsize=0)
tar = subprocess.Popen(tar_args, stdin=zst.stdout, bufsize=0)
zst.stdout.close()
stderr = tar.communicate()[1]
if stderr:
print(stderr)
|
[
"def",
"decompress_file",
"(",
"path_in",
",",
"dir_out",
"=",
"None",
")",
":",
"tar_args",
"=",
"[",
"\"tar\"",
",",
"\"xvf\"",
",",
"\"-\"",
"]",
"if",
"dir_out",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_out",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_out",
")",
"tar_args",
".",
"extend",
"(",
"[",
"\"-C\"",
",",
"dir_out",
"]",
")",
"else",
":",
"dir_out",
"=",
"\".\"",
"print",
"(",
"\"decompressing {} to {}\"",
".",
"format",
"(",
"path_in",
",",
"dir_out",
")",
")",
"zst",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"pzstd\"",
",",
"\"-dc\"",
",",
"path_in",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"bufsize",
"=",
"0",
")",
"tar",
"=",
"subprocess",
".",
"Popen",
"(",
"tar_args",
",",
"stdin",
"=",
"zst",
".",
"stdout",
",",
"bufsize",
"=",
"0",
")",
"zst",
".",
"stdout",
".",
"close",
"(",
")",
"stderr",
"=",
"tar",
".",
"communicate",
"(",
")",
"[",
"1",
"]",
"if",
"stderr",
":",
"print",
"(",
"stderr",
")"
] |
Decompress and unarchive a file.
|
[
"Decompress",
"and",
"unarchive",
"a",
"file",
"."
] |
[
"\"\"\"Decompress and unarchive a file.\n\n Assumes that the file has been archived by ``tar`` and compressed\n by ``zstd``, both available as shell commands.\n\n Args:\n path_in (str): Input path.\n dir_out (str): Output directory path; defaults to None to output\n to the current directory.\n\n \"\"\""
] |
[
{
"param": "path_in",
"type": null
},
{
"param": "dir_out",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "path_in",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "dir_out",
"type": null,
"docstring": "Output directory path; defaults to None to output\nto the current directory.",
"docstring_tokens": [
"Output",
"directory",
"path",
";",
"defaults",
"to",
"None",
"to",
"output",
"to",
"the",
"current",
"directory",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import subprocess
import os
def decompress_file(path_in, dir_out=None):
tar_args = ["tar", "xvf", "-"]
if dir_out:
if not os.path.isdir(dir_out):
os.makedirs(dir_out)
tar_args.extend(["-C", dir_out])
else:
dir_out = "."
print("decompressing {} to {}".format(path_in, dir_out))
zst = subprocess.Popen(
["pzstd", "-dc", path_in], stdout=subprocess.PIPE, bufsize=0)
tar = subprocess.Popen(tar_args, stdin=zst.stdout, bufsize=0)
zst.stdout.close()
stderr = tar.communicate()[1]
if stderr:
print(stderr)
| 1,172 | 125 |
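A sketch call of decompress_file, assuming the definition above and that the tar and pzstd executables are on PATH; the archive name is hypothetical:

decompress_file('archive.tar.zst', dir_out='out')   # extracts into ./out, creating it if needed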
90303dcdfa870ba6d235514115d4c56eaf3126bd
|
dargueta/binobj
|
binobj/varints.py
|
[
"BSD-3-Clause"
] |
Python
|
encode_integer_uleb128
|
bytes
|
def encode_integer_uleb128(value: int) -> bytes:
"""Encode an integer with unsigned LEB128 encoding.
:param int value: The value to encode.
:return: ``value`` encoded as a variable-length integer in ULEB128 format.
:rtype: bytes
"""
if value < 0:
raise ValueError(
"The ULEB128 integer encoding doesn't support negative numbers."
)
if value == 0:
return b"\0"
output = bytearray()
while value > 0:
continue_bit = 0x80 if value > 127 else 0
output.append(continue_bit | (value & 0x7F))
value >>= 7
return bytes(output)
|
Encode an integer with unsigned LEB128 encoding.
:param int value: The value to encode.
:return: ``value`` encoded as a variable-length integer in ULEB128 format.
:rtype: bytes
|
Encode an integer with unsigned LEB128 encoding.
|
[
"Encode",
"an",
"integer",
"with",
"unsigned",
"LEB128",
"encoding",
"."
] |
def encode_integer_uleb128(value: int) -> bytes:
if value < 0:
raise ValueError(
"The ULEB128 integer encoding doesn't support negative numbers."
)
if value == 0:
return b"\0"
output = bytearray()
while value > 0:
continue_bit = 0x80 if value > 127 else 0
output.append(continue_bit | (value & 0x7F))
value >>= 7
return bytes(output)
|
[
"def",
"encode_integer_uleb128",
"(",
"value",
":",
"int",
")",
"->",
"bytes",
":",
"if",
"value",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The ULEB128 integer encoding doesn't support negative numbers.\"",
")",
"if",
"value",
"==",
"0",
":",
"return",
"b\"\\0\"",
"output",
"=",
"bytearray",
"(",
")",
"while",
"value",
">",
"0",
":",
"continue_bit",
"=",
"0x80",
"if",
"value",
">",
"127",
"else",
"0",
"output",
".",
"append",
"(",
"continue_bit",
"|",
"(",
"value",
"&",
"0x7F",
")",
")",
"value",
">>=",
"7",
"return",
"bytes",
"(",
"output",
")"
] |
Encode an integer with unsigned LEB128 encoding.
|
[
"Encode",
"an",
"integer",
"with",
"unsigned",
"LEB128",
"encoding",
"."
] |
[
"\"\"\"Encode an integer with unsigned LEB128 encoding.\n\n :param int value: The value to encode.\n\n :return: ``value`` encoded as a variable-length integer in ULEB128 format.\n :rtype: bytes\n \"\"\""
] |
[
{
"param": "value",
"type": "int"
}
] |
{
"returns": [
{
"docstring": "``value`` encoded as a variable-length integer in ULEB128 format.",
"docstring_tokens": [
"`",
"`",
"value",
"`",
"`",
"encoded",
"as",
"a",
"variable",
"-",
"length",
"integer",
"in",
"ULEB128",
"format",
"."
],
"type": "bytes"
}
],
"raises": [],
"params": [
{
"identifier": "value",
"type": "int",
"docstring": "The value to encode.",
"docstring_tokens": [
"The",
"value",
"to",
"encode",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def encode_integer_uleb128(value: int) -> bytes:
if value < 0:
raise ValueError(
"The ULEB128 integer encoding doesn't support negative numbers."
)
if value == 0:
return b"\0"
output = bytearray()
while value > 0:
continue_bit = 0x80 if value > 127 else 0
output.append(continue_bit | (value & 0x7F))
value >>= 7
return bytes(output)
| 1,173 | 374 |
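Quick checks of encode_integer_uleb128, assuming the definition above; 624485 is the classic LEB128 worked example:

print(encode_integer_uleb128(0))        # b'\x00'
print(encode_integer_uleb128(624485))   # b'\xe5\x8e&'  i.e. 0xE5 0x8E 0x26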
4702c5fbd84bd1b1d64745ff604a381077d2eeb6
|
keszybz/python-pytest-cases
|
pytest_cases/tests/advanced/test_suite_parametrized_cases.py
|
[
"BSD-3-Clause"
] |
Python
|
case_simple2
|
<not_specific>
|
def case_simple2(step_name):
# type: (str) -> CaseData
""" Second case.
This function is called for each test step, we make the case data output vary accordingly"""
ins = dict(a=-1, b=2)
if step_name is 'step_check_a':
outs = 0, 3
elif step_name is 'step_check_b':
outs = 1, 4
else:
raise ValueError("Unknown step")
return ins, outs, None
|
Second case.
This function is called for each test step, we make the case data output vary accordingly
|
Second case.
This function is called for each test step, we make the case data output vary accordingly
|
[
"Second",
"case",
".",
"This",
"function",
"is",
"called",
"for",
"each",
"test",
"step",
"we",
"make",
"the",
"case",
"data",
"output",
"vary",
"accordingly"
] |
def case_simple2(step_name):
ins = dict(a=-1, b=2)
if step_name is 'step_check_a':
outs = 0, 3
elif step_name is 'step_check_b':
outs = 1, 4
else:
raise ValueError("Unknown step")
return ins, outs, None
|
[
"def",
"case_simple2",
"(",
"step_name",
")",
":",
"ins",
"=",
"dict",
"(",
"a",
"=",
"-",
"1",
",",
"b",
"=",
"2",
")",
"if",
"step_name",
"is",
"'step_check_a'",
":",
"outs",
"=",
"0",
",",
"3",
"elif",
"step_name",
"is",
"'step_check_b'",
":",
"outs",
"=",
"1",
",",
"4",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown step\"",
")",
"return",
"ins",
",",
"outs",
",",
"None"
] |
Second case.
|
[
"Second",
"case",
"."
] |
[
"# type: (str) -> CaseData",
"\"\"\" Second case.\n This function is called for each test step, we make the case data output vary accordingly\"\"\""
] |
[
{
"param": "step_name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "step_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def case_simple2(step_name):
ins = dict(a=-1, b=2)
if step_name is 'step_check_a':
outs = 0, 3
elif step_name is 'step_check_b':
outs = 1, 4
else:
raise ValueError("Unknown step")
return ins, outs, None
| 1,174 | 165 |
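Quick calls of case_simple2, assuming the definition above; note the body compares step names with `is`, which happens to work for these interned literals under CPython, though `==` would be the robust test:

print(case_simple2('step_check_a'))    # ({'a': -1, 'b': 2}, (0, 3), None)
print(case_simple2('step_check_b'))    # ({'a': -1, 'b': 2}, (1, 4), None)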
d9e72fdccd31549393c0765b0255b7717287d185
|
saran-shabd/the-useful-tools
|
utils/file.py
|
[
"Apache-2.0"
] |
Python
|
create_file_with_license_notice
|
None
|
def create_file_with_license_notice(path: str, file_extension: str) -> None:
"""utility function to create a new file with license notice included"""
# load license notice
with open('LICENSE_NOTICE', 'r') as file:
license_notice: str = file.read()
# load new file template
if 'c' == file_extension or 'cpp' == file_extension:
from data.new_file.cpp import FILE_CONTENT
elif 'java' == file_extension:
from data.new_file.java import FILE_CONTENT
elif 'js' == file_extension:
from data.new_file.js import FILE_CONTENT
else: # py
from data.new_file.py import FILE_CONTENT
# load user license notice into new file template
FILE_CONTENT = FILE_CONTENT.replace('[NOTICE]', license_notice)
# create new file using the template
with open(path, 'w') as file:
file.write(FILE_CONTENT)
|
utility function to create a new file with license notice included
|
utility function to create a new file with license notice included
|
[
"utility",
"function",
"to",
"create",
"a",
"new",
"file",
"with",
"license",
"notice",
"included"
] |
def create_file_with_license_notice(path: str, file_extension: str) -> None:
with open('LICENSE_NOTICE', 'r') as file:
license_notice: str = file.read()
if 'c' == file_extension or 'cpp' == file_extension:
from data.new_file.cpp import FILE_CONTENT
elif 'java' == file_extension:
from data.new_file.java import FILE_CONTENT
elif 'js' == file_extension:
from data.new_file.js import FILE_CONTENT
else:
from data.new_file.py import FILE_CONTENT
FILE_CONTENT = FILE_CONTENT.replace('[NOTICE]', license_notice)
with open(path, 'w') as file:
file.write(FILE_CONTENT)
|
[
"def",
"create_file_with_license_notice",
"(",
"path",
":",
"str",
",",
"file_extension",
":",
"str",
")",
"->",
"None",
":",
"with",
"open",
"(",
"'LICENSE_NOTICE'",
",",
"'r'",
")",
"as",
"file",
":",
"license_notice",
":",
"str",
"=",
"file",
".",
"read",
"(",
")",
"if",
"'c'",
"==",
"file_extension",
"or",
"'cpp'",
"==",
"file_extension",
":",
"from",
"data",
".",
"new_file",
".",
"cpp",
"import",
"FILE_CONTENT",
"elif",
"'java'",
"==",
"file_extension",
":",
"from",
"data",
".",
"new_file",
".",
"java",
"import",
"FILE_CONTENT",
"elif",
"'js'",
"==",
"file_extension",
":",
"from",
"data",
".",
"new_file",
".",
"js",
"import",
"FILE_CONTENT",
"else",
":",
"from",
"data",
".",
"new_file",
".",
"py",
"import",
"FILE_CONTENT",
"FILE_CONTENT",
"=",
"FILE_CONTENT",
".",
"replace",
"(",
"'[NOTICE]'",
",",
"license_notice",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"FILE_CONTENT",
")"
] |
utility function to create a new file with license notice included
|
[
"utility",
"function",
"to",
"create",
"a",
"new",
"file",
"with",
"license",
"notice",
"included"
] |
[
"\"\"\"utility function to create a new file with license notice included\"\"\"",
"# load license notice",
"# load new file template",
"# py",
"# load user license notice into new file template",
"# create new file using the template"
] |
[
{
"param": "path",
"type": "str"
},
{
"param": "file_extension",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_extension",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create_file_with_license_notice(path: str, file_extension: str) -> None:
with open('LICENSE_NOTICE', 'r') as file:
license_notice: str = file.read()
if 'c' == file_extension or 'cpp' == file_extension:
from data.new_file.cpp import FILE_CONTENT
elif 'java' == file_extension:
from data.new_file.java import FILE_CONTENT
elif 'js' == file_extension:
from data.new_file.js import FILE_CONTENT
else:
from data.new_file.py import FILE_CONTENT
FILE_CONTENT = FILE_CONTENT.replace('[NOTICE]', license_notice)
with open(path, 'w') as file:
file.write(FILE_CONTENT)
| 1,175 | 548 |
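A sketch call of create_file_with_license_notice, assuming the definition above, a LICENSE_NOTICE file in the working directory, and the project's data.new_file.* template modules on the import path:

create_file_with_license_notice('new_module.py', 'py')   # writes the template with [NOTICE] filled in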
55b1d4bbb0b8266d3122651840d60752ffcc634d
|
lolrenceH/TrieDedup
|
lib/restrictedDict.py
|
[
"Apache-2.0"
] |
Python
|
addAllowedKeys
| null |
def addAllowedKeys(cls, iterable):
"""
Add keys in iterable to allowed_keys
"""
for x in iterable:
if x not in cls.key2idx:
prevLen = len(cls.allowed_keys)
cls.allowed_keys.append(x)
cls.key2idx[x] = prevLen
|
Add keys in iterable to allowed_keys
|
Add keys in iterable to allowed_keys
|
[
"Add",
"keys",
"in",
"iterable",
"to",
"allowed_keys"
] |
def addAllowedKeys(cls, iterable):
for x in iterable:
if x not in cls.key2idx:
prevLen = len(cls.allowed_keys)
cls.allowed_keys.append(x)
cls.key2idx[x] = prevLen
|
[
"def",
"addAllowedKeys",
"(",
"cls",
",",
"iterable",
")",
":",
"for",
"x",
"in",
"iterable",
":",
"if",
"x",
"not",
"in",
"cls",
".",
"key2idx",
":",
"prevLen",
"=",
"len",
"(",
"cls",
".",
"allowed_keys",
")",
"cls",
".",
"allowed_keys",
".",
"append",
"(",
"x",
")",
"cls",
".",
"key2idx",
"[",
"x",
"]",
"=",
"prevLen"
] |
Add keys in iterable to allowed_keys
|
[
"Add",
"keys",
"in",
"iterable",
"to",
"allowed_keys"
] |
[
"\"\"\"\n Add keys in iterable to allowed_keys\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "iterable",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "iterable",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def addAllowedKeys(cls, iterable):
for x in iterable:
if x not in cls.key2idx:
prevLen = len(cls.allowed_keys)
cls.allowed_keys.append(x)
cls.key2idx[x] = prevLen
| 1,176 | 877 |
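A sketch of addAllowedKeys with a stand-in class carrying the two class-level attributes the body mutates, assuming the definition above:

class Restricted:                  # hypothetical stand-in
    allowed_keys = []
    key2idx = {}

addAllowedKeys(Restricted, "ACG")
addAllowedKeys(Restricted, "AT")   # 'A' is skipped as a duplicate
print(Restricted.allowed_keys)     # ['A', 'C', 'G', 'T']
print(Restricted.key2idx)          # {'A': 0, 'C': 1, 'G': 2, 'T': 3}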
961e29926d37d241283694879905f01b5bcc39cc
|
gadalang/gadalang-lang
|
gadalang_lang/test_utils.py
|
[
"MIT"
] |
Python
|
tmp_file
| null |
def tmp_file():
"""Create a temporary file and delete it afterward.
:return: temporary file
"""
f = tempfile.NamedTemporaryFile(delete=False)
yield f
f.close()
os.remove(f.name)
|
Create a temporary file and delete it afterward.
:return: temporary file
|
Create a temporary file and delete it afterward.
|
[
"Create",
"a",
"temporary",
"file",
"and",
"delete",
"it",
"afterward",
"."
] |
def tmp_file():
f = tempfile.NamedTemporaryFile(delete=False)
yield f
f.close()
os.remove(f.name)
|
[
"def",
"tmp_file",
"(",
")",
":",
"f",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"yield",
"f",
"f",
".",
"close",
"(",
")",
"os",
".",
"remove",
"(",
"f",
".",
"name",
")"
] |
Create a temporary file and delete it afterward.
|
[
"Create",
"a",
"temporary",
"file",
"and",
"delete",
"it",
"afterward",
"."
] |
[
"\"\"\"Create a temporary file and delete it afterward.\n\n :return: temporary file\n \"\"\""
] |
[] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import tempfile
import os
def tmp_file():
f = tempfile.NamedTemporaryFile(delete=False)
yield f
f.close()
os.remove(f.name)
| 1,177 | 1,018 |
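Driving tmp_file by hand, assuming the definition above; in the project it backs a pytest fixture, so the generator is resumed once to run the cleanup:

import os

gen = tmp_file()
f = next(gen)
f.write(b'hello')
try:
    next(gen)                       # resume past the yield to close and delete
except StopIteration:
    pass
print(os.path.exists(f.name))       # False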
7dc77a470c407da6bfa5fab5698574a7aa8752db
|
liqing-ustc/dreamcoder
|
pyccg/pyccg/logic.py
|
[
"MIT"
] |
Python
|
listify
|
<not_specific>
|
def listify(fn=None, wrapper=list):
"""
A decorator which wraps a function's return value in ``list(...)``.
Useful when an algorithm can be expressed more cleanly as a generator but
the function should return an list.
"""
def listify_return(fn):
@functools.wraps(fn)
def listify_helper(*args, **kw):
return wrapper(fn(*args, **kw))
return listify_helper
if fn is None:
return listify_return
return listify_return(fn)
|
A decorator which wraps a function's return value in ``list(...)``.
Useful when an algorithm can be expressed more cleanly as a generator but
the function should return an list.
|
A decorator which wraps a function's return value in ``list(...)``.
Useful when an algorithm can be expressed more cleanly as a generator but
the function should return an list.
|
[
"A",
"decorator",
"which",
"wraps",
"a",
"function",
"'",
"s",
"return",
"value",
"in",
"`",
"`",
"list",
"(",
"...",
")",
"`",
"`",
".",
"Useful",
"when",
"an",
"algorithm",
"can",
"be",
"expressed",
"more",
"cleanly",
"as",
"a",
"generator",
"but",
"the",
"function",
"should",
"return",
"an",
"list",
"."
] |
def listify(fn=None, wrapper=list):
def listify_return(fn):
@functools.wraps(fn)
def listify_helper(*args, **kw):
return wrapper(fn(*args, **kw))
return listify_helper
if fn is None:
return listify_return
return listify_return(fn)
|
[
"def",
"listify",
"(",
"fn",
"=",
"None",
",",
"wrapper",
"=",
"list",
")",
":",
"def",
"listify_return",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"listify_helper",
"(",
"*",
"args",
",",
"**",
"kw",
")",
":",
"return",
"wrapper",
"(",
"fn",
"(",
"*",
"args",
",",
"**",
"kw",
")",
")",
"return",
"listify_helper",
"if",
"fn",
"is",
"None",
":",
"return",
"listify_return",
"return",
"listify_return",
"(",
"fn",
")"
] |
A decorator which wraps a function's return value in ``list(...)``.
|
[
"A",
"decorator",
"which",
"wraps",
"a",
"function",
"'",
"s",
"return",
"value",
"in",
"`",
"`",
"list",
"(",
"...",
")",
"`",
"`",
"."
] |
[
"\"\"\"\n A decorator which wraps a function's return value in ``list(...)``.\n\n Useful when an algorithm can be expressed more cleanly as a generator but\n the function should return an list.\n \"\"\""
] |
[
{
"param": "fn",
"type": null
},
{
"param": "wrapper",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "fn",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "wrapper",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import functools
def listify(fn=None, wrapper=list):
def listify_return(fn):
@functools.wraps(fn)
def listify_helper(*args, **kw):
return wrapper(fn(*args, **kw))
return listify_helper
if fn is None:
return listify_return
return listify_return(fn)
| 1,178 | 811 |
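Two uses of listify, assuming the definition above; with no arguments it wraps the return value in list(), and wrapper= swaps in another constructor:

@listify
def pairs(n):
    for i in range(n):
        yield (i, i * i)

print(pairs(3))    # [(0, 0), (1, 1), (2, 4)]

@listify(wrapper=tuple)
def evens(n):
    return (i for i in range(n) if i % 2 == 0)

print(evens(5))    # (0, 2, 4)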
99afc1ca4a4d18c9e82f83f63bdc4420cc5212ae
|
OverKoder/AdventOfCode
|
2021/Day1.py
|
[
"MIT"
] |
Python
|
solve_part2
|
<not_specific>
|
def solve_part2(depth_list):
"""
Here we take advantage that sums overlap, and thus, to calculate the next sum of numbers, we can
use the previous sum substracting the number that is no longer part of the new sum and adding the next number in the list
For example:
1.- 199 + 200 + 208 = sum
2.- 200 + 208 + 210 = ???
??? can be calculated as sum - 199 + 210, and thus, needing only 2 operations, not 3.
"""
increase = 0
prev_sum = sum(depth_list[0:3])
for i in range(1, len(depth_list) - 2):
# Calculate sum
current_sum = prev_sum - depth_list[i-1] + depth_list[i+2]
if prev_sum < current_sum:
increase += 1
prev_sum = current_sum
return increase
|
Here we take advantage that sums overlap, and thus, to calculate the next sum of numbers, we can
use the previous sum substracting the number that is no longer part of the new sum and adding the next number in the list
For example:
1.- 199 + 200 + 208 = sum
2.- 200 + 208 + 210 = ???
??? can be calculated as sum - 199 + 210, and thus, needing only 2 operations, not 3.
|
Here we take advantage that sums overlap, and thus, to calculate the next sum of numbers, we can
use the previous sum substracting the number that is no longer part of the new sum and adding the next number in the list
|
[
"Here",
"we",
"take",
"advantage",
"that",
"sums",
"overlap",
"and",
"thus",
"to",
"calculate",
"the",
"next",
"sum",
"of",
"numbers",
"we",
"can",
"use",
"the",
"previous",
"sum",
"substracting",
"the",
"number",
"that",
"is",
"no",
"longer",
"part",
"of",
"the",
"new",
"sum",
"and",
"adding",
"the",
"next",
"number",
"in",
"the",
"list"
] |
def solve_part2(depth_list):
increase = 0
prev_sum = sum(depth_list[0:3])
for i in range(1, len(depth_list) - 2):
current_sum = prev_sum - depth_list[i-1] + depth_list[i+2]
if prev_sum < current_sum:
increase += 1
prev_sum = current_sum
return increase
|
[
"def",
"solve_part2",
"(",
"depth_list",
")",
":",
"increase",
"=",
"0",
"prev_sum",
"=",
"sum",
"(",
"depth_list",
"[",
"0",
":",
"3",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"depth_list",
")",
"-",
"2",
")",
":",
"current_sum",
"=",
"prev_sum",
"-",
"depth_list",
"[",
"i",
"-",
"1",
"]",
"+",
"depth_list",
"[",
"i",
"+",
"2",
"]",
"if",
"prev_sum",
"<",
"current_sum",
":",
"increase",
"+=",
"1",
"prev_sum",
"=",
"current_sum",
"return",
"increase"
] |
Here we take advantage that sums overlap, and thus, to calculate the next sum of numbers, we can
use the previous sum substracting the number that is no longer part of the new sum and adding the next number in the list
|
[
"Here",
"we",
"take",
"advantage",
"that",
"sums",
"overlap",
"and",
"thus",
"to",
"calculate",
"the",
"next",
"sum",
"of",
"numbers",
"we",
"can",
"use",
"the",
"previous",
"sum",
"substracting",
"the",
"number",
"that",
"is",
"no",
"longer",
"part",
"of",
"the",
"new",
"sum",
"and",
"adding",
"the",
"next",
"number",
"in",
"the",
"list"
] |
[
"\"\"\"\n Here we take advantage that sums overlap, and thus, to calculate the next sum of numbers, we can\n use the previous sum substracting the number that is no longer part of the new sum and adding the next number in the list\n\n For example:\n 1.- 199 + 200 + 208 = sum\n 2.- 200 + 208 + 210 = ???\n\n ??? can be calculated as sum - 199 + 210, and thus, needing only 2 operations, not 3.\n \"\"\"",
"# Calculate sum"
] |
[
{
"param": "depth_list",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "depth_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def solve_part2(depth_list):
increase = 0
prev_sum = sum(depth_list[0:3])
for i in range(1, len(depth_list) - 2):
current_sum = prev_sum - depth_list[i-1] + depth_list[i+2]
if prev_sum < current_sum:
increase += 1
prev_sum = current_sum
return increase
| 1,179 | 153 |
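
A worked example for solve_part2 above, using the well-known Advent of Code 2021 day 1 sample input:

depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
print(solve_part2(depths))  # 5: five of the sliding three-measurement sums increase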
2f61c6b11d4359dab696825a056a533371bb6641
|
random-weights/Tensorflow-Project-Template
|
utils.py
|
[
"Apache-2.0"
] |
Python
|
write_to_json
| null |
def write_to_json(exp_name, epochs, iter_per_epoch, batch_size, learning_rate):
"""
Makes sense to store each config file inside the experiments/exp_name dir.
That way all the data regarding an experiment is in one directory.
this config file will be generated for each instance of trainer obj.
"""
edict = {
"exp_name": exp_name,
"epochs": epochs,
"iter_per_epoch": iter_per_epoch,
"batch_size": batch_size,
"learning_rate": learning_rate}
|
Makes sense to store each config file inside the experiments/exp_name dir.
That way all the data regarding an experiment is in one directory.
this config file will be generated for each instance of trainer obj.
|
Makes sense to store each config file inside the experiments/exp_name dir.
That way all the data regarding an experiment is in one directory.
this config file will be generated for each instance of trainer obj.
|
[
"Makes",
"sense",
"to",
"store",
"each",
"config",
"file",
"inside",
"the",
"experiments",
"/",
"exp_name",
"dir",
".",
"That",
"way",
"all",
"the",
"data",
"regarding",
"an",
"experiment",
"is",
"in",
"one",
"directory",
".",
"this",
"config",
"file",
"will",
"be",
"generated",
"for",
"each",
"instance",
"of",
"trainer",
"obj",
"."
] |
def write_to_json(exp_name, epochs, iter_per_epoch, batch_size, learning_rate):
edict = {
"exp_name": exp_name,
"epochs": epochs,
"iter_per_epoch": iter_per_epoch,
"batch_size": batch_size,
"learning_rate": learning_rate}
|
[
"def",
"write_to_json",
"(",
"exp_name",
",",
"epochs",
",",
"iter_per_epoch",
",",
"batch_size",
",",
"learning_rate",
")",
":",
"edict",
"=",
"{",
"\"exp_name\"",
":",
"exp_name",
",",
"\"epochs\"",
":",
"epochs",
",",
"\"iter_per_epoch\"",
":",
"iter_per_epoch",
",",
"\"batch_size\"",
":",
"batch_size",
",",
"\"learning_rate\"",
":",
"learning_rate",
"}"
] |
Makes sense to store each config file inside the experiments/exp_name dir.
|
[
"Makes",
"sense",
"to",
"store",
"each",
"config",
"file",
"inside",
"the",
"experiments",
"/",
"exp_name",
"dir",
"."
] |
[
"\"\"\"\n\tMakes sense to store each config file inside the experiments/exp_name dir.\n\tThat way all the data regarding an experiment is in one directory.\n\tthis config file will be generated for each instance of trainer obj.\n\t\"\"\""
] |
[
{
"param": "exp_name",
"type": null
},
{
"param": "epochs",
"type": null
},
{
"param": "iter_per_epoch",
"type": null
},
{
"param": "batch_size",
"type": null
},
{
"param": "learning_rate",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "exp_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "epochs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "iter_per_epoch",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "batch_size",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "learning_rate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def write_to_json(exp_name, epochs, iter_per_epoch, batch_size, learning_rate):
edict = {
"exp_name": exp_name,
"epochs": epochs,
"iter_per_epoch": iter_per_epoch,
"batch_size": batch_size,
"learning_rate": learning_rate}
| 1,180 | 471 |
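
An illustrative call for write_to_json above; the argument values are made up, and note that as captured here the function only assembles the config dict without serializing it:

write_to_json(exp_name="mnist_baseline", epochs=10, iter_per_epoch=500,
              batch_size=64, learning_rate=1e-3)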
6be69e5fe2a9e88da94a3922e093a1a634f79165
|
lonegunmanb/pulumi
|
sdk/python/lib/pulumi/runtime/known_types.py
|
[
"Apache-2.0"
] |
Python
|
asset
|
type
|
def asset(class_obj: type) -> type:
"""
Decorator to annotate the Asset class. Registers the decorated class
as the Asset known type.
"""
assert isinstance(class_obj, type), "class_obj is not a Class"
global _asset_resource_type
_asset_resource_type = class_obj
return class_obj
|
Decorator to annotate the Asset class. Registers the decorated class
as the Asset known type.
|
Decorator to annotate the Asset class. Registers the decorated class
as the Asset known type.
|
[
"Decorator",
"to",
"annotate",
"the",
"Asset",
"class",
".",
"Registers",
"the",
"decorated",
"class",
"as",
"the",
"Asset",
"known",
"type",
"."
] |
def asset(class_obj: type) -> type:
assert isinstance(class_obj, type), "class_obj is not a Class"
global _asset_resource_type
_asset_resource_type = class_obj
return class_obj
|
[
"def",
"asset",
"(",
"class_obj",
":",
"type",
")",
"->",
"type",
":",
"assert",
"isinstance",
"(",
"class_obj",
",",
"type",
")",
",",
"\"class_obj is not a Class\"",
"global",
"_asset_resource_type",
"_asset_resource_type",
"=",
"class_obj",
"return",
"class_obj"
] |
Decorator to annotate the Asset class.
|
[
"Decorator",
"to",
"annotate",
"the",
"Asset",
"class",
"."
] |
[
"\"\"\"\n Decorator to annotate the Asset class. Registers the decorated class\n as the Asset known type.\n \"\"\""
] |
[
{
"param": "class_obj",
"type": "type"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "class_obj",
"type": "type",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def asset(class_obj: type) -> type:
assert isinstance(class_obj, type), "class_obj is not a Class"
global _asset_resource_type
_asset_resource_type = class_obj
return class_obj
| 1,181 | 1,016 |
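
A minimal usage sketch for the asset decorator above (the decorated class is a stand-in, not Pulumi's real Asset class):

@asset
class Asset:
    pass

print(_asset_resource_type is Asset)  # True: the class is now the registered known type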
dcbfb87b0c041c1302a54f0b0e4c4b202b951d1b
|
letrend/neopixel_fpga
|
ros_catkin_ws/src/ros_comm/roslaunch/src/roslaunch/config.py
|
[
"MIT"
] |
Python
|
_summary_name
|
<not_specific>
|
def _summary_name(node):
"""
Generate summary label for node based on its package, type, and name
"""
if node.name:
return "%s (%s/%s)"%(node.name, node.package, node.type)
else:
return "%s/%s"%(node.package, node.type)
|
Generate summary label for node based on its package, type, and name
|
Generate summary label for node based on its package, type, and name
|
[
"Generate",
"summary",
"label",
"for",
"node",
"based",
"on",
"its",
"package",
"type",
"and",
"name"
] |
def _summary_name(node):
if node.name:
return "%s (%s/%s)"%(node.name, node.package, node.type)
else:
return "%s/%s"%(node.package, node.type)
|
[
"def",
"_summary_name",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
":",
"return",
"\"%s (%s/%s)\"",
"%",
"(",
"node",
".",
"name",
",",
"node",
".",
"package",
",",
"node",
".",
"type",
")",
"else",
":",
"return",
"\"%s/%s\"",
"%",
"(",
"node",
".",
"package",
",",
"node",
".",
"type",
")"
] |
Generate summary label for node based on its package, type, and name
|
[
"Generate",
"summary",
"label",
"for",
"node",
"based",
"on",
"its",
"package",
"type",
"and",
"name"
] |
[
"\"\"\"\n Generate summary label for node based on its package, type, and name\n \"\"\""
] |
[
{
"param": "node",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "node",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _summary_name(node):
if node.name:
return "%s (%s/%s)"%(node.name, node.package, node.type)
else:
return "%s/%s"%(node.package, node.type)
| 1,182 | 274 |
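
A minimal usage sketch for _summary_name above; SimpleNamespace stands in for a roslaunch node object, and the field values are illustrative:

from types import SimpleNamespace

node = SimpleNamespace(name="talker", package="rospy_tutorials", type="talker.py")
print(_summary_name(node))  # talker (rospy_tutorials/talker.py)

anon = SimpleNamespace(name=None, package="rospy_tutorials", type="talker.py")
print(_summary_name(anon))  # rospy_tutorials/talker.py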
c99ef35e176a4ab75dcbed7e59c0eb79dc040881
|
chesterharvey/StreetSpace
|
streetspace/geometry.py
|
[
"MIT"
] |
Python
|
intersect_shapes
|
<not_specific>
|
def intersect_shapes(shapes_a, shapes_b, shapes_b_sindex=None):
"""Find intersections between shapes in two lists
Parameters
----------
shapes_a : list of Shapely geometries
List of geometries to be intersected with those in shapes_b
shapes_b : list of Shapely geometries
List of geometries to be intersected with those in shapes_a
shapes_b_sindex : :class:`rtree.index.Index`, optional, default = ``None``
Spatial index for shapes_b (best created with ``list_sindex``)
Returns
-------
:obj:`list`
List of tuples for each intersection with structure:\
(a_index, b_index, intersection_geometry)
"""
intersections = []
for i, shape_a in enumerate(shapes_a):
indices_b = list(range(len(shapes_b)))
if shapes_b_sindex:
b_for_analysis = [(indices_b[i], shapes_b[i]) for i in
shapes_b_sindex.intersection(shape_a.bounds)]#, objects='raw')]
else:
b_for_analysis = zip(indices_b, shapes_b)
for j, shape_b in b_for_analysis:
if shape_a.intersects(shape_b):
intersection = shape_a.intersection(shape_b)
intersections.append((i, j, intersection))
return intersections
|
Find intersections between shapes in two lists
Parameters
----------
shapes_a : list of Shapely geometries
List of geometries to be intersected with those in shapes_b
shapes_b : list of Shapely geometries
List of geometries to be intersected with those in shapes_a
shapes_b_sindex : :class:`rtree.index.Index`, optional, default = ``None``
Spatial index for shapes_b (best created with ``list_sindex``)
Returns
-------
:obj:`list`
List of tuples for each intersection with structure:\
(a_index, b_index, intersection_geometry)
|
Find intersections between shapes in two lists
Parameters
Returns
|
[
"Find",
"intersections",
"between",
"shapes",
"in",
"two",
"lists",
"Parameters",
"Returns"
] |
def intersect_shapes(shapes_a, shapes_b, shapes_b_sindex=None):
intersections = []
for i, shape_a in enumerate(shapes_a):
indices_b = list(range(len(shapes_b)))
if shapes_b_sindex:
b_for_analysis = [(indices_b[i], shapes_b[i]) for i in
shapes_b_sindex.intersection(shape_a.bounds)]
else:
b_for_analysis = zip(indices_b, shapes_b)
for j, shape_b in b_for_analysis:
if shape_a.intersects(shape_b):
intersection = shape_a.intersection(shape_b)
intersections.append((i, j, intersection))
return intersections
|
[
"def",
"intersect_shapes",
"(",
"shapes_a",
",",
"shapes_b",
",",
"shapes_b_sindex",
"=",
"None",
")",
":",
"intersections",
"=",
"[",
"]",
"for",
"i",
",",
"shape_a",
"in",
"enumerate",
"(",
"shapes_a",
")",
":",
"indices_b",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"shapes_b",
")",
")",
")",
"if",
"shapes_b_sindex",
":",
"b_for_analysis",
"=",
"[",
"(",
"indices_b",
"[",
"i",
"]",
",",
"shapes_b",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"shapes_b_sindex",
".",
"intersection",
"(",
"shape_a",
".",
"bounds",
")",
"]",
"else",
":",
"b_for_analysis",
"=",
"zip",
"(",
"indices_b",
",",
"shapes_b",
")",
"for",
"j",
",",
"shape_b",
"in",
"b_for_analysis",
":",
"if",
"shape_a",
".",
"intersects",
"(",
"shape_b",
")",
":",
"intersection",
"=",
"shape_a",
".",
"intersection",
"(",
"shape_b",
")",
"intersections",
".",
"append",
"(",
"(",
"i",
",",
"j",
",",
"intersection",
")",
")",
"return",
"intersections"
] |
Find intersections between shapes in two lists
Parameters
|
[
"Find",
"intersections",
"between",
"shapes",
"in",
"two",
"lists",
"Parameters"
] |
[
"\"\"\"Find intersections between shapes in two lists\n\n Parameters\n ----------\n shapes_a : list of Shapely geometries\n List of geometries to be intersected with those in shapes_b\n shapes_b : list of Shapely geometries\n List of geometries to be intersected with those in shapes_a\n shapes_b_sindex : :class:`rtree.index.Index`, optional, default = ``None``\n Spatial index for shapes_b (best created with ``list_sindex``)\n\n Returns\n -------\n :obj:`list`\n List of tuples for each intersection with structure:\\\n (a_index, b_index, intersection_geometry)\n \"\"\"",
"#, objects='raw')]"
] |
[
{
"param": "shapes_a",
"type": null
},
{
"param": "shapes_b",
"type": null
},
{
"param": "shapes_b_sindex",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "shapes_a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "shapes_b",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "shapes_b_sindex",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "obj",
"docstring": "`list`\nList of tuples for each intersection with structure:\\\n(a_index, b_index, intersection_geometry)",
"docstring_tokens": [
"`",
"list",
"`",
"List",
"of",
"tuples",
"for",
"each",
"intersection",
"with",
"structure",
":",
"\\",
"(",
"a_index",
"b_index",
"intersection_geometry",
")"
]
}
]
}
|
def intersect_shapes(shapes_a, shapes_b, shapes_b_sindex=None):
intersections = []
for i, shape_a in enumerate(shapes_a):
indices_b = list(range(len(shapes_b)))
if shapes_b_sindex:
b_for_analysis = [(indices_b[i], shapes_b[i]) for i in
shapes_b_sindex.intersection(shape_a.bounds)]
else:
b_for_analysis = zip(indices_b, shapes_b)
for j, shape_b in b_for_analysis:
if shape_a.intersects(shape_b):
intersection = shape_a.intersection(shape_b)
intersections.append((i, j, intersection))
return intersections
| 1,183 | 527 |
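
A minimal usage sketch for intersect_shapes above (requires the shapely package; the geometries are illustrative):

from shapely.geometry import LineString

shapes_a = [LineString([(0, 0), (2, 2)])]
shapes_b = [LineString([(0, 2), (2, 0)])]
print(intersect_shapes(shapes_a, shapes_b))  # one hit: indices (0, 0) and their intersection point (1, 1)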
72721b43301dc03c1044ab6ec4a86badcad06242
|
wimax-grapl/pants
|
src/python/pants/util/strutil.py
|
[
"Apache-2.0"
] |
Python
|
first_paragraph
|
str
|
def first_paragraph(s: str) -> str:
"""Get the first paragraph, where paragraphs are separated by blank lines."""
lines = s.splitlines()
first_blank_line_index = next(
(i for i, line in enumerate(lines) if line.strip() == ""), len(lines)
)
return " ".join(lines[:first_blank_line_index])
|
Get the first paragraph, where paragraphs are separated by blank lines.
|
Get the first paragraph, where paragraphs are separated by blank lines.
|
[
"Get",
"the",
"first",
"paragraph",
"where",
"paragraphs",
"are",
"separated",
"by",
"blank",
"lines",
"."
] |
def first_paragraph(s: str) -> str:
lines = s.splitlines()
first_blank_line_index = next(
(i for i, line in enumerate(lines) if line.strip() == ""), len(lines)
)
return " ".join(lines[:first_blank_line_index])
|
[
"def",
"first_paragraph",
"(",
"s",
":",
"str",
")",
"->",
"str",
":",
"lines",
"=",
"s",
".",
"splitlines",
"(",
")",
"first_blank_line_index",
"=",
"next",
"(",
"(",
"i",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"\"\"",
")",
",",
"len",
"(",
"lines",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"lines",
"[",
":",
"first_blank_line_index",
"]",
")"
] |
Get the first paragraph, where paragraphs are separated by blank lines.
|
[
"Get",
"the",
"first",
"paragraph",
"where",
"paragraphs",
"are",
"separated",
"by",
"blank",
"lines",
"."
] |
[
"\"\"\"Get the first paragraph, where paragraphs are separated by blank lines.\"\"\""
] |
[
{
"param": "s",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def first_paragraph(s: str) -> str:
lines = s.splitlines()
first_blank_line_index = next(
(i for i, line in enumerate(lines) if line.strip() == ""), len(lines)
)
return " ".join(lines[:first_blank_line_index])
| 1,184 | 628 |
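
A minimal usage sketch for first_paragraph above:

text = "Get the first paragraph.\nIt may span several lines.\n\nSecond paragraph."
print(first_paragraph(text))  # Get the first paragraph. It may span several lines.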
2960052e743788f0426f2808cae26c3b71418268
|
WithPrecedent/denovo
|
denovo/utilities/modify.py
|
[
"Apache-2.0"
] |
Python
|
capitalify
|
str
|
def capitalify(item: str) -> str:
"""Converts a snake case str to capital case.
Args:
item (str): str to convert.
Returns:
str: 'item' converted to capital case.
"""
return item.replace('_', ' ').title().replace(' ', '')
|
Converts a snake case str to capital case.
Args:
item (str): str to convert.
Returns:
str: 'item' converted to capital case.
|
Converts a snake case str to capital case.
|
[
"Converts",
"a",
"snake",
"case",
"str",
"to",
"capital",
"case",
"."
] |
def capitalify(item: str) -> str:
return item.replace('_', ' ').title().replace(' ', '')
|
[
"def",
"capitalify",
"(",
"item",
":",
"str",
")",
"->",
"str",
":",
"return",
"item",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"title",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")"
] |
Converts a snake case str to capital case.
|
[
"Converts",
"a",
"snake",
"case",
"str",
"to",
"capital",
"case",
"."
] |
[
"\"\"\"Converts a snake case str to capital case.\n\n Args:\n item (str): str to convert.\n\n Returns:\n str: 'item' converted to capital case.\n\n \"\"\""
] |
[
{
"param": "item",
"type": "str"
}
] |
{
"returns": [
{
"docstring": "'item' converted to capital case.",
"docstring_tokens": [
"'",
"item",
"'",
"converted",
"to",
"capital",
"case",
"."
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "item",
"type": "str",
"docstring": "str to convert.",
"docstring_tokens": [
"str",
"to",
"convert",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def capitalify(item: str) -> str:
return item.replace('_', ' ').title().replace(' ', '')
| 1,185 | 900 |
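
A minimal usage sketch for capitalify above:

print(capitalify("snake_case_name"))  # SnakeCaseName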
af89b60e95ef0917fbca46c146e55df9171697a4
|
ShadowLordAlpha/lumberyard
|
dev/Code/Tools/AzTestScanner/aztest/common.py
|
[
"AML"
] |
Python
|
to_list
|
<not_specific>
|
def to_list(obj):
"""Generator that converts given object to a list if possible and yields each value in it.
This function specifically handles strings, lists, sets, and tuples. All other objects are treated as a single value
of a list, including dictionaries and other containers.
:param obj: the object to convert
:return: a list of the given value(s)
:rtype: list
"""
if obj is None:
return
elif isinstance(obj, str):
yield obj
elif isinstance(obj, list) or isinstance(obj, set) or isinstance(obj, tuple):
for x in obj:
yield x
else:
yield obj
|
Generator that converts given object to a list if possible and yields each value in it.
This function specifically handles strings, lists, sets, and tuples. All other objects are treated as a single value
of a list, including dictionaries and other containers.
:param obj: the object to convert
:return: a list of the given value(s)
:rtype: list
|
Generator that converts given object to a list if possible and yields each value in it.
This function specifically handles strings, lists, sets, and tuples. All other objects are treated as a single value
of a list, including dictionaries and other containers.
|
[
"Generator",
"that",
"converts",
"given",
"object",
"to",
"a",
"list",
"if",
"possible",
"and",
"yields",
"each",
"value",
"in",
"it",
".",
"This",
"function",
"specifically",
"handles",
"strings",
"lists",
"sets",
"and",
"tuples",
".",
"All",
"other",
"objects",
"are",
"treated",
"as",
"a",
"single",
"value",
"of",
"a",
"list",
"including",
"dictionaries",
"and",
"other",
"containers",
"."
] |
def to_list(obj):
if obj is None:
return
elif isinstance(obj, str):
yield obj
elif isinstance(obj, list) or isinstance(obj, set) or isinstance(obj, tuple):
for x in obj:
yield x
else:
yield obj
|
[
"def",
"to_list",
"(",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"elif",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"yield",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
"or",
"isinstance",
"(",
"obj",
",",
"set",
")",
"or",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
":",
"for",
"x",
"in",
"obj",
":",
"yield",
"x",
"else",
":",
"yield",
"obj"
] |
Generator that converts given object to a list if possible and yields each value in it.
|
[
"Generator",
"that",
"converts",
"given",
"object",
"to",
"a",
"list",
"if",
"possible",
"and",
"yields",
"each",
"value",
"in",
"it",
"."
] |
[
"\"\"\"Generator that converts given object to a list if possible and yields each value in it.\n\n This function specifically handles strings, lists, sets, and tuples. All other objects are treated as a single value\n of a list, including dictionaries and other containers.\n\n :param obj: the object to convert\n :return: a list of the given value(s)\n :rtype: list\n \"\"\""
] |
[
{
"param": "obj",
"type": null
}
] |
{
"returns": [
{
"docstring": "a list of the given value(s)",
"docstring_tokens": [
"a",
"list",
"of",
"the",
"given",
"value",
"(",
"s",
")"
],
"type": "list"
}
],
"raises": [],
"params": [
{
"identifier": "obj",
"type": null,
"docstring": "the object to convert",
"docstring_tokens": [
"the",
"object",
"to",
"convert"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def to_list(obj):
if obj is None:
return
elif isinstance(obj, str):
yield obj
elif isinstance(obj, list) or isinstance(obj, set) or isinstance(obj, tuple):
for x in obj:
yield x
else:
yield obj
| 1,186 | 563 |
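
A minimal usage sketch for to_list above; it is a generator, so the results are wrapped in list() for display:

print(list(to_list("abc")))      # ['abc']: a string counts as one value
print(list(to_list((1, 2, 3))))  # [1, 2, 3]
print(list(to_list({"a": 1})))   # [{'a': 1}]: a dict is treated as one value
print(list(to_list(None)))       # []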
8448568041d9e1ab11d967d3069bea82bc6f9f2d
|
Adi-Shavit/PySudokuSolver
|
src/algorithm_x.py
|
[
"MIT"
] |
Python
|
uncover
| null |
def uncover(column_node: QuadNode):
"""
This function performs the uncovers a previously covered column.
"""
row_node = column_node.top_node
while row_node is not column_node:
left_node = row_node.left_node
while left_node is not row_node:
left_node.top_node.bottom_node = left_node
left_node.bottom_node.top_node = left_node
left_node = left_node.left_node
row_node = row_node.top_node
column_node.right_node.left_node = column_node
column_node.left_node.right_node = column_node
|
This function performs the uncovers a previously covered column.
|
This function performs the uncovers a previously covered column.
|
[
"This",
"function",
"performs",
"the",
"uncovers",
"a",
"previously",
"covered",
"column",
"."
] |
def uncover(column_node: QuadNode):
row_node = column_node.top_node
while row_node is not column_node:
left_node = row_node.left_node
while left_node is not row_node:
left_node.top_node.bottom_node = left_node
left_node.bottom_node.top_node = left_node
left_node = left_node.left_node
row_node = row_node.top_node
column_node.right_node.left_node = column_node
column_node.left_node.right_node = column_node
|
[
"def",
"uncover",
"(",
"column_node",
":",
"QuadNode",
")",
":",
"row_node",
"=",
"column_node",
".",
"top_node",
"while",
"row_node",
"is",
"not",
"column_node",
":",
"left_node",
"=",
"row_node",
".",
"left_node",
"while",
"left_node",
"is",
"not",
"row_node",
":",
"left_node",
".",
"top_node",
".",
"bottom_node",
"=",
"left_node",
"left_node",
".",
"bottom_node",
".",
"top_node",
"=",
"left_node",
"left_node",
"=",
"left_node",
".",
"left_node",
"row_node",
"=",
"row_node",
".",
"top_node",
"column_node",
".",
"right_node",
".",
"left_node",
"=",
"column_node",
"column_node",
".",
"left_node",
".",
"right_node",
"=",
"column_node"
] |
This function performs the uncovers a previously covered column.
|
[
"This",
"function",
"performs",
"the",
"uncovers",
"a",
"previously",
"covered",
"column",
"."
] |
[
"\"\"\"\n This function performs the uncovers a previously covered column.\n \"\"\""
] |
[
{
"param": "column_node",
"type": "QuadNode"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "column_node",
"type": "QuadNode",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
# QuadNode is not included in this snippet; the minimal stand-in below (a
# dancing-links node with top/bottom/left/right pointers) makes it importable:
class QuadNode:
    def __init__(self):
        self.top_node = self.bottom_node = None
        self.left_node = self.right_node = None
def uncover(column_node: QuadNode):
row_node = column_node.top_node
while row_node is not column_node:
left_node = row_node.left_node
while left_node is not row_node:
left_node.top_node.bottom_node = left_node
left_node.bottom_node.top_node = left_node
left_node = left_node.left_node
row_node = row_node.top_node
column_node.right_node.left_node = column_node
column_node.left_node.right_node = column_node
| 1,187 | 262 |
75468cc3fc1b3ed282c8279d69cb8d047c61d25f
|
mttaggart/codeforteachers
|
ch4/4-1.py
|
[
"MIT"
] |
Python
|
double
|
<not_specific>
|
def double(n):
"""
Takes a number n and doubles it
"""
return n * 2
|
Takes a number n and doubles it
|
Takes a number n and doubles it
|
[
"Takes",
"a",
"number",
"n",
"and",
"doubles",
"it"
] |
def double(n):
return n * 2
|
[
"def",
"double",
"(",
"n",
")",
":",
"return",
"n",
"*",
"2"
] |
Takes a number n and doubles it
|
[
"Takes",
"a",
"number",
"n",
"and",
"doubles",
"it"
] |
[
"\"\"\"\n Takes a number n and doubles it\n \"\"\""
] |
[
{
"param": "n",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def double(n):
return n * 2
| 1,189 | 399 |
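
A minimal usage sketch for double above; note that n * 2 also "doubles" sequences by repetition:

print(double(4))     # 8
print(double("ab"))  # abab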
fd06ca7d150fa2a84714ff325265e0df4b146440
|
frostspb/torskel
|
torskel/str_utils.py
|
[
"MIT"
] |
Python
|
is_number
|
<not_specific>
|
def is_number(value):
"""
Checks whether the value is a number
:param value:
:return: bool
"""
try:
complex(value)
except ValueError:
return False
return True
|
Checks whether the value is a number
:param value:
:return: bool
|
Checks whether the value is a number
|
[
"Checks",
"whether",
"the",
"value",
"is",
"a",
"number"
] |
def is_number(value):
try:
complex(value)
except ValueError:
return False
return True
|
[
"def",
"is_number",
"(",
"value",
")",
":",
"try",
":",
"complex",
"(",
"value",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] |
Checks whether the value is a number
|
[
"Checks",
"whether",
"the",
"value",
"is",
"a",
"number"
] |
[
"\"\"\"\n Checks whether the value is a number\n :param value:\n :return: bool\n \"\"\""
] |
[
{
"param": "value",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_number(value):
try:
complex(value)
except ValueError:
return False
return True
| 1,190 | 179 |
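
A minimal usage sketch for is_number above; only ValueError is caught, so a non-string, non-numeric argument such as None raises TypeError:

print(is_number("3.5"))   # True
print(is_number("1+2j"))  # True: complex() accepts complex literals
print(is_number("abc"))   # False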
275d97bb531fa2f8a8c2b2cc56c0ec1eafb5146b
|
Rasools/CancerProteinSecretionML
|
scripts/omicsAnalysisFunctions.py
|
[
"MIT"
] |
Python
|
dropNaNs
|
<not_specific>
|
def dropNaNs(df, ClassVar='none'):
"""
Given the omics data passed as a dataframe, and (optionally) the name of a
class variable, it
i) prints the total number of samples in the dataset.
ii) if none of the samples have any missing values, it returns the same dataframe, else:
a) number of samples having missing values are reported
b) these samples are removed from the dataset
c) number of samples remained in the dataset after removing with missing values, are reported
d) returns the updated dataframe
"""
print("\n*********************************************")
print("Number of samples in the dataset: {0}".format(df.shape[0]))
print("*********************************************")
if ClassVar == 'none':
dfdna = df.dropna()
else:
dfdna = df.dropna(subset=[ClassVar])
if df.shape[0] > dfdna.shape[0]:
print("Number of samples having missing values: {0}".format(df.shape[0]- dfdna.shape[0]))
print("Number of samples remained after dropping samples with missing data: {0}".format(dfdna.shape[0]))
else:
print("There are no samples with missing values!")
return dfdna
|
Given the omics data passed as a dataframe, and (optionally) the name of a
class variable, it
i) prints the total number of samples in the dataset.
ii) if none of the samples have any missing values, it returns the same dataframe, else:
a) number of samples having missing values are reported
b) these samples are removed from the dataset
c) number of samples remained in the dataset after removing with missing values, are reported
d) returns the updated dataframe
|
Given the omics data passed as a dataframe, and (optionally) the name of a
class variable, it
i) prints the total number of samples in the dataset.
ii) if none of the samples have any missing values, it returns the same dataframe, else:
a) number of samples having missing values are reported
b) these samples are removed from the dataset
c) number of samples remained in the dataset after removing with missing values, are reported
d) returns the updated dataframe
|
[
"Given",
"the",
"omics",
"data",
"passed",
"as",
"a",
"dataframe",
"and",
"(",
"optionally",
")",
"the",
"name",
"of",
"a",
"class",
"variable",
"it",
"i",
")",
"prints",
"the",
"total",
"number",
"of",
"samples",
"in",
"the",
"dataset",
".",
"ii",
")",
"if",
"none",
"of",
"the",
"samples",
"have",
"any",
"missing",
"values",
"it",
"returns",
"the",
"same",
"dataframe",
"else",
":",
"a",
")",
"number",
"of",
"samples",
"having",
"missing",
"values",
"are",
"reported",
"b",
")",
"these",
"samples",
"are",
"removed",
"from",
"the",
"dataset",
"c",
")",
"number",
"of",
"samples",
"remained",
"in",
"the",
"dataset",
"after",
"removing",
"with",
"missing",
"values",
"are",
"reported",
"d",
")",
"returns",
"the",
"updated",
"dataframe"
] |
def dropNaNs(df, ClassVar='none'):
print("\n*********************************************")
print("Number of samples in the dataset: {0}".format(df.shape[0]))
print("*********************************************")
if ClassVar == 'none':
dfdna = df.dropna()
else:
dfdna = df.dropna(subset=[ClassVar])
if df.shape[0] > dfdna.shape[0]:
print("Number of samples having missing values: {0}".format(df.shape[0]- dfdna.shape[0]))
print("Number of samples remained after dropping samples with missing data: {0}".format(dfdna.shape[0]))
else:
print("There are no samples with missing values!")
return dfdna
|
[
"def",
"dropNaNs",
"(",
"df",
",",
"ClassVar",
"=",
"'none'",
")",
":",
"print",
"(",
"\"\\n*********************************************\"",
")",
"print",
"(",
"\"Number of samples in the dataset: {0}\"",
".",
"format",
"(",
"df",
".",
"shape",
"[",
"0",
"]",
")",
")",
"print",
"(",
"\"*********************************************\"",
")",
"if",
"ClassVar",
"==",
"'none'",
":",
"dfdna",
"=",
"df",
".",
"dropna",
"(",
")",
"else",
":",
"dfdna",
"=",
"df",
".",
"dropna",
"(",
"subset",
"=",
"[",
"ClassVar",
"]",
")",
"if",
"df",
".",
"shape",
"[",
"0",
"]",
">",
"dfdna",
".",
"shape",
"[",
"0",
"]",
":",
"print",
"(",
"\"Number of samples having missing values: {0}\"",
".",
"format",
"(",
"df",
".",
"shape",
"[",
"0",
"]",
"-",
"dfdna",
".",
"shape",
"[",
"0",
"]",
")",
")",
"print",
"(",
"\"Number of samples remained after dropping samples with missing data: {0}\"",
".",
"format",
"(",
"dfdna",
".",
"shape",
"[",
"0",
"]",
")",
")",
"else",
":",
"print",
"(",
"\"There are no samples with missing values!\"",
")",
"return",
"dfdna"
] |
Given the omics data passed as a dataframe, and (optionally) the name of a
class variable, it
|
[
"Given",
"the",
"omics",
"data",
"passed",
"as",
"a",
"dataframe",
"and",
"(",
"optionally",
")",
"the",
"name",
"of",
"a",
"class",
"variable",
"it"
] |
[
"\"\"\"\n Given the omics data passed as a dataframe, and (optionally) the name of a\n class variable, it\n \n i) prints the total number of samples in the dataset.\n ii) if none of the samples have any missing values, it returns the same dataframe, else:\n a) number of samples having missing values are reported\n b) these samples are removed from the dataset\n c) number of samples remained in the dataset after removing with missing values, are reported\n d) returns the updated dataframe\n \"\"\""
] |
[
{
"param": "df",
"type": null
},
{
"param": "ClassVar",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ClassVar",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def dropNaNs(df, ClassVar='none'):
print("\n*********************************************")
print("Number of samples in the dataset: {0}".format(df.shape[0]))
print("*********************************************")
if ClassVar == 'none':
dfdna = df.dropna()
else:
dfdna = df.dropna(subset=[ClassVar])
if df.shape[0] > dfdna.shape[0]:
print("Number of samples having missing values: {0}".format(df.shape[0]- dfdna.shape[0]))
print("Number of samples remained after dropping samples with missing data: {0}".format(dfdna.shape[0]))
else:
print("There are no samples with missing values!")
return dfdna
| 1,191 | 636 |
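
A minimal usage sketch for dropNaNs above (requires pandas; the toy DataFrame is illustrative):

import pandas as pd

df = pd.DataFrame({"gene": [1.0, None, 3.0], "label": ["a", "b", None]})
print(dropNaNs(df).shape)                    # (1, 2): rows with any NaN are dropped
print(dropNaNs(df, ClassVar="label").shape)  # (2, 2): only rows missing 'label' are dropped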
11abb823f13274101a7ce3985d2edbdba4ecf30f
|
CedricTomasini/imprimes_valaisans
|
5_N_metadata.py
|
[
"Apache-2.0"
] |
Python
|
search_for_pattern
|
<not_specific>
|
def search_for_pattern(pattern, text):
"""
Generic function that encapsulate the search for the first occurence of a pattern that may not exist.
(re.search). Case insensitive and pattern cannot start or end in the middle of a word.
Return : match object or None
"""
return re.search("\\b"+pattern+"\\b",text, re.IGNORECASE)
|
Generic function that encapsulate the search for the first occurence of a pattern that may not exist.
(re.search). Case insensitive and pattern cannot start or end in the middle of a word.
Return : match object or None
|
Generic function that encapsulate the search for the first occurence of a pattern that may not exist.
|
[
"Generic",
"function",
"that",
"encapsulate",
"the",
"search",
"for",
"the",
"first",
"occurence",
"of",
"a",
"pattern",
"that",
"may",
"not",
"exist",
"."
] |
def search_for_pattern(pattern, text):
return re.search("\\b"+pattern+"\\b",text, re.IGNORECASE)
|
[
"def",
"search_for_pattern",
"(",
"pattern",
",",
"text",
")",
":",
"return",
"re",
".",
"search",
"(",
"\"\\\\b\"",
"+",
"pattern",
"+",
"\"\\\\b\"",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")"
] |
Generic function that encapsulate the search for the first occurence of a pattern that may not exist.
|
[
"Generic",
"function",
"that",
"encapsulate",
"the",
"search",
"for",
"the",
"first",
"occurence",
"of",
"a",
"pattern",
"that",
"may",
"not",
"exist",
"."
] |
[
"\"\"\"\n Generic function that encapsulate the search for the first occurence of a pattern that may not exist.\n (re.search). Case insensitive and pattern cannot start or end in the middle of a word.\n Return : match object or None\n \"\"\""
] |
[
{
"param": "pattern",
"type": null
},
{
"param": "text",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pattern",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "text",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def search_for_pattern(pattern, text):
return re.search("\\b"+pattern+"\\b",text, re.IGNORECASE)
| 1,192 | 48 |
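
A minimal usage sketch for search_for_pattern above, showing the case-insensitive, whole-word behavior (the sample strings are made up):

print(search_for_pattern("valais", "Canton du Valais, 1850").group(0))  # Valais
print(search_for_pattern("valais", "les Valaisans"))                    # None: no match inside a longer word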
a6dd3ceaccf6776ce6c35782a1e52a91df83e121
|
Fliipzy/lindenmayer-system-illustrator
|
src/utilities.py
|
[
"MIT"
] |
Python
|
rgb_tuple_to_hex_string
|
<not_specific>
|
def rgb_tuple_to_hex_string(rgb_tuple):
'''
Takes in a RGB tuple (float, float, float) and returns the hex formatted RGB value (string)
'''
return "#%02x%02x%02x" % (int(rgb_tuple[0]), int(rgb_tuple[1]), int(rgb_tuple[2]))
|
Takes in a RGB tuple (float, float, float) and returns the hex formatted RGB value (string)
|
Takes in a RGB tuple (float, float, float) and returns the hex formatted RGB value (string)
|
[
"Takes",
"in",
"a",
"RGB",
"tuple",
"(",
"float",
"float",
"float",
")",
"and",
"returns",
"the",
"hex",
"formatted",
"RGB",
"value",
"(",
"string",
")"
] |
def rgb_tuple_to_hex_string(rgb_tuple):
return "#%02x%02x%02x" % (int(rgb_tuple[0]), int(rgb_tuple[1]), int(rgb_tuple[2]))
|
[
"def",
"rgb_tuple_to_hex_string",
"(",
"rgb_tuple",
")",
":",
"return",
"\"#%02x%02x%02x\"",
"%",
"(",
"int",
"(",
"rgb_tuple",
"[",
"0",
"]",
")",
",",
"int",
"(",
"rgb_tuple",
"[",
"1",
"]",
")",
",",
"int",
"(",
"rgb_tuple",
"[",
"2",
"]",
")",
")"
] |
Takes in a RGB tuple (float, float, float) and returns the hex formatted RGB value (string)
|
[
"Takes",
"in",
"a",
"RGB",
"tuple",
"(",
"float",
"float",
"float",
")",
"and",
"returns",
"the",
"hex",
"formatted",
"RGB",
"value",
"(",
"string",
")"
] |
[
"'''\n Takes in a RGB tuple (float, float, float) and returns the hex formatted RGB value (string)\n '''"
] |
[
{
"param": "rgb_tuple",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "rgb_tuple",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def rgb_tuple_to_hex_string(rgb_tuple):
return "#%02x%02x%02x" % (int(rgb_tuple[0]), int(rgb_tuple[1]), int(rgb_tuple[2]))
| 1,193 | 199 |
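
A minimal usage sketch for rgb_tuple_to_hex_string above; the float components are truncated by int():

print(rgb_tuple_to_hex_string((255.0, 128.9, 0.0)))  # #ff8000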
97a117b2056542869917d47426a5629822e2aa8b
|
michael-ross-ven/vengeance
|
dist/vengeance-1.0.3.tar/dist/vengeance-1.0.3/vengeance/excel_com/workbook.py
|
[
"MIT"
] |
Python
|
close_workbook
| null |
def close_workbook(wb, save):
"""
all references need to be severed for excel_com pointer to be released
variables should be set to None
"""
if save and wb.ReadOnly:
raise AssertionError("workbook: '{}' is open read-only, cannot save and close".format(wb.Name))
excel_app = wb.Application
if save:
wb.Save()
else:
excel_app.DisplayAlerts = False
wb.Close()
wb = None
if save is False:
excel_app.DisplayAlerts = True
if excel_app.Workbooks.Count == 0:
excel_app.Quit()
excel_app = None
|
all references need to be severed for excel_com pointer to be released
variables should be set to None
|
all references need to be severed for excel_com pointer to be released
variables should be set to None
|
[
"all",
"references",
"need",
"to",
"be",
"severed",
"for",
"excel_com",
"pointer",
"to",
"be",
"released",
"variables",
"should",
"be",
"set",
"to",
"None"
] |
def close_workbook(wb, save):
if save and wb.ReadOnly:
raise AssertionError("workbook: '{}' is open read-only, cannot save and close".format(wb.Name))
excel_app = wb.Application
if save:
wb.Save()
else:
excel_app.DisplayAlerts = False
wb.Close()
wb = None
if save is False:
excel_app.DisplayAlerts = True
if excel_app.Workbooks.Count == 0:
excel_app.Quit()
excel_app = None
|
[
"def",
"close_workbook",
"(",
"wb",
",",
"save",
")",
":",
"if",
"save",
"and",
"wb",
".",
"ReadOnly",
":",
"raise",
"AssertionError",
"(",
"\"workbook: '{}' is open read-only, cannot save and close\"",
".",
"format",
"(",
"wb",
".",
"Name",
")",
")",
"excel_app",
"=",
"wb",
".",
"Application",
"if",
"save",
":",
"wb",
".",
"Save",
"(",
")",
"else",
":",
"excel_app",
".",
"DisplayAlerts",
"=",
"False",
"wb",
".",
"Close",
"(",
")",
"wb",
"=",
"None",
"if",
"save",
"is",
"False",
":",
"excel_app",
".",
"DisplayAlerts",
"=",
"True",
"if",
"excel_app",
".",
"Workbooks",
".",
"Count",
"==",
"0",
":",
"excel_app",
".",
"Quit",
"(",
")",
"excel_app",
"=",
"None"
] |
all references need to be severed for excel_com pointer to be released
variables should be set to None
|
[
"all",
"references",
"need",
"to",
"be",
"severed",
"for",
"excel_com",
"pointer",
"to",
"be",
"released",
"variables",
"should",
"be",
"set",
"to",
"None"
] |
[
"\"\"\"\n all references need to be severed for excel_com pointer to be released\n variables should be set to None\n \"\"\""
] |
[
{
"param": "wb",
"type": null
},
{
"param": "save",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "wb",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "save",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def close_workbook(wb, save):
if save and wb.ReadOnly:
raise AssertionError("workbook: '{}' is open read-only, cannot save and close".format(wb.Name))
excel_app = wb.Application
if save:
wb.Save()
else:
excel_app.DisplayAlerts = False
wb.Close()
wb = None
if save is False:
excel_app.DisplayAlerts = True
if excel_app.Workbooks.Count == 0:
excel_app.Quit()
excel_app = None
| 1,194 | 22 |
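
A hypothetical session for close_workbook above, left as comments because it needs Windows, Excel and the pywin32 COM bindings (the file path is made up):

# import win32com.client
# excel = win32com.client.Dispatch("Excel.Application")
# wb = excel.Workbooks.Open(r"C:\reports\q1.xlsx")
# close_workbook(wb, save=True)  # saves, closes, and quits Excel if no workbooks remain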
66e5527ff26b37d392623ed891a50a5bafab2d2b
|
zocean/Norma
|
src/TSA-seq_normalize_with_spike_in.py
|
[
"MIT"
] |
Python
|
filterPick
|
<not_specific>
|
def filterPick(list,filter):
'''
return the items match the filter pattern
'''
return [ ( l ) for l in list for m in (filter(l),) if m]
|
return the items match the filter pattern
|
return the items match the filter pattern
|
[
"return",
"the",
"items",
"match",
"the",
"filter",
"pattern"
] |
def filterPick(list,filter):
return [ ( l ) for l in list for m in (filter(l),) if m]
|
[
"def",
"filterPick",
"(",
"list",
",",
"filter",
")",
":",
"return",
"[",
"(",
"l",
")",
"for",
"l",
"in",
"list",
"for",
"m",
"in",
"(",
"filter",
"(",
"l",
")",
",",
")",
"if",
"m",
"]"
] |
return the items match the filter pattern
|
[
"return",
"the",
"items",
"match",
"the",
"filter",
"pattern"
] |
[
"'''\n return the items match the filter pattern\n '''"
] |
[
{
"param": "list",
"type": null
},
{
"param": "filter",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filter",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def filterPick(list,filter):
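    # note: the parameter names shadow Python's built-in list and filter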
return [ ( l ) for l in list for m in (filter(l),) if m]
| 1,195 | 622 |
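
A minimal usage sketch for filterPick above (the file names are illustrative):

import re
print(filterPick(["chr1.wig", "chr2.bed", "chr10.wig"],
                 lambda s: re.search(r"\.wig$", s)))  # ['chr1.wig', 'chr10.wig']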
a97f68cc07492808b4bd7747956e514d9ff77ef1
|
peeyushsahu/GCAM_python
|
GCAM/Previous_genecheck.py
|
[
"BSD-3-Clause"
] |
Python
|
check_old_analysed_genes
|
<not_specific>
|
def check_old_analysed_genes(genenames, dataframe):
'''
This function will check if the gene has already been analysed and retrieve its occurrence.
:param genenames:
:param resource_path:
:return:
'''
#dataframe = FilesFolders.read_previous_occurrence_table(resource_path)
new_genelist = []
has_genes = []
if not dataframe is None:
for gene in genenames:
if gene not in dataframe.columns:
new_genelist.append(gene)
if gene in dataframe.columns:
has_genes.append(gene)
foundgenes_df = dataframe[has_genes]
return new_genelist, foundgenes_df
|
This function will check if the gene has already been analysed and retrieve its occurrence.
:param genenames:
:param resource_path:
:return:
|
This function will check if the gene has already been analysed and retrieve its occurrence.
|
[
"This",
"function",
"will",
"check",
"if",
"the",
"gene",
"has",
"already",
"been",
"analysed",
"and",
"retrieve",
"its",
"occurrence",
"."
] |
def check_old_analysed_genes(genenames, dataframe):
new_genelist = []
has_genes = []
if not dataframe is None:
for gene in genenames:
if gene not in dataframe.columns:
new_genelist.append(gene)
if gene in dataframe.columns:
has_genes.append(gene)
foundgenes_df = dataframe[has_genes]
return new_genelist, foundgenes_df
|
[
"def",
"check_old_analysed_genes",
"(",
"genenames",
",",
"dataframe",
")",
":",
"new_genelist",
"=",
"[",
"]",
"has_genes",
"=",
"[",
"]",
"if",
"not",
"dataframe",
"is",
"None",
":",
"for",
"gene",
"in",
"genenames",
":",
"if",
"gene",
"not",
"in",
"dataframe",
".",
"columns",
":",
"new_genelist",
".",
"append",
"(",
"gene",
")",
"if",
"gene",
"in",
"dataframe",
".",
"columns",
":",
"has_genes",
".",
"append",
"(",
"gene",
")",
"foundgenes_df",
"=",
"dataframe",
"[",
"has_genes",
"]",
"return",
"new_genelist",
",",
"foundgenes_df"
] |
This function will check if the gene has already been analysed and retrieve its occurrence.
|
[
"This",
"function",
"will",
"check",
"if",
"the",
"gene",
"has",
"already",
"been",
"analysed",
"and",
"retrieve",
"its",
"occurrence",
"."
] |
[
"'''\n This function will check if the gene has already been analysed and retrieve its occurrence.\n :param genenames:\n :param resource_path:\n :return:\n '''",
"#dataframe = FilesFolders.read_previous_occurrence_table(resource_path)"
] |
[
{
"param": "genenames",
"type": null
},
{
"param": "dataframe",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "genenames",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "dataframe",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "resource_path",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
}
|
def check_old_analysed_genes(genenames, dataframe):
new_genelist = []
has_genes = []
if not dataframe is None:
for gene in genenames:
if gene not in dataframe.columns:
new_genelist.append(gene)
if gene in dataframe.columns:
has_genes.append(gene)
foundgenes_df = dataframe[has_genes]
return new_genelist, foundgenes_df
| 1,196 | 601 |
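
A minimal usage sketch for check_old_analysed_genes above (requires pandas; gene names and counts are illustrative):

import pandas as pd

occurrence = pd.DataFrame({"TP53": [3, 1], "BRCA1": [0, 2]})
new_genes, found_df = check_old_analysed_genes(["TP53", "MYC"], occurrence)
print(new_genes)       # ['MYC']: not analysed before
print(list(found_df))  # ['TP53']: its occurrence column was retrieved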
564aa53d4742f7a3f15622b401d7ec86d25d8a90
|
gerritholl/pyatmlab
|
pyatmlab/time.py
|
[
"BSD-3-Clause"
] |
Python
|
mean_local_solar_time
|
<not_specific>
|
def mean_local_solar_time(utctime, lon):
"""Calculate mean local solar time.
Calculates the mean local solar time for a specific longitude.
This is not the true local solar time because it does not take into
account the equation of time. It is purely based on the hour angle of
the Sun. Do not use for astronomical calculations!
:param time utctime: Time in UTC. Should be a datetime.time object.
:param float lon: Longitude in degrees. Can be either in [-180, 180]
or in [0, 360].
:returns time: Time in mean local solar time.
"""
hours_offset = lon/15
dummy_datetime = datetime.datetime.combine(datetime.date.today(), utctime)
new_dummy = dummy_datetime + datetime.timedelta(hours=hours_offset)
return new_dummy.time()
|
Calculate mean local solar time.
Calculates the mean local solar time for a specific longitude.
This is not the true local solar time because it does not take into
account the equation of time. It is purely based on the hour angle of
the Sun. Do not use for astronomical calculations!
:param time utctime: Time in UTC. Should be a datetime.time object.
:param float lon: Longitude in degrees. Can be either in [-180, 180]
or in [0, 360].
:returns time: Time in mean local solar time.
|
Calculate mean local solar time.
Calculates the mean local solar time for a specific longitude.
This is not the true local solar time because it does not take into
account the equation of time. It is purely based on the hour angle of
the Sun. Do not use for astronomical calculations!
|
[
"Calculate",
"mean",
"local",
"solar",
"time",
".",
"Calculates",
"the",
"mean",
"local",
"solar",
"time",
"for",
"a",
"specific",
"longitude",
".",
"This",
"is",
"not",
"the",
"true",
"local",
"solar",
"time",
"because",
"it",
"does",
"not",
"take",
"into",
"account",
"the",
"equation",
"of",
"time",
".",
"It",
"is",
"purely",
"based",
"on",
"the",
"hour",
"angle",
"of",
"the",
"Sun",
".",
"Do",
"not",
"use",
"for",
"astronomical",
"calculations!"
] |
def mean_local_solar_time(utctime, lon):
hours_offset = lon/15
dummy_datetime = datetime.datetime.combine(datetime.date.today(), utctime)
new_dummy = dummy_datetime + datetime.timedelta(hours=hours_offset)
return new_dummy.time()
|
[
"def",
"mean_local_solar_time",
"(",
"utctime",
",",
"lon",
")",
":",
"hours_offset",
"=",
"lon",
"/",
"15",
"dummy_datetime",
"=",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
",",
"utctime",
")",
"new_dummy",
"=",
"dummy_datetime",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"hours_offset",
")",
"return",
"new_dummy",
".",
"time",
"(",
")"
] |
Calculate mean local solar time.
|
[
"Calculate",
"mean",
"local",
"solar",
"time",
"."
] |
[
"\"\"\"Calculate mean local solar time.\n\n Calculates the mean local solar time for a specific longitude.\n This is not the true local solar time because it does not take into\n account the equation of time. It is purely based on the hour angle of\n the Sun. Do not use for astronomical calculations!\n\n :param time utctime: Time in UTC. Should be a datetime.time object.\n :param float lon: Longitude in degrees. Can be either in [-180, 180]\n or in [0, 360].\n :returns time: Time in mean local solar time.\n \"\"\""
] |
[
{
"param": "utctime",
"type": null
},
{
"param": "lon",
"type": null
}
] |
{
"returns": [
{
"docstring": "Time in mean local solar time.",
"docstring_tokens": [
"Time",
"in",
"mean",
"local",
"solar",
"time",
"."
],
"type": "time"
}
],
"raises": [],
"params": [
{
"identifier": "utctime",
"type": null,
"docstring": "Time in UTC. Should be a datetime.time object.",
"docstring_tokens": [
"Time",
"in",
"UTC",
".",
"Should",
"be",
"a",
"datetime",
".",
"time",
"object",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "lon",
"type": null,
"docstring": "Longitude in degrees.",
"docstring_tokens": [
"Longitude",
"in",
"degrees",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import datetime
def mean_local_solar_time(utctime, lon):
hours_offset = lon/15
dummy_datetime = datetime.datetime.combine(datetime.date.today(), utctime)
new_dummy = dummy_datetime + datetime.timedelta(hours=hours_offset)
return new_dummy.time()
| 1,197 | 594 |
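
A minimal usage sketch for mean_local_solar_time above:

import datetime

print(mean_local_solar_time(datetime.time(12, 0), 30.0))   # 14:00:00 (30 degrees east is +2 h)
print(mean_local_solar_time(datetime.time(12, 0), -97.5))  # 05:30:00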
1bdd7abad29284a447a3d693bc03d09001db099b
|
Brece/CS50
|
Brece-cs50-2018-x-similarities-less/helpers.py
|
[
"MIT"
] |
Python
|
substrings
|
<not_specific>
|
def substrings(a, b, n):
"""Return substrings of length n in both a and b"""
# TODO
# store substrings in lists
listA = list()
listB = list()
for i in range(len(a) - n+1):
listA.append(a[i:n+i])
for i in range(len(b) - n+1):
listB.append(b[i:n+i])
# save substrings of each list element in a set
match_set = set(listA).intersection(listB)
# convert set into a list
match = list(match_set)
return match
|
Return substrings of length n in both a and b
|
Return substrings of length n in both a and b
|
[
"Return",
"substrings",
"of",
"length",
"n",
"in",
"both",
"a",
"and",
"b"
] |
def substrings(a, b, n):
listA = list()
listB = list()
for i in range(len(a) - n+1):
listA.append(a[i:n+i])
for i in range(len(b) - n+1):
listB.append(b[i:n+i])
match_set = set(listA).intersection(listB)
match = list(match_set)
return match
|
[
"def",
"substrings",
"(",
"a",
",",
"b",
",",
"n",
")",
":",
"listA",
"=",
"list",
"(",
")",
"listB",
"=",
"list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"a",
")",
"-",
"n",
"+",
"1",
")",
":",
"listA",
".",
"append",
"(",
"a",
"[",
"i",
":",
"n",
"+",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"b",
")",
"-",
"n",
"+",
"1",
")",
":",
"listB",
".",
"append",
"(",
"b",
"[",
"i",
":",
"n",
"+",
"i",
"]",
")",
"match_set",
"=",
"set",
"(",
"listA",
")",
".",
"intersection",
"(",
"listB",
")",
"match",
"=",
"list",
"(",
"match_set",
")",
"return",
"match"
] |
Return substrings of length n in both a and b
|
[
"Return",
"substrings",
"of",
"length",
"n",
"in",
"both",
"a",
"and",
"b"
] |
[
"\"\"\"Return substrings of length n in both a and b\"\"\"",
"# TODO",
"# store substrings in lists",
"# save substrings of each list element in a set",
"# convert set into a list"
] |
[
{
"param": "a",
"type": null
},
{
"param": "b",
"type": null
},
{
"param": "n",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def substrings(a, b, n):
listA = list()
listB = list()
for i in range(len(a) - n+1):
listA.append(a[i:n+i])
for i in range(len(b) - n+1):
listB.append(b[i:n+i])
match_set = set(listA).intersection(listB)
match = list(match_set)
return match
| 1,198 | 1,004 |
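
A minimal usage sketch for substrings above; the result comes from a set, so it is sorted here for a stable display:

print(sorted(substrings("abcde", "cdefg", 2)))  # ['cd', 'de']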
533e8c105c74243d22a426c35e9aba0948bf730e
|
cleverking/cloudcourse
|
core/models.py
|
[
"Apache-2.0"
] |
Python
|
AddRegisterOrder
| null |
def AddRegisterOrder(query):
"""Adds to query the ranking order of user registration entities."""
query.order('queue_time')
query.order('user')
|
Adds to query the ranking order of user registration entities.
|
Adds to query the ranking order of user registration entities.
|
[
"Adds",
"to",
"query",
"the",
"ranking",
"order",
"of",
"user",
"registration",
"entities",
"."
] |
def AddRegisterOrder(query):
query.order('queue_time')
query.order('user')
|
[
"def",
"AddRegisterOrder",
"(",
"query",
")",
":",
"query",
".",
"order",
"(",
"'queue_time'",
")",
"query",
".",
"order",
"(",
"'user'",
")"
] |
Adds to query the ranking order of user registration entities.
|
[
"Adds",
"to",
"query",
"the",
"ranking",
"order",
"of",
"user",
"registration",
"entities",
"."
] |
[
"\"\"\"Adds to query the ranking order of user registration entities.\"\"\""
] |
[
{
"param": "query",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "query",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def AddRegisterOrder(query):
query.order('queue_time')
query.order('user')
| 1,199 | 212 |
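
A hypothetical call for AddRegisterOrder above, left as comments because it needs the App Engine datastore API (the model name is made up):

# query = UserRegistration.all()  # a google.appengine.ext.db query
# AddRegisterOrder(query)         # results rank by queue_time, then by user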
2f2f5539e8f5cec66f73e4d549f3e5dfdd0463d2
|
cmd-ntrf/py-hostlist
|
hostlist/hostlist.py
|
[
"MIT"
] |
Python
|
sort_nodes
|
<not_specific>
|
def sort_nodes(nodelist):
"""
sort_nodes is a helper method that sorts the nodes in ascending order.
:param nodelist: The hostlist string.
:return: The hostlist string in ascending order.
"""
list_of_nodes = nodelist
result_hostlist = []
if type(list_of_nodes) == str:
left_br = list_of_nodes.replace("[", "")
right_br = left_br.replace("]", "")
nodelist = right_br.split(',')
count = 0
num_list = []
for node in nodelist:
iter_node = nodelist[count]
nodelist_match = r"([a-z]+)(\d+)(.*)"
machine_name = re.search(nodelist_match, iter_node)
num_list.append(int(machine_name.group(2)))
count = count + 1
num_list.sort()
# append hostname to the node numbers
hostlist_no_suffix = []
for elem in num_list:
hostlist_no_suffix.append(machine_name.group(1) + str(elem))
# append suffix to hostlist if there is one
final_hostlist = []
for elem in hostlist_no_suffix:
final_hostlist.append(elem + machine_name.group(3))
result_hostlist.append('%s' % ','.join(map(str, final_hostlist)))
return '%s' % ','.join(map(str, final_hostlist))
|
sort_nodes is a helper method that sorts the nodes in ascending order.
:param nodelist: The hostlist string.
:return: The hostlist string in ascending order.
|
sort_nodes is a helper method that sorts the nodes in ascending order.
|
[
"sort_nodes",
"is",
"a",
"helper",
"method",
"that",
"sorts",
"the",
"nodes",
"in",
"ascending",
"order",
"."
] |
def sort_nodes(nodelist):
list_of_nodes = nodelist
result_hostlist = []
if type(list_of_nodes) == str:
left_br = list_of_nodes.replace("[", "")
right_br = left_br.replace("]", "")
nodelist = right_br.split(',')
count = 0
num_list = []
for node in nodelist:
iter_node = nodelist[count]
nodelist_match = r"([a-z]+)(\d+)(.*)"
machine_name = re.search(nodelist_match, iter_node)
num_list.append(int(machine_name.group(2)))
count = count + 1
num_list.sort()
hostlist_no_suffix = []
for elem in num_list:
hostlist_no_suffix.append(machine_name.group(1) + str(elem))
final_hostlist = []
for elem in hostlist_no_suffix:
final_hostlist.append(elem + machine_name.group(3))
result_hostlist.append('%s' % ','.join(map(str, final_hostlist)))
return '%s' % ','.join(map(str, final_hostlist))
|
[
"def",
"sort_nodes",
"(",
"nodelist",
")",
":",
"list_of_nodes",
"=",
"nodelist",
"result_hostlist",
"=",
"[",
"]",
"if",
"type",
"(",
"list_of_nodes",
")",
"==",
"str",
":",
"left_br",
"=",
"list_of_nodes",
".",
"replace",
"(",
"\"[\"",
",",
"\"\"",
")",
"right_br",
"=",
"left_br",
".",
"replace",
"(",
"\"]\"",
",",
"\"\"",
")",
"nodelist",
"=",
"right_br",
".",
"split",
"(",
"','",
")",
"count",
"=",
"0",
"num_list",
"=",
"[",
"]",
"for",
"node",
"in",
"nodelist",
":",
"iter_node",
"=",
"nodelist",
"[",
"count",
"]",
"nodelist_match",
"=",
"r\"([a-z]+)(\\d+)(.*)\"",
"machine_name",
"=",
"re",
".",
"search",
"(",
"nodelist_match",
",",
"iter_node",
")",
"num_list",
".",
"append",
"(",
"int",
"(",
"machine_name",
".",
"group",
"(",
"2",
")",
")",
")",
"count",
"=",
"count",
"+",
"1",
"num_list",
".",
"sort",
"(",
")",
"hostlist_no_suffix",
"=",
"[",
"]",
"for",
"elem",
"in",
"num_list",
":",
"hostlist_no_suffix",
".",
"append",
"(",
"machine_name",
".",
"group",
"(",
"1",
")",
"+",
"str",
"(",
"elem",
")",
")",
"final_hostlist",
"=",
"[",
"]",
"for",
"elem",
"in",
"hostlist_no_suffix",
":",
"final_hostlist",
".",
"append",
"(",
"elem",
"+",
"machine_name",
".",
"group",
"(",
"3",
")",
")",
"result_hostlist",
".",
"append",
"(",
"'%s'",
"%",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"final_hostlist",
")",
")",
")",
"return",
"'%s'",
"%",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"final_hostlist",
")",
")"
] |
sort_nodes is a helper method that sorts the nodes in ascending order.
|
[
"sort_nodes",
"is",
"a",
"helper",
"method",
"that",
"sorts",
"the",
"nodes",
"in",
"ascending",
"order",
"."
] |
[
"\"\"\"\n sort_nodes is a helper method that sorts the nodes in ascending order.\n\n :param nodelist: The hostlist string.\n :return: The hostlist string in ascending order.\n \"\"\"",
"# append hostname to the node numbers",
"# append suffix to hostlist if there is one"
] |
[
{
"param": "nodelist",
"type": null
}
] |
{
"returns": [
{
"docstring": "The hostlist string in ascending order.",
"docstring_tokens": [
"The",
"hostlist",
"string",
"in",
"ascending",
"order",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "nodelist",
"type": null,
"docstring": "The hostlist string.",
"docstring_tokens": [
"The",
"hostlist",
"string",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def sort_nodes(nodelist):
list_of_nodes = nodelist
result_hostlist = []
if type(list_of_nodes) == str:
left_br = list_of_nodes.replace("[", "")
right_br = left_br.replace("]", "")
nodelist = right_br.split(',')
count = 0
num_list = []
for node in nodelist:
iter_node = nodelist[count]
nodelist_match = r"([a-z]+)(\d+)(.*)"
machine_name = re.search(nodelist_match, iter_node)
num_list.append(int(machine_name.group(2)))
count = count + 1
num_list.sort()
hostlist_no_suffix = []
for elem in num_list:
hostlist_no_suffix.append(machine_name.group(1) + str(elem))
final_hostlist = []
for elem in hostlist_no_suffix:
final_hostlist.append(elem + machine_name.group(3))
result_hostlist.append('%s' % ','.join(map(str, final_hostlist)))
return '%s' % ','.join(map(str, final_hostlist))
| 1,200 | 447 |
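
A usage sketch for `sort_nodes`, assuming the record's definition (with its `re` import) is in scope; note the helper implicitly assumes one hostname prefix per list, since `machine_name` from the last loop iteration is reused when rebuilding every entry:

print(sort_nodes("node3,node1,node2"))  # node1,node2,node3
print(sort_nodes("[c2,c10,c1]"))        # c1,c2,c10  (numeric, not lexicographic)
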
f09046f024860d57f65a07b8f3681605b9b89778
|
vdonato/streamlit
|
lib/streamlit/config.py
|
[
"Apache-2.0"
] |
Python
|
_server_enable_websocket_compression
|
bool
|
def _server_enable_websocket_compression() -> bool:
"""Enables support for websocket compression.
Default: true
"""
return True
|
Enables support for websocket compression.
Default: true
|
Enables support for websocket compression.
Default: true
|
[
"Enables",
"support",
"for",
"websocket",
"compression",
".",
"Default",
":",
"true"
] |
def _server_enable_websocket_compression() -> bool:
return True
|
[
"def",
"_server_enable_websocket_compression",
"(",
")",
"->",
"bool",
":",
"return",
"True"
] |
Enables support for websocket compression.
|
[
"Enables",
"support",
"for",
"websocket",
"compression",
"."
] |
[
"\"\"\"Enables support for websocket compression.\n\n Default: true\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def _server_enable_websocket_compression() -> bool:
return True
| 1,201 | 5 |
a341e57afa6cab929d53c82ef77da37914526f40
|
Lambda-School-Labs/sftma-data-analysis-ds
|
sfmta-api/application/route/route.py
|
[
"MIT"
] |
Python
|
extract_path
|
<not_specific>
|
def extract_path(route_data):
"""
Extracts the list of path coordinates for a route.
The raw data stores this as an unordered list of sub-routes, so this
function deciphers the order they should go in and returns a single list.
"""
# KNOWN BUG
# this approach assumed all routes were either a line or a loop.
# routes that have multiple sub-paths meeting at a point break this,
# route 24 is a current example.
# I'm committing this now to get the rest of the code out there
# extract the list of subpaths as just (lat,lon) coordinates
# also converts from string to float (raw data has strings)
path = []
for sub_path in route_data['path']:
path.append([(float(p['lat']), float(p['lon']))
for p in sub_path['point']])
# start with the first element, remove it from path
final = path[0]
path.pop(0)
# loop until the first and last coordinates in final match
counter = len(path)
done = True
while final[0] != final[-1]:
# loop through the sub-paths that we haven't yet moved to final
for i in range(len(path)):
# check if the last coordinate in final matches the first
# coordinate of another sub-path
if final[-1] == path[i][0]:
# match found, move it to final
# leave out the first coordinate to avoid duplicates
final = final + path[i][1:]
path.pop(i)
break # break the for loop
# protection against infinite loops, if the path never closes
counter -= 1
if counter < 0:
done = False
break
if not done:
# route did not connect in a loop, perform same steps backwards
# to get the rest of the line
for _ in range(len(path)):
# loop through the sub-paths that we haven't yet moved to final
for i in range(len(path)):
# check if the first coordinate in final matches the last
# coordinate of another sub-path
if final[0] == path[i][-1]:
# match found, move it to final
# leave out the last coordinate to avoid duplicates
final = path[i][:-1] + final
path.pop(i)
break # break the for loop
# some routes may have un-used sub-paths
# Route 1 for example has two sub-paths that are almost identical, with the
# same start and end points
if len(path) > 0:
print(f"WARNING: {len(path)} unused sub-paths")
# return the final result
return final
|
Extracts the list of path coordinates for a route.
The raw data stores this as an unordered list of sub-routes, so this
function deciphers the order they should go in and returns a single list.
|
Extracts the list of path coordinates for a route.
The raw data stores this as an unordered list of sub-routes, so this
function deciphers the order they should go in and returns a single list.
|
[
"Extracts",
"the",
"list",
"of",
"path",
"coordinates",
"for",
"a",
"route",
".",
"The",
"raw",
"data",
"stores",
"this",
"as",
"an",
"unordered",
"list",
"of",
"sub",
"-",
"routes",
"so",
"this",
"function",
"deciphers",
"the",
"order",
"they",
"should",
"go",
"in",
"and",
"returns",
"a",
"single",
"list",
"."
] |
def extract_path(route_data):
path = []
for sub_path in route_data['path']:
path.append([(float(p['lat']), float(p['lon']))
for p in sub_path['point']])
final = path[0]
path.pop(0)
counter = len(path)
done = True
while final[0] != final[-1]:
for i in range(len(path)):
if final[-1] == path[i][0]:
final = final + path[i][1:]
path.pop(i)
break
counter -= 1
if counter < 0:
done = False
break
if not done:
for _ in range(len(path)):
for i in range(len(path)):
if final[0] == path[i][-1]:
final = path[i][:-1] + final
path.pop(i)
break
if len(path) > 0:
print(f"WARNING: {len(path)} unused sub-paths")
return final
|
[
"def",
"extract_path",
"(",
"route_data",
")",
":",
"path",
"=",
"[",
"]",
"for",
"sub_path",
"in",
"route_data",
"[",
"'path'",
"]",
":",
"path",
".",
"append",
"(",
"[",
"(",
"float",
"(",
"p",
"[",
"'lat'",
"]",
")",
",",
"float",
"(",
"p",
"[",
"'lon'",
"]",
")",
")",
"for",
"p",
"in",
"sub_path",
"[",
"'point'",
"]",
"]",
")",
"final",
"=",
"path",
"[",
"0",
"]",
"path",
".",
"pop",
"(",
"0",
")",
"counter",
"=",
"len",
"(",
"path",
")",
"done",
"=",
"True",
"while",
"final",
"[",
"0",
"]",
"!=",
"final",
"[",
"-",
"1",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"if",
"final",
"[",
"-",
"1",
"]",
"==",
"path",
"[",
"i",
"]",
"[",
"0",
"]",
":",
"final",
"=",
"final",
"+",
"path",
"[",
"i",
"]",
"[",
"1",
":",
"]",
"path",
".",
"pop",
"(",
"i",
")",
"break",
"counter",
"-=",
"1",
"if",
"counter",
"<",
"0",
":",
"done",
"=",
"False",
"break",
"if",
"not",
"done",
":",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"if",
"final",
"[",
"0",
"]",
"==",
"path",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
":",
"final",
"=",
"path",
"[",
"i",
"]",
"[",
":",
"-",
"1",
"]",
"+",
"final",
"path",
".",
"pop",
"(",
"i",
")",
"break",
"if",
"len",
"(",
"path",
")",
">",
"0",
":",
"print",
"(",
"f\"WARNING: {len(path)} unused sub-paths\"",
")",
"return",
"final"
] |
Extracts the list of path coordinates for a route.
|
[
"Extracts",
"the",
"list",
"of",
"path",
"coordinates",
"for",
"a",
"route",
"."
] |
[
"\"\"\"\n Extracts the list of path coordinates for a route.\n\n The raw data stores this as an unordered list of sub-routes, so this\n function deciphers the order they should go in and returns a single list.\n \"\"\"",
"# KNOWN BUG",
"# this approach assumed all routes were either a line or a loop.",
"# routes that have multiple sub-paths meeting at a point break this,",
"# route 24 is a current example.",
"# I'm committing this now to get the rest of the code out there",
"# extract the list of subpaths as just (lat,lon) coordinates",
"# also converts from string to float (raw data has strings)",
"# start with the first element, remove it from path",
"# loop until the first and last coordinates in final match",
"# loop through the sub-paths that we haven't yet moved to final",
"# check if the last coordinate in final matches the first ",
"# coordinate of another sub-path",
"# match found, move it to final",
"# leave out the first coordinate to avoid duplicates",
"# break the for loop",
"# protection against infinite loops, if the path never closes",
"# route did not connect in a loop, perform same steps backwards ",
"# to get the rest of the line",
"# loop through the sub-paths that we haven't yet moved to final",
"# check if the first coordinate in final matches the last ",
"# coordinate of another sub-path",
"# match found, move it to final",
"# leave out the last coordinate to avoid duplicates",
"# break the for loop",
"# some routes may have un-used sub-paths",
"# Route 1 for example has two sub-paths that are almost identical, with the ",
"# same start and end points",
"# return the final result"
] |
[
{
"param": "route_data",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "route_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def extract_path(route_data):
path = []
for sub_path in route_data['path']:
path.append([(float(p['lat']), float(p['lon']))
for p in sub_path['point']])
final = path[0]
path.pop(0)
counter = len(path)
done = True
while final[0] != final[-1]:
for i in range(len(path)):
if final[-1] == path[i][0]:
final = final + path[i][1:]
path.pop(i)
break
counter -= 1
if counter < 0:
done = False
break
if not done:
for _ in range(len(path)):
for i in range(len(path)):
if final[0] == path[i][-1]:
final = path[i][:-1] + final
path.pop(i)
break
if len(path) > 0:
print(f"WARNING: {len(path)} unused sub-paths")
return final
| 1,202 | 1,005 |
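
A sketch for `extract_path` with a hand-built `route_data` dict in the raw shape the function expects (coordinates arrive as strings); the three sub-paths of a triangular loop are deliberately out of order:

route_data = {"path": [
    {"point": [{"lat": "1", "lon": "0"}, {"lat": "1", "lon": "1"}]},
    {"point": [{"lat": "1", "lon": "1"}, {"lat": "0", "lon": "0"}]},
    {"point": [{"lat": "0", "lon": "0"}, {"lat": "1", "lon": "0"}]},
]}
print(extract_path(route_data))
# [(1.0, 0.0), (1.0, 1.0), (0.0, 0.0), (1.0, 0.0)]  stitched into one closed loop
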
b06381f3371956232d042c5d0477c2ee0b421beb
|
manmay2/arith-tools
|
build/lib/arithtools/__init__.py
|
[
"MIT"
] |
Python
|
fibonacci_series
|
int
|
def fibonacci_series(range, num1=0, num2=1) -> int:
"""
Takes <range> from user and default parameter value is 0 and 1 and prints the fibonacci series in order.
"""
range -= 2
a, b = num1, num2
print(a)
print(b)
while(range != 0):
c = a+b
a = b
b = c
print(c)
range -= 1
return None
|
Takes <range> from user and default parameter value is 0 and 1 and prints the fibonacci series in order.
|
Takes from user and default parameter value is 0 and 1 and prints the fibonacci series in order.
|
[
"Takes",
"from",
"user",
"and",
"default",
"parameter",
"value",
"is",
"0",
"and",
"1",
"and",
"prints",
"the",
"fibonacci",
"series",
"in",
"order",
"."
] |
def fibonacci_series(range, num1=0, num2=1) -> int:
range -= 2
a, b = num1, num2
print(a)
print(b)
while(range != 0):
c = a+b
a = b
b = c
print(c)
range -= 1
return None
|
[
"def",
"fibonacci_series",
"(",
"range",
",",
"num1",
"=",
"0",
",",
"num2",
"=",
"1",
")",
"->",
"int",
":",
"range",
"-=",
"2",
"a",
",",
"b",
"=",
"num1",
",",
"num2",
"print",
"(",
"a",
")",
"print",
"(",
"b",
")",
"while",
"(",
"range",
"!=",
"0",
")",
":",
"c",
"=",
"a",
"+",
"b",
"a",
"=",
"b",
"b",
"=",
"c",
"print",
"(",
"c",
")",
"range",
"-=",
"1",
"return",
"None"
] |
Takes <range> from user and default parameter value is 0 and 1 and prints the fibonacci series in order.
|
[
"Takes",
"<range",
">",
"from",
"user",
"and",
"default",
"parameter",
"value",
"is",
"0",
"and",
"1",
"and",
"prints",
"the",
"fibonacci",
"series",
"in",
"order",
"."
] |
[
"\"\"\"\n Takes <range> from user and default parameter value is 0 and 1 and prints the fibonacci series in order.\n \"\"\""
] |
[
{
"param": "range",
"type": null
},
{
"param": "num1",
"type": null
},
{
"param": "num2",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "range",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "num1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "num2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def fibonacci_series(range, num1=0, num2=1) -> int:
range -= 2
a, b = num1, num2
print(a)
print(b)
while(range != 0):
c = a+b
a = b
b = c
print(c)
range -= 1
return None
| 1,203 | 84 |
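
`fibonacci_series` prints rather than returns, and is only well-behaved for range >= 2 (smaller values decrement past the `range != 0` exit and loop indefinitely):

fibonacci_series(5)
# prints 0, 1, 1, 2, 3, one number per line; the first two come from num1/num2
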
862ec2d5a3d541fcf9f4651744f5240dea37f9c5
|
byehack/Nuitka
|
nuitka/MainControl.py
|
[
"Apache-2.0"
] |
Python
|
pickSourceFilenames
|
<not_specific>
|
def pickSourceFilenames(source_dir, modules):
"""Pick the names for the C files of each module.
Args:
source_dir - the directory to put the module sources will be put into
modules - all the modules to build.
Returns:
Dictionary mapping modules to filenames in source_dir.
Notes:
These filenames can collide, due to e.g. mixed case usage, or there
being duplicate copies, e.g. a package named the same as the main
binary.
Conflicts are resolved by appending @<number> with a count in the
list of sorted modules. We try to be reproducible here, so we get
still good caching for external tools.
"""
collision_filenames = set()
def _getModuleFilenames(module):
base_filename = os.path.join(source_dir, "module." + module.getFullName())
# Note: Could detect if the file system is cases sensitive in source_dir
# or not, but that's probably not worth the effort. False positives do
# no harm at all. We cannot use normcase, as macOS is not using one that
# will tell us the truth.
collision_filename = base_filename.lower()
return base_filename, collision_filename
seen_filenames = set()
# First pass, check for collisions.
for module in modules:
if module.isPythonExtensionModule():
continue
_base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in seen_filenames:
collision_filenames.add(collision_filename)
seen_filenames.add(collision_filename)
# Our output.
module_filenames = {}
# Count up for colliding filenames as we go.
collision_counts = {}
# Second pass, this time sorted, so we get deterministic results. We will
# apply an "@1"/"@2",... to disambiguate the filenames.
for module in sorted(modules, key=lambda x: x.getFullName()):
if module.isPythonExtensionModule():
continue
base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in collision_filenames:
collision_counts[collision_filename] = (
collision_counts.get(collision_filename, 0) + 1
)
base_filename += "@%d" % collision_counts[collision_filename]
module_filenames[module] = base_filename + ".c"
return module_filenames
|
Pick the names for the C files of each module.
Args:
source_dir - the directory to put the module sources will be put into
modules - all the modules to build.
Returns:
Dictionary mapping modules to filenames in source_dir.
Notes:
These filenames can collide, due to e.g. mixed case usage, or there
being duplicate copies, e.g. a package named the same as the main
binary.
Conflicts are resolved by appending @<number> with a count in the
list of sorted modules. We try to be reproducible here, so we get
still good caching for external tools.
|
Pick the names for the C files of each module.
Args:
source_dir - the directory to put the module sources will be put into
modules - all the modules to build.
Dictionary mapping modules to filenames in source_dir.
These filenames can collide, due to e.g. mixed case usage, or there
being duplicate copies, e.g. a package named the same as the main
binary.
Conflicts are resolved by appending @ with a count in the
list of sorted modules. We try to be reproducible here, so we get
still good caching for external tools.
|
[
"Pick",
"the",
"names",
"for",
"the",
"C",
"files",
"of",
"each",
"module",
".",
"Args",
":",
"source_dir",
"-",
"the",
"directory",
"to",
"put",
"the",
"module",
"sources",
"will",
"be",
"put",
"into",
"modules",
"-",
"all",
"the",
"modules",
"to",
"build",
".",
"Dictionary",
"mapping",
"modules",
"to",
"filenames",
"in",
"source_dir",
".",
"These",
"filenames",
"can",
"collide",
"due",
"to",
"e",
".",
"g",
".",
"mixed",
"case",
"usage",
"or",
"there",
"being",
"duplicate",
"copies",
"e",
".",
"g",
".",
"a",
"package",
"named",
"the",
"same",
"as",
"the",
"main",
"binary",
".",
"Conflicts",
"are",
"resolved",
"by",
"appending",
"@",
"with",
"a",
"count",
"in",
"the",
"list",
"of",
"sorted",
"modules",
".",
"We",
"try",
"to",
"be",
"reproducible",
"here",
"so",
"we",
"get",
"still",
"good",
"caching",
"for",
"external",
"tools",
"."
] |
def pickSourceFilenames(source_dir, modules):
collision_filenames = set()
def _getModuleFilenames(module):
base_filename = os.path.join(source_dir, "module." + module.getFullName())
collision_filename = base_filename.lower()
return base_filename, collision_filename
seen_filenames = set()
for module in modules:
if module.isPythonExtensionModule():
continue
_base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in seen_filenames:
collision_filenames.add(collision_filename)
seen_filenames.add(collision_filename)
module_filenames = {}
collision_counts = {}
for module in sorted(modules, key=lambda x: x.getFullName()):
if module.isPythonExtensionModule():
continue
base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in collision_filenames:
collision_counts[collision_filename] = (
collision_counts.get(collision_filename, 0) + 1
)
base_filename += "@%d" % collision_counts[collision_filename]
module_filenames[module] = base_filename + ".c"
return module_filenames
|
[
"def",
"pickSourceFilenames",
"(",
"source_dir",
",",
"modules",
")",
":",
"collision_filenames",
"=",
"set",
"(",
")",
"def",
"_getModuleFilenames",
"(",
"module",
")",
":",
"base_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"source_dir",
",",
"\"module.\"",
"+",
"module",
".",
"getFullName",
"(",
")",
")",
"collision_filename",
"=",
"base_filename",
".",
"lower",
"(",
")",
"return",
"base_filename",
",",
"collision_filename",
"seen_filenames",
"=",
"set",
"(",
")",
"for",
"module",
"in",
"modules",
":",
"if",
"module",
".",
"isPythonExtensionModule",
"(",
")",
":",
"continue",
"_base_filename",
",",
"collision_filename",
"=",
"_getModuleFilenames",
"(",
"module",
")",
"if",
"collision_filename",
"in",
"seen_filenames",
":",
"collision_filenames",
".",
"add",
"(",
"collision_filename",
")",
"seen_filenames",
".",
"add",
"(",
"collision_filename",
")",
"module_filenames",
"=",
"{",
"}",
"collision_counts",
"=",
"{",
"}",
"for",
"module",
"in",
"sorted",
"(",
"modules",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"getFullName",
"(",
")",
")",
":",
"if",
"module",
".",
"isPythonExtensionModule",
"(",
")",
":",
"continue",
"base_filename",
",",
"collision_filename",
"=",
"_getModuleFilenames",
"(",
"module",
")",
"if",
"collision_filename",
"in",
"collision_filenames",
":",
"collision_counts",
"[",
"collision_filename",
"]",
"=",
"(",
"collision_counts",
".",
"get",
"(",
"collision_filename",
",",
"0",
")",
"+",
"1",
")",
"base_filename",
"+=",
"\"@%d\"",
"%",
"collision_counts",
"[",
"collision_filename",
"]",
"module_filenames",
"[",
"module",
"]",
"=",
"base_filename",
"+",
"\".c\"",
"return",
"module_filenames"
] |
Pick the names for the C files of each module.
|
[
"Pick",
"the",
"names",
"for",
"the",
"C",
"files",
"of",
"each",
"module",
"."
] |
[
"\"\"\"Pick the names for the C files of each module.\n\n Args:\n source_dir - the directory to put the module sources will be put into\n modules - all the modules to build.\n\n Returns:\n Dictionary mapping modules to filenames in source_dir.\n\n Notes:\n These filenames can collide, due to e.g. mixed case usage, or there\n being duplicate copies, e.g. a package named the same as the main\n binary.\n\n Conflicts are resolved by appending @<number> with a count in the\n list of sorted modules. We try to be reproducible here, so we get\n still good caching for external tools.\n \"\"\"",
"# Note: Could detect if the file system is cases sensitive in source_dir",
"# or not, but that's probably not worth the effort. False positives do",
"# no harm at all. We cannot use normcase, as macOS is not using one that",
"# will tell us the truth.",
"# First pass, check for collisions.",
"# Our output.",
"# Count up for colliding filenames as we go.",
"# Second pass, this time sorted, so we get deterministic results. We will",
"# apply an \"@1\"/\"@2\",... to disambiguate the filenames."
] |
[
{
"param": "source_dir",
"type": null
},
{
"param": "modules",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "source_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "modules",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def pickSourceFilenames(source_dir, modules):
collision_filenames = set()
def _getModuleFilenames(module):
base_filename = os.path.join(source_dir, "module." + module.getFullName())
collision_filename = base_filename.lower()
return base_filename, collision_filename
seen_filenames = set()
for module in modules:
if module.isPythonExtensionModule():
continue
_base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in seen_filenames:
collision_filenames.add(collision_filename)
seen_filenames.add(collision_filename)
module_filenames = {}
collision_counts = {}
for module in sorted(modules, key=lambda x: x.getFullName()):
if module.isPythonExtensionModule():
continue
base_filename, collision_filename = _getModuleFilenames(module)
if collision_filename in collision_filenames:
collision_counts[collision_filename] = (
collision_counts.get(collision_filename, 0) + 1
)
base_filename += "@%d" % collision_counts[collision_filename]
module_filenames[module] = base_filename + ".c"
return module_filenames
| 1,204 | 468 |
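
A sketch for `pickSourceFilenames` using stub module objects (`StubModule` is illustrative, and the record's `os` import is assumed in scope); two names that differ only in case trigger the @1/@2 disambiguation, POSIX paths shown:

class StubModule:
    # Minimal stand-in exposing the two methods the function touches.
    def __init__(self, name):
        self._name = name

    def getFullName(self):
        return self._name

    def isPythonExtensionModule(self):
        return False

mods = [StubModule("pkg.Foo"), StubModule("pkg.foo")]  # collide case-insensitively
for mod, filename in pickSourceFilenames("src", mods).items():
    print(mod.getFullName(), "->", filename)
# pkg.Foo -> src/module.pkg.Foo@1.c
# pkg.foo -> src/module.pkg.foo@2.c
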
b0ecfd880ae100f93dd41736898ebfd1f0bdebe7
|
Luca96/face-clustering
|
py/ul19_training.py
|
[
"MIT"
] |
Python
|
hamming_distance
|
<not_specific>
|
def hamming_distance(x, y):
'''Hamming distance: use to get the number of mis-predicted features'''
assert(len(x) == len(y))
count = 0
for i in range(len(x)):
if x[i] != y[i]:
count += 1
return count
|
Hamming distance: use to get the number of mis-predicted features
|
Hamming distance: use to get the number of mis-predicted features
|
[
"Hamming",
"distance",
":",
"use",
"to",
"get",
"the",
"number",
"of",
"mis",
"-",
"predicted",
"features"
] |
def hamming_distance(x, y):
assert(len(x) == len(y))
count = 0
for i in range(len(x)):
if x[i] != y[i]:
count += 1
return count
|
[
"def",
"hamming_distance",
"(",
"x",
",",
"y",
")",
":",
"assert",
"(",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"y",
")",
")",
"count",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
")",
")",
":",
"if",
"x",
"[",
"i",
"]",
"!=",
"y",
"[",
"i",
"]",
":",
"count",
"+=",
"1",
"return",
"count"
] |
Hamming distance: use to get the number of mis-predicted features
|
[
"Hamming",
"distance",
":",
"use",
"to",
"get",
"the",
"number",
"of",
"mis",
"-",
"predicted",
"features"
] |
[
"'''Hamming distance: use to get the number of mis-predicted features'''"
] |
[
{
"param": "x",
"type": null
},
{
"param": "y",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "y",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def hamming_distance(x, y):
assert(len(x) == len(y))
count = 0
for i in range(len(x)):
if x[i] != y[i]:
count += 1
return count
| 1,205 | 273 |
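
`hamming_distance` works on any two equal-length sequences, strings included:

print(hamming_distance([1, 0, 1, 1], [1, 1, 1, 0]))  # 2
print(hamming_distance("karolin", "kathrin"))        # 3
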
b83f06edc818ce4cb74c5bf22eee849140398fbc
|
rbalik/asn1tools
|
asn1tools/codecs/__init__.py
|
[
"MIT"
] |
Python
|
format_or
|
<not_specific>
|
def format_or(items):
"""Return a string of comma separated items, with the last to items
separated by "or".
[1, 2, 3] -> "1, 2 or 3"
"""
formatted_items = []
for item in items:
try:
item = "'" + item + "'"
except TypeError:
item = str(item)
formatted_items.append(item)
if len(items) == 1:
return formatted_items[0]
else:
return '{} or {}'.format(', '.join(formatted_items[:-1]),
formatted_items[-1])
|
Return a string of comma separated items, with the last to items
separated by "or".
[1, 2, 3] -> "1, 2 or 3"
|
Return a string of comma separated items, with the last to items
separated by "or".
|
[
"Return",
"a",
"string",
"of",
"comma",
"separated",
"items",
"with",
"the",
"last",
"to",
"items",
"separated",
"by",
"\"",
"or",
"\"",
"."
] |
def format_or(items):
formatted_items = []
for item in items:
try:
item = "'" + item + "'"
except TypeError:
item = str(item)
formatted_items.append(item)
if len(items) == 1:
return formatted_items[0]
else:
return '{} or {}'.format(', '.join(formatted_items[:-1]),
formatted_items[-1])
|
[
"def",
"format_or",
"(",
"items",
")",
":",
"formatted_items",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"try",
":",
"item",
"=",
"\"'\"",
"+",
"item",
"+",
"\"'\"",
"except",
"TypeError",
":",
"item",
"=",
"str",
"(",
"item",
")",
"formatted_items",
".",
"append",
"(",
"item",
")",
"if",
"len",
"(",
"items",
")",
"==",
"1",
":",
"return",
"formatted_items",
"[",
"0",
"]",
"else",
":",
"return",
"'{} or {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"formatted_items",
"[",
":",
"-",
"1",
"]",
")",
",",
"formatted_items",
"[",
"-",
"1",
"]",
")"
] |
Return a string of comma separated items, with the last to items
separated by "or".
|
[
"Return",
"a",
"string",
"of",
"comma",
"separated",
"items",
"with",
"the",
"last",
"to",
"items",
"separated",
"by",
"\"",
"or",
"\"",
"."
] |
[
"\"\"\"Return a string of comma separated items, with the last to items\n separated by \"or\".\n\n [1, 2, 3] -> \"1, 2 or 3\"\n\n \"\"\""
] |
[
{
"param": "items",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "items",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def format_or(items):
formatted_items = []
for item in items:
try:
item = "'" + item + "'"
except TypeError:
item = str(item)
formatted_items.append(item)
if len(items) == 1:
return formatted_items[0]
else:
return '{} or {}'.format(', '.join(formatted_items[:-1]),
formatted_items[-1])
| 1,206 | 697 |
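
`format_or` quotes strings and falls back to `str()` for anything that cannot be concatenated with a quote character:

print(format_or(["apple"]))        # 'apple'
print(format_or(["a", "b", "c"]))  # 'a', 'b' or 'c'
print(format_or([1, 2, 3]))        # 1, 2 or 3
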
345e2c2a3b9e135f2852a7472768e760f1118acd
|
djpasseyjr/rescomp
|
rescomp/optimizer/optimizer_functions.py
|
[
"MIT"
] |
Python
|
make_initial
|
<not_specific>
|
def make_initial(pred_type, rcomp, U):
""" Create initial condition for the type of prediction. Either create a reservoir node
initial condition or use a state space initial condition. Returned as a dictionary with the
parameter's name mapped to its value.
"""
if pred_type == "continue":
# Continue evolution of reservoir nodes from current node state
return {"r0": rcomp.r0}
else:
# Use the state space initial condition. (Reservoir will map it to a reservoir node condition)
return {"u0": U[0]}
|
Create initial condition for the type of prediction. Either create a reservoir node
initial condition or use a state space initial condition. Returned as a dictionary with the
parameter's name mapped to its value.
|
Create initial condition for the type of prediction. Either create a reservoir node
initial condition or use a state space initial condition. Returned as a dictionary with the
parameter's name mapped to its value.
|
[
"Create",
"initial",
"condition",
"for",
"the",
"type",
"of",
"prediction",
".",
"Either",
"create",
"a",
"reservoir",
"node",
"initial",
"condition",
"or",
"use",
"a",
"state",
"space",
"initial",
"condition",
".",
"Returned",
"as",
"a",
"dictionary",
"with",
"the",
"parameter",
"'",
"s",
"name",
"mapped",
"to",
"its",
"value",
"."
] |
def make_initial(pred_type, rcomp, U):
if pred_type == "continue":
return {"r0": rcomp.r0}
else:
return {"u0": U[0]}
|
[
"def",
"make_initial",
"(",
"pred_type",
",",
"rcomp",
",",
"U",
")",
":",
"if",
"pred_type",
"==",
"\"continue\"",
":",
"return",
"{",
"\"r0\"",
":",
"rcomp",
".",
"r0",
"}",
"else",
":",
"return",
"{",
"\"u0\"",
":",
"U",
"[",
"0",
"]",
"}"
] |
Create initial condition for the type of prediction.
|
[
"Create",
"initial",
"condition",
"for",
"the",
"type",
"of",
"prediction",
"."
] |
[
"\"\"\" Create initial condition for the type of prediction. Either create a reservoir node\n initial condition or use a state space initial condition. Returned as a dictionary with the\n parameter's name mapped to its value.\n \"\"\"",
"# Continue evolution of reservoir nodes from current node state",
"# Use the state space initial condition. (Reservoir will map it to a reservoir node condition)"
] |
[
{
"param": "pred_type",
"type": null
},
{
"param": "rcomp",
"type": null
},
{
"param": "U",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "pred_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rcomp",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "U",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def make_initial(pred_type, rcomp, U):
if pred_type == "continue":
return {"r0": rcomp.r0}
else:
return {"u0": U[0]}
| 1,208 | 359 |
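
A sketch for `make_initial` with a namespace standing in for the reservoir computer; only its `r0` attribute is ever read:

from types import SimpleNamespace

rcomp = SimpleNamespace(r0=[0.1, 0.2])     # stand-in reservoir node state
U = [[1.0, 0.0], [0.9, 0.1]]               # state-space trajectory

print(make_initial("continue", rcomp, U))  # {'r0': [0.1, 0.2]}
print(make_initial("random", rcomp, U))    # {'u0': [1.0, 0.0]}
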
9b3bbc071c147b6fd9fdcddf67beb2ffd4eb75ca
|
Farfetch/maestro
|
web/api/maestro_api/libs/utils.py
|
[
"MIT"
] |
Python
|
str_to_list
|
list
|
def str_to_list(str_value) -> list:
"""
Convert string to list with one element
or just return received list
"""
return [str_value] if not isinstance(str_value, list) else str_value
|
Convert string to list with one element
or just return received list
|
Convert string to list with one element
or just return received list
|
[
"Convert",
"string",
"to",
"list",
"with",
"one",
"element",
"or",
"just",
"return",
"received",
"list"
] |
def str_to_list(str_value) -> list:
return [str_value] if not isinstance(str_value, list) else str_value
|
[
"def",
"str_to_list",
"(",
"str_value",
")",
"->",
"list",
":",
"return",
"[",
"str_value",
"]",
"if",
"not",
"isinstance",
"(",
"str_value",
",",
"list",
")",
"else",
"str_value"
] |
Convert string to list with one element
or just return received list
|
[
"Convert",
"string",
"to",
"list",
"with",
"one",
"element",
"or",
"just",
"return",
"received",
"list"
] |
[
"\"\"\"\n Convert string to list with one element\n or just return received list\n \"\"\""
] |
[
{
"param": "str_value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "str_value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def str_to_list(str_value) -> list:
return [str_value] if not isinstance(str_value, list) else str_value
| 1,209 | 593 |
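
Despite the name, `str_to_list` wraps any non-list value, not just strings:

print(str_to_list("solo"))      # ['solo']
print(str_to_list(["a", "b"]))  # ['a', 'b']  lists pass through unchanged
print(str_to_list(42))          # [42]
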
9497573eb6f2e617e2c3062c81315fbfa7bb011a
|
MacHu-GWU/aws_text_insight-project
|
aws_text_insight/lbd/hdl_3_textract_output_to_text.py
|
[
"BSD-2-Clause"
] |
Python
|
merge_textract_result
|
<not_specific>
|
def merge_textract_result(
s3_client,
s3_bucket_input, s3_prefix_input,
s3_bucket_output, s3_key_output,
):
"""
Merge multiple textract result output files into one single text file.
:param s3_client: s3 boto3 client
:param s3_bucket_input: AWS Textract ``start_document_text_detection`` API
output s3 bucket
:param s3_prefix_input: AWS Textract ``start_document_text_detection`` API
output prefix.
:param s3_bucket_output: where you want to store the merged txt file
:param s3_key_output: where you want to store the merged txt file
"""
ls_obj_response = s3_client.list_objects_v2(
Bucket=s3_bucket_input, Prefix=s3_prefix_input, MaxKeys=1000
)
lines = list()
for content in ls_obj_response.get("Contents", []):
s3_key = content["Key"]
if s3_key.endswith(".s3_access_check"):
continue
get_obj_response = s3_client.get_object(
Bucket=s3_bucket_input, Key=s3_key
)
data = json.loads(get_obj_response["Body"].read())
for block in data["Blocks"]:
block_type = block["BlockType"]
text = block["Text"]
if block_type == "LINE":
lines.append(text)
body = "\n".join(lines)
s3_client.put_object(
Bucket=s3_bucket_output,
Key=s3_key_output,
Body=body,
)
return body
|
Merge multiple textract result output files into one single text file.
:param s3_client: s3 boto3 client
:param s3_bucket_input: AWS Textract ``start_document_text_detection`` API
output s3 bucket
:param s3_prefix_input: AWS Textract ``start_document_text_detection`` API
output prefix.
:param s3_bucket_output: where you want to store the merged txt file
:param s3_key_output: where you want to store the merged txt file
|
Merge multiple textract result output files into one single text file.
|
[
"Merge",
"multiple",
"textract",
"result",
"output",
"files",
"into",
"one",
"single",
"text",
"file",
"."
] |
def merge_textract_result(
s3_client,
s3_bucket_input, s3_prefix_input,
s3_bucket_output, s3_key_output,
):
ls_obj_response = s3_client.list_objects_v2(
Bucket=s3_bucket_input, Prefix=s3_prefix_input, MaxKeys=1000
)
lines = list()
for content in ls_obj_response.get("Contents", []):
s3_key = content["Key"]
if s3_key.endswith(".s3_access_check"):
continue
get_obj_response = s3_client.get_object(
Bucket=s3_bucket_input, Key=s3_key
)
data = json.loads(get_obj_response["Body"].read())
for block in data["Blocks"]:
block_type = block["BlockType"]
text = block["Text"]
if block_type == "LINE":
lines.append(text)
body = "\n".join(lines)
s3_client.put_object(
Bucket=s3_bucket_output,
Key=s3_key_output,
Body=body,
)
return body
|
[
"def",
"merge_textract_result",
"(",
"s3_client",
",",
"s3_bucket_input",
",",
"s3_prefix_input",
",",
"s3_bucket_output",
",",
"s3_key_output",
",",
")",
":",
"ls_obj_response",
"=",
"s3_client",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"s3_bucket_input",
",",
"Prefix",
"=",
"s3_prefix_input",
",",
"MaxKeys",
"=",
"1000",
")",
"lines",
"=",
"list",
"(",
")",
"for",
"content",
"in",
"ls_obj_response",
".",
"get",
"(",
"\"Contents\"",
",",
"[",
"]",
")",
":",
"s3_key",
"=",
"content",
"[",
"\"Key\"",
"]",
"if",
"s3_key",
".",
"endswith",
"(",
"\".s3_access_check\"",
")",
":",
"continue",
"get_obj_response",
"=",
"s3_client",
".",
"get_object",
"(",
"Bucket",
"=",
"s3_bucket_input",
",",
"Key",
"=",
"s3_key",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"get_obj_response",
"[",
"\"Body\"",
"]",
".",
"read",
"(",
")",
")",
"for",
"block",
"in",
"data",
"[",
"\"Blocks\"",
"]",
":",
"block_type",
"=",
"block",
"[",
"\"BlockType\"",
"]",
"text",
"=",
"block",
"[",
"\"Text\"",
"]",
"if",
"block_type",
"==",
"\"LINE\"",
":",
"lines",
".",
"append",
"(",
"text",
")",
"body",
"=",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
"s3_client",
".",
"put_object",
"(",
"Bucket",
"=",
"s3_bucket_output",
",",
"Key",
"=",
"s3_key_output",
",",
"Body",
"=",
"body",
",",
")",
"return",
"body"
] |
Merge multiple textract result output files into one single text file.
|
[
"Merge",
"multiple",
"textract",
"result",
"output",
"files",
"into",
"one",
"single",
"text",
"file",
"."
] |
[
"\"\"\"\n Merge multiple textract result output files into one single text file.\n\n :param s3_client: s3 boto3 client\n :param s3_bucket_input: AWS Textract ``start_document_text_detection`` API\n output s3 bucket\n :param s3_prefix_input: AWS Textract ``start_document_text_detection`` API\n output prefix.\n :param s3_bucket_output: where you want to store the merged txt file\n :param s3_key_output: where you want to store the merged txt file\n \"\"\""
] |
[
{
"param": "s3_client",
"type": null
},
{
"param": "s3_bucket_input",
"type": null
},
{
"param": "s3_prefix_input",
"type": null
},
{
"param": "s3_bucket_output",
"type": null
},
{
"param": "s3_key_output",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "s3_client",
"type": null,
"docstring": "s3 boto3 client",
"docstring_tokens": [
"s3",
"boto3",
"client"
],
"default": null,
"is_optional": null
},
{
"identifier": "s3_bucket_input",
"type": null,
"docstring": "AWS Textract ``start_document_text_detection`` API\noutput s3 bucket",
"docstring_tokens": [
"AWS",
"Textract",
"`",
"`",
"start_document_text_detection",
"`",
"`",
"API",
"output",
"s3",
"bucket"
],
"default": null,
"is_optional": null
},
{
"identifier": "s3_prefix_input",
"type": null,
"docstring": "AWS Textract ``start_document_text_detection`` API\noutput prefix.",
"docstring_tokens": [
"AWS",
"Textract",
"`",
"`",
"start_document_text_detection",
"`",
"`",
"API",
"output",
"prefix",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "s3_bucket_output",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "s3_key_output",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def merge_textract_result(
s3_client,
s3_bucket_input, s3_prefix_input,
s3_bucket_output, s3_key_output,
):
ls_obj_response = s3_client.list_objects_v2(
Bucket=s3_bucket_input, Prefix=s3_prefix_input, MaxKeys=1000
)
lines = list()
for content in ls_obj_response.get("Contents", []):
s3_key = content["Key"]
if s3_key.endswith(".s3_access_check"):
continue
get_obj_response = s3_client.get_object(
Bucket=s3_bucket_input, Key=s3_key
)
data = json.loads(get_obj_response["Body"].read())
for block in data["Blocks"]:
block_type = block["BlockType"]
text = block["Text"]
if block_type == "LINE":
lines.append(text)
body = "\n".join(lines)
s3_client.put_object(
Bucket=s3_bucket_output,
Key=s3_key_output,
Body=body,
)
return body
| 1,210 | 468 |
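
A hedged call sketch for `merge_textract_result` with a real boto3 client; every bucket and key name below is a placeholder. Note that `list_objects_v2` returns at most 1000 keys per call, so jobs with more output files than that would need pagination:

import boto3

s3_client = boto3.client("s3")
text = merge_textract_result(
    s3_client,
    s3_bucket_input="my-textract-output",  # placeholder bucket
    s3_prefix_input="jobs/abc123/",        # placeholder prefix
    s3_bucket_output="my-merged-text",     # placeholder bucket
    s3_key_output="docs/abc123.txt",       # placeholder key
)
print(text.splitlines()[:3])  # first few extracted lines
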
a5afb2dae95dd2639f86cf667135106107e75653
|
Justintime50/adventofcode-2020
|
adventofcode/_2021/day3/challenge.py
|
[
"MIT"
] |
Python
|
calculate_answer1_rating
|
int
|
def calculate_answer1_rating(data: list[str], context: str) -> int:
"""Calculate the gamma/epsilon rating by taking the most common bit in each position and converting to decimal."""
most_common_bit_binary_number = ''
transposed_lists = list(map(list, zip(*data)))
for bit_group in transposed_lists:
ones = bit_group.count('1')
zeros = bit_group.count('0')
if ones >= zeros and context == 'gamma':
most_common_bit_binary_number += '1'
elif ones < zeros and context == 'epsilon':
most_common_bit_binary_number += '1'
else:
most_common_bit_binary_number += '0'
binary_to_decimal = int(most_common_bit_binary_number, 2)
return binary_to_decimal
|
Calculate the gamma/epsilon rating by taking the most common bit in each position and converting to decimal.
|
Calculate the gamma/epsilon rating by taking the most common bit in each position and converting to decimal.
|
[
"Calculate",
"the",
"gamma",
"/",
"epsilon",
"rating",
"by",
"taking",
"the",
"most",
"common",
"bit",
"in",
"each",
"position",
"and",
"converting",
"to",
"decimal",
"."
] |
def calculate_answer1_rating(data: list[str], context: str) -> int:
most_common_bit_binary_number = ''
transposed_lists = list(map(list, zip(*data)))
for bit_group in transposed_lists:
ones = bit_group.count('1')
zeros = bit_group.count('0')
if ones >= zeros and context == 'gamma':
most_common_bit_binary_number += '1'
elif ones < zeros and context == 'epsilon':
most_common_bit_binary_number += '1'
else:
most_common_bit_binary_number += '0'
binary_to_decimal = int(most_common_bit_binary_number, 2)
return binary_to_decimal
|
[
"def",
"calculate_answer1_rating",
"(",
"data",
":",
"list",
"[",
"str",
"]",
",",
"context",
":",
"str",
")",
"->",
"int",
":",
"most_common_bit_binary_number",
"=",
"''",
"transposed_lists",
"=",
"list",
"(",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"data",
")",
")",
")",
"for",
"bit_group",
"in",
"transposed_lists",
":",
"ones",
"=",
"bit_group",
".",
"count",
"(",
"'1'",
")",
"zeros",
"=",
"bit_group",
".",
"count",
"(",
"'0'",
")",
"if",
"ones",
">=",
"zeros",
"and",
"context",
"==",
"'gamma'",
":",
"most_common_bit_binary_number",
"+=",
"'1'",
"elif",
"ones",
"<",
"zeros",
"and",
"context",
"==",
"'epsilon'",
":",
"most_common_bit_binary_number",
"+=",
"'1'",
"else",
":",
"most_common_bit_binary_number",
"+=",
"'0'",
"binary_to_decimal",
"=",
"int",
"(",
"most_common_bit_binary_number",
",",
"2",
")",
"return",
"binary_to_decimal"
] |
Calculate the gamma/epsilon rating by taking the most common bit in each position and converting to decimal.
|
[
"Calculate",
"the",
"gamma",
"/",
"epsilon",
"rating",
"by",
"taking",
"the",
"most",
"common",
"bit",
"in",
"each",
"position",
"and",
"converting",
"to",
"decimal",
"."
] |
[
"\"\"\"Calculate the gamma/epsilon rating by taking the most common bit in each position and converting to decimal.\"\"\""
] |
[
{
"param": "data",
"type": "list[str]"
},
{
"param": "context",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": "list[str]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "context",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def calculate_answer1_rating(data: list[str], context: str) -> int:
most_common_bit_binary_number = ''
transposed_lists = list(map(list, zip(*data)))
for bit_group in transposed_lists:
ones = bit_group.count('1')
zeros = bit_group.count('0')
if ones >= zeros and context == 'gamma':
most_common_bit_binary_number += '1'
elif ones < zeros and context == 'epsilon':
most_common_bit_binary_number += '1'
else:
most_common_bit_binary_number += '0'
binary_to_decimal = int(most_common_bit_binary_number, 2)
return binary_to_decimal
| 1,211 | 355 |
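
`calculate_answer1_rating` reproduces the Advent of Code 2021 day 3 sample (the `list[str]` annotation needs Python 3.9+):

diagnostics = ["00100", "11110", "10110", "10111", "10101", "01111",
               "00111", "11100", "10000", "11001", "00010", "01010"]

gamma = calculate_answer1_rating(diagnostics, "gamma")      # 22 (0b10110)
epsilon = calculate_answer1_rating(diagnostics, "epsilon")  # 9  (0b01001)
print(gamma * epsilon)                                      # 198
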
0859fea0c4c9a4b6d08fe4992ee2bc4271cdb831
|
manishmeganathan/flask-audioserver
|
audiofiles/audiofiles.py
|
[
"MIT"
] |
Python
|
validate_string
|
typing.Tuple[bool, str]
|
def validate_string(cls, string: str) -> typing.Tuple[bool, str]:
"""
A classmethod that determines if a given object is a positive integer.
Returns a validity bool and and an error str in a tuple.
"""
if not isinstance(string, str):
return False, "not an str"
if len(string) > 100:
return False, "str too long"
return True, "null"
|
A classmethod that determines if a given object is a positive integer.
Returns a validity bool and and an error str in a tuple.
|
A classmethod that determines if a given object is a positive integer.
Returns a validity bool and and an error str in a tuple.
|
[
"A",
"classmethod",
"that",
"determines",
"if",
"a",
"given",
"object",
"is",
"a",
"positive",
"integer",
".",
"Returns",
"a",
"validity",
"bool",
"and",
"and",
"an",
"error",
"str",
"in",
"a",
"tuple",
"."
] |
def validate_string(cls, string: str) -> typing.Tuple[bool, str]:
if not isinstance(string, str):
return False, "not an str"
if len(string) > 100:
return False, "str too long"
return True, "null"
|
[
"def",
"validate_string",
"(",
"cls",
",",
"string",
":",
"str",
")",
"->",
"typing",
".",
"Tuple",
"[",
"bool",
",",
"str",
"]",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"str",
")",
":",
"return",
"False",
",",
"\"not an str\"",
"if",
"len",
"(",
"string",
")",
">",
"100",
":",
"return",
"False",
",",
"\"str too long\"",
"return",
"True",
",",
"\"null\""
] |
A classmethod that determines if a given object is a positive integer.
|
[
"A",
"classmethod",
"that",
"determines",
"if",
"a",
"given",
"object",
"is",
"a",
"positive",
"integer",
"."
] |
[
"\"\"\"\n A classmethod that determines if a given object is a positive integer.\n Returns a validity bool and and an error str in a tuple.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "string",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "string",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import typing
def validate_string(cls, string: str) -> typing.Tuple[bool, str]:
if not isinstance(string, str):
return False, "not an str"
if len(string) > 100:
return False, "str too long"
return True, "null"
| 1,212 | 41 |
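
`validate_string` was extracted without its classmethod decorator, so the `cls` slot is filled explicitly here (it is unused); despite the docstring's mention of integers, the body validates strings:

print(validate_string(None, "hello"))    # (True, 'null')
print(validate_string(None, 42))         # (False, 'not an str')
print(validate_string(None, "x" * 101))  # (False, 'str too long')
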
09e611c2c6905c29c49d807a2e0bb2ac87e59b3a
|
zanecodes/crds
|
crds/core/generic_tpn.py
|
[
"BSD-3-Clause"
] |
Python
|
_remove_quotes
|
<not_specific>
|
def _remove_quotes(values):
"""Remove any quotes from quoted values."""
removed = []
for value in values:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
removed.append(value)
return removed
|
Remove any quotes from quoted values.
|
Remove any quotes from quoted values.
|
[
"Remove",
"any",
"quotes",
"from",
"quoted",
"values",
"."
] |
def _remove_quotes(values):
removed = []
for value in values:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
removed.append(value)
return removed
|
[
"def",
"_remove_quotes",
"(",
"values",
")",
":",
"removed",
"=",
"[",
"]",
"for",
"value",
"in",
"values",
":",
"if",
"value",
".",
"startswith",
"(",
"'\"'",
")",
"and",
"value",
".",
"endswith",
"(",
"'\"'",
")",
":",
"value",
"=",
"value",
"[",
"1",
":",
"-",
"1",
"]",
"removed",
".",
"append",
"(",
"value",
")",
"return",
"removed"
] |
Remove any quotes from quoted values.
|
[
"Remove",
"any",
"quotes",
"from",
"quoted",
"values",
"."
] |
[
"\"\"\"Remove any quotes from quoted values.\"\"\""
] |
[
{
"param": "values",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "values",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _remove_quotes(values):
removed = []
for value in values:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
removed.append(value)
return removed
| 1,213 | 690 |
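
Only values that both start and end with a double quote are stripped by `_remove_quotes`:

print(_remove_quotes(['"CLEAR"', 'OPAQUE', '"N/A"']))
# ['CLEAR', 'OPAQUE', 'N/A']
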
a28fb38814b0436021034ee0a4608c78aba86e95
|
Leobouloc/Merge-Machine
|
merge_machine/es_labeller.py
|
[
"MIT"
] |
Python
|
print_name
|
<not_specific>
|
def print_name(function):
"""Print the function being run with indentation for inclusion.
To be used as a decorator.
"""
def wrapper(*args, **kwargs):
global INDENT_LVL
my_id = random.randint(0, 100000)
ind = 2 * INDENT_LVL * ' '
INDENT_LVL += 1
print('{0}<< Starting: {1} ({2})'.format(ind, function.__name__, my_id))
start_time = time.time()
res = function(*args, **kwargs)
INDENT_LVL -= 1
print('{0}>> Ending: {1} ({2}) / Took {3}s'.format(ind, \
function.__name__, my_id, time.time()-start_time))
return res
return wrapper
|
Print the function being run with indentation for inclusion.
To be used as a decorator.
|
Print the function being run with indentation for inclusion.
To be used as a decorator.
|
[
"Print",
"the",
"function",
"being",
"run",
"with",
"indentation",
"for",
"inclusion",
".",
"To",
"be",
"used",
"as",
"a",
"decorator",
"."
] |
def print_name(function):
def wrapper(*args, **kwargs):
global INDENT_LVL
my_id = random.randint(0, 100000)
ind = 2 * INDENT_LVL * ' '
INDENT_LVL += 1
print('{0}<< Starting: {1} ({2})'.format(ind, function.__name__, my_id))
start_time = time.time()
res = function(*args, **kwargs)
INDENT_LVL -= 1
print('{0}>> Ending: {1} ({2}) / Took {3}s'.format(ind, \
function.__name__, my_id, time.time()-start_time))
return res
return wrapper
|
[
"def",
"print_name",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"global",
"INDENT_LVL",
"my_id",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"100000",
")",
"ind",
"=",
"2",
"*",
"INDENT_LVL",
"*",
"' '",
"INDENT_LVL",
"+=",
"1",
"print",
"(",
"'{0}<< Starting: {1} ({2})'",
".",
"format",
"(",
"ind",
",",
"function",
".",
"__name__",
",",
"my_id",
")",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"res",
"=",
"function",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"INDENT_LVL",
"-=",
"1",
"print",
"(",
"'{0}>> Ending: {1} ({2}) / Took {3}s'",
".",
"format",
"(",
"ind",
",",
"function",
".",
"__name__",
",",
"my_id",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"res",
"return",
"wrapper"
] |
Print the function being run with indentation for inclusion.
|
[
"Print",
"the",
"function",
"being",
"run",
"with",
"indentation",
"for",
"inclusion",
"."
] |
[
"\"\"\"Print the function being run with indentation for inclusion.\n \n To be used as a decorator.\n \"\"\""
] |
[
{
"param": "function",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "function",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import time
import random
def print_name(function):
def wrapper(*args, **kwargs):
global INDENT_LVL
my_id = random.randint(0, 100000)
ind = 2 * INDENT_LVL * ' '
INDENT_LVL += 1
print('{0}<< Starting: {1} ({2})'.format(ind, function.__name__, my_id))
start_time = time.time()
res = function(*args, **kwargs)
INDENT_LVL -= 1
print('{0}>> Ending: {1} ({2}) / Took {3}s'.format(ind, \
function.__name__, my_id, time.time()-start_time))
return res
return wrapper
| 1,216 | 614 |
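
`print_name` relies on a module-level `INDENT_LVL` counter that the record references but never defines, so a runnable sketch has to declare it first (assuming the decorator, its `time`/`random` imports, and this usage share one script):

import time

INDENT_LVL = 0  # counter the decorator's global statement expects

@print_name
def inner():
    time.sleep(0.01)

@print_name
def outer():
    inner()

outer()
# << Starting: outer (<id>)
#   << Starting: inner (<id>)
#   >> Ending: inner (<id>) / Took 0.01...s
# >> Ending: outer (<id>) / Took 0.01...s
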
a332a06bfee68be057f4a33d24668014c98c8d10
|
acbaraka/py-html-checker
|
tests/110_cli_common_options.py
|
[
"MIT"
] |
Python
|
mock_export_logging_build
| null |
def mock_export_logging_build(*args, **kwargs):
"""
Mock method to just logging given command lines in ``report``
argument (following validator mockups).
"""
cls = args[0]
report = args[1]
for k in report:
cls.log.info(" ".join(k))
|
Mock method to just logging given command lines in ``report``
argument (following validator mockups).
|
Mock method to just logging given command lines in ``report``
argument (following validator mockups).
|
[
"Mock",
"method",
"to",
"just",
"logging",
"given",
"command",
"lines",
"in",
"`",
"`",
"report",
"`",
"`",
"argument",
"(",
"following",
"validator",
"mockups",
")",
"."
] |
def mock_export_logging_build(*args, **kwargs):
cls = args[0]
report = args[1]
for k in report:
cls.log.info(" ".join(k))
|
[
"def",
"mock_export_logging_build",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"cls",
"=",
"args",
"[",
"0",
"]",
"report",
"=",
"args",
"[",
"1",
"]",
"for",
"k",
"in",
"report",
":",
"cls",
".",
"log",
".",
"info",
"(",
"\" \"",
".",
"join",
"(",
"k",
")",
")"
] |
Mock method to just logging given command lines in ``report``
argument (following validator mockups).
|
[
"Mock",
"method",
"to",
"just",
"logging",
"given",
"command",
"lines",
"in",
"`",
"`",
"report",
"`",
"`",
"argument",
"(",
"following",
"validator",
"mockups",
")",
"."
] |
[
"\"\"\"\n Mock method to just logging given command lines in ``report``\n argument (following validator mockups).\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def mock_export_logging_build(*args, **kwargs):
cls = args[0]
report = args[1]
for k in report:
cls.log.info(" ".join(k))
| 1,217 | 797 |
52a9d164bff777cc63031c98a805e01ecbad0d24
|
vimc/orderly-web-deploy
|
orderly_web/config.py
|
[
"MIT"
] |
Python
|
combine
| null |
def combine(base, extra):
"""Combine exactly two dictionaries recursively, modifying the first
argument in place with the contets of the second"""
for k, v in extra.items():
if k in base and type(base[k]) is dict and v is not None:
combine(base[k], v)
else:
base[k] = v
|
Combine exactly two dictionaries recursively, modifying the first
argument in place with the contets of the second
|
Combine exactly two dictionaries recursively, modifying the first
argument in place with the contets of the second
|
[
"Combine",
"exactly",
"two",
"dictionaries",
"recursively",
"modifying",
"the",
"first",
"argument",
"in",
"place",
"with",
"the",
"contets",
"of",
"the",
"second"
] |
def combine(base, extra):
for k, v in extra.items():
if k in base and type(base[k]) is dict and v is not None:
combine(base[k], v)
else:
base[k] = v
|
[
"def",
"combine",
"(",
"base",
",",
"extra",
")",
":",
"for",
"k",
",",
"v",
"in",
"extra",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"base",
"and",
"type",
"(",
"base",
"[",
"k",
"]",
")",
"is",
"dict",
"and",
"v",
"is",
"not",
"None",
":",
"combine",
"(",
"base",
"[",
"k",
"]",
",",
"v",
")",
"else",
":",
"base",
"[",
"k",
"]",
"=",
"v"
] |
Combine exactly two dictionaries recursively, modifying the first
argument in place with the contets of the second
|
[
"Combine",
"exactly",
"two",
"dictionaries",
"recursively",
"modifying",
"the",
"first",
"argument",
"in",
"place",
"with",
"the",
"contets",
"of",
"the",
"second"
] |
[
"\"\"\"Combine exactly two dictionaries recursively, modifying the first\nargument in place with the contets of the second\"\"\""
] |
[
{
"param": "base",
"type": null
},
{
"param": "extra",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "base",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "extra",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def combine(base, extra):
for k, v in extra.items():
if k in base and type(base[k]) is dict and v is not None:
combine(base[k], v)
else:
base[k] = v
| 1,218 | 842 |
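
`combine` merges nested dicts recursively and mutates its first argument in place:

base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
extra = {"db": {"port": 5433}, "debug": True}

combine(base, extra)
print(base)  # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
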
9446ab3a99dda519ef86a0fd2973dd0dfc1c9e49
|
mpearmain/forml
|
forml/runtime/code/instruction.py
|
[
"Apache-2.0"
] |
Python
|
_function
|
bytes
|
def _function(actor: task.Actor, *args) -> bytes:
"""Consumer objective is the train method.
Args:
actor: Target actor to run the objective on.
*args: List of arguments to be passed to the actor objective.
Returns: New actor state.
"""
actor.train(*args)
return actor.get_state()
|
Consumer objective is the train method.
Args:
actor: Target actor to run the objective on.
*args: List of arguments to be passed to the actor objective.
Returns: New actor state.
|
Consumer objective is the train method.
|
[
"Consumer",
"objective",
"is",
"the",
"train",
"method",
"."
] |
def _function(actor: task.Actor, *args) -> bytes:
actor.train(*args)
return actor.get_state()
|
[
"def",
"_function",
"(",
"actor",
":",
"task",
".",
"Actor",
",",
"*",
"args",
")",
"->",
"bytes",
":",
"actor",
".",
"train",
"(",
"*",
"args",
")",
"return",
"actor",
".",
"get_state",
"(",
")"
] |
Consumer objective is the train method.
|
[
"Consumer",
"objective",
"is",
"the",
"train",
"method",
"."
] |
[
"\"\"\"Consumer objective is the train method.\n\n Args:\n actor: Target actor to run the objective on.\n *args: List of arguments to be passed to the actor objective.\n\n Returns: New actor state.\n \"\"\""
] |
[
{
"param": "actor",
"type": "task.Actor"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "actor",
"type": "task.Actor",
"docstring": "Target actor to run the objective on.",
"docstring_tokens": [
"Target",
"actor",
"to",
"run",
"the",
"objective",
"on",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "*args",
"type": null,
"docstring": "List of arguments to be passed to the actor objective.",
"docstring_tokens": [
"List",
"of",
"arguments",
"to",
"be",
"passed",
"to",
"the",
"actor",
"objective",
"."
],
"default": null,
"is_optional": null
}
],
"others": []
}
|
def _function(actor: task.Actor, *args) -> bytes:
actor.train(*args)
return actor.get_state()
| 1,219 | 302 |
306c5c469a0baf930241060d902fb82700b84119
|
thompsonsed/pycoalescence
|
pycoalescence/system_operations.py
|
[
"MIT"
] |
Python
|
cantor_pairing
|
<not_specific>
|
def cantor_pairing(x1, x2):
"""
Creates a unique integer from the two provided positive integers.
Maps ZxZ -> N, so only relevant for positive numbers.
For any A and B, generates C such that no D and E produce C unless D=A and B=E.
Assigns consecutive numbers to points along diagonals of a plane
.. note:
For most use cases which are not performance-critical, :func:`~elegant_pairing` provides a more reliable outcome
by reducing the size of the integers and therefore reducing the chance of an integer overflow.
:param x1: the first number
:param x2: the second number
:return: a unique reference combining the two integers
"""
return ((x1 + x2) * (x1 + x2 + 1) / 2) + x2
|
Creates a unique integer from the two provided positive integers.
Maps ZxZ -> N, so only relevant for positive numbers.
For any A and B, generates C such that no D and E produce C unless D=A and B=E.
Assigns consecutive numbers to points along diagonals of a plane
.. note:
For most use cases which are not performance-critical, :func:`~elegant_pairing` provides a more reliable outcome
by reducing the size of the integers and therefore reducing the chance of an integer overflow.
:param x1: the first number
:param x2: the second number
:return: a unique reference combining the two integers
|
Creates a unique integer from the two provided positive integers.
Maps ZxZ -> N, so only relevant for positive numbers.
For any A and B, generates C such that no D and E produce C unless D=A and B=E.
Assigns consecutive numbers to points along diagonals of a plane
For most use cases which are not performance-critical, :func:`~elegant_pairing` provides a more reliable outcome
by reducing the size of the integers and therefore reducing the chance of an integer overflow.
|
[
"Creates",
"a",
"unique",
"integer",
"from",
"the",
"two",
"provided",
"positive",
"integers",
".",
"Maps",
"ZxZ",
"-",
">",
"N",
"so",
"only",
"relevant",
"for",
"positive",
"numbers",
".",
"For",
"any",
"A",
"and",
"B",
"generates",
"C",
"such",
"that",
"no",
"D",
"and",
"E",
"produce",
"C",
"unless",
"D",
"=",
"A",
"and",
"B",
"=",
"E",
".",
"Assigns",
"consecutive",
"numbers",
"to",
"points",
"along",
"diagonals",
"of",
"a",
"plane",
"For",
"most",
"use",
"cases",
"which",
"are",
"not",
"performance",
"-",
"critical",
":",
"func",
":",
"`",
"~elegant_pairing",
"`",
"provides",
"a",
"more",
"reliable",
"outcome",
"by",
"reducing",
"the",
"size",
"of",
"the",
"integers",
"and",
"therefore",
"reducing",
"the",
"chance",
"of",
"an",
"integer",
"overflow",
"."
] |
def cantor_pairing(x1, x2):
return ((x1 + x2) * (x1 + x2 + 1) / 2) + x2
|
[
"def",
"cantor_pairing",
"(",
"x1",
",",
"x2",
")",
":",
"return",
"(",
"(",
"x1",
"+",
"x2",
")",
"*",
"(",
"x1",
"+",
"x2",
"+",
"1",
")",
"/",
"2",
")",
"+",
"x2"
] |
Creates a unique integer from the two provided positive integers.
|
[
"Creates",
"a",
"unique",
"integer",
"from",
"the",
"two",
"provided",
"positive",
"integers",
"."
] |
[
"\"\"\"\n Creates a unique integer from the two provided positive integers.\n\n Maps ZxZ -> N, so only relevant for positive numbers.\n For any A and B, generates C such that no D and E produce C unless D=A and B=E.\n\n Assigns consecutive numbers to points along diagonals of a plane\n\n .. note:\n For most use cases which are not performance-critical, :func:`~elegant_pairing` provides a more reliable outcome\n by reducing the size of the integers and therefore reducing the chance of an integer overflow.\n\n :param x1: the first number\n :param x2: the second number\n\n :return: a unique reference combining the two integers\n \"\"\""
] |
[
{
"param": "x1",
"type": null
},
{
"param": "x2",
"type": null
}
] |
{
"returns": [
{
"docstring": "a unique reference combining the two integers",
"docstring_tokens": [
"a",
"unique",
"reference",
"combining",
"the",
"two",
"integers"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "x1",
"type": null,
"docstring": "the first number",
"docstring_tokens": [
"the",
"first",
"number"
],
"default": null,
"is_optional": null
},
{
"identifier": "x2",
"type": null,
"docstring": "the second number",
"docstring_tokens": [
"the",
"second",
"number"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def cantor_pairing(x1, x2):
return ((x1 + x2) * (x1 + x2 + 1) / 2) + x2
| 1,220 | 297 |
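Worked values for cantor_pairing. Note that in Python 3 the / yields a float; since (x1+x2)*(x1+x2+1) is always even, callers wanting an integer key could safely use // instead.

print(cantor_pairing(3, 5))  # ((3+5)*(3+5+1)/2)+5 = 41.0
print(cantor_pairing(5, 3))  # 39.0 -- order matters, so (3,5) and (5,3) map differently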
34abd0f99e7ad0ce4d65b55d1f5d3fa2db4b4dec
|
glhuilli/papeles
|
papeles/utils/header.py
|
[
"MIT"
] |
Python
|
clean_word
|
str
|
def clean_word(w: str) -> str:
"""
Remove specific characters from word in a paper header
"""
return ''.join([
c for c in w if c not in [
'.', ',', ':', '´', '*', '∗', '†', '†', '‡', '¨', '`', '§', '⇤', '£', '0', '1', '2',
'3', '4', '5', '6'
]
])
|
Remove specific characters from word in a paper header
|
Remove specific characters from word in a paper header
|
[
"Remove",
"specific",
"characters",
"from",
"word",
"in",
"a",
"paper",
"header"
] |
def clean_word(w: str) -> str:
return ''.join([
c for c in w if c not in [
'.', ',', ':', '´', '*', '∗', '†', '†', '‡', '¨', '`', '§', '⇤', '£', '0', '1', '2',
'3', '4', '5', '6'
]
])
|
[
"def",
"clean_word",
"(",
"w",
":",
"str",
")",
"->",
"str",
":",
"return",
"''",
".",
"join",
"(",
"[",
"c",
"for",
"c",
"in",
"w",
"if",
"c",
"not",
"in",
"[",
"'.'",
",",
"','",
",",
"':'",
",",
"'´',",
" ",
"*',",
" ",
"∗', '",
"†",
", '†'",
",",
"'‡', ",
"'",
"', '`",
"'",
" '§'",
",",
"'⇤'",
",",
"'£',",
" ",
"0', '",
"1",
", '2",
"'",
"",
"",
"",
"",
"",
"",
"'3'",
",",
"'4'",
",",
"'5'",
",",
"'6'",
"]",
"]",
")"
] |
Remove specific characters from word in a paper header
|
[
"Remove",
"specific",
"characters",
"from",
"word",
"in",
"a",
"paper",
"header"
] |
[
"\"\"\"\n Remove specific characters from word in a paper header\n \"\"\""
] |
[
{
"param": "w",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "w",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def clean_word(w: str) -> str:
return ''.join([
c for c in w if c not in [
'.', ',', ':', '´', '*', '∗', '†', '†', '‡', '¨', '`', '§', '⇤', '£', '0', '1', '2',
'3', '4', '5', '6'
]
])
| 1,221 | 330 |
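Example calls for clean_word; the inputs are invented.

print(clean_word("Smith†,"))    # 'Smith'
print(clean_word("Section2:"))  # 'Section'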
ef8312d496ae2af013139338aa412c8d984f6c6d
|
clanker/HyDAGS
|
Lanker_2019_HyDAGS_Code.py
|
[
"MIT"
] |
Python
|
reduce_lists
|
<not_specific>
|
def reduce_lists(ids, metric, verbose=False):
"""
This function reduces nearby basis functions of the same sign
to a single representative basis function for that feature.
"""
ids = list(ids)
temp = ids.copy()
for j in temp:
if (j-1) in ids and metric[j] > metric[j-1]:
if verbose:
print('j-1 removed from id list for', j)
ids.remove(j-1)
if (j+1) in ids and metric[j] > metric[j+1]:
if verbose:
print('j+1 removed from id list for', j)
ids.remove(j+1)
return ids
|
This function reduces nearby basis functions of the same sign
to a single representative basis function for that feature.
|
This function reduces nearby basis functions of the same sign
to a single representative basis function for that feature.
|
[
"This",
"function",
"reduces",
"nearby",
"basis",
"functions",
"of",
"the",
"same",
"sign",
"to",
"a",
"single",
"representative",
"basis",
"function",
"for",
"that",
"feature",
"."
] |
def reduce_lists(ids, metric, verbose=False):
ids = list(ids)
temp = ids.copy()
for j in temp:
if (j-1) in ids and metric[j] > metric[j-1]:
if verbose:
print('j-1 removed from id list for', j)
ids.remove(j-1)
if (j+1) in ids and metric[j] > metric[j+1]:
if verbose:
print('j+1 removed from id list for', j)
ids.remove(j+1)
return ids
|
[
"def",
"reduce_lists",
"(",
"ids",
",",
"metric",
",",
"verbose",
"=",
"False",
")",
":",
"ids",
"=",
"list",
"(",
"ids",
")",
"temp",
"=",
"ids",
".",
"copy",
"(",
")",
"for",
"j",
"in",
"temp",
":",
"if",
"(",
"j",
"-",
"1",
")",
"in",
"ids",
"and",
"metric",
"[",
"j",
"]",
">",
"metric",
"[",
"j",
"-",
"1",
"]",
":",
"if",
"verbose",
":",
"print",
"(",
"'j-1 removed from id list for'",
",",
"j",
")",
"ids",
".",
"remove",
"(",
"j",
"-",
"1",
")",
"if",
"(",
"j",
"+",
"1",
")",
"in",
"ids",
"and",
"metric",
"[",
"j",
"]",
">",
"metric",
"[",
"j",
"+",
"1",
"]",
":",
"if",
"verbose",
":",
"print",
"(",
"'j+1 removed from id list for'",
",",
"j",
")",
"ids",
".",
"remove",
"(",
"j",
"+",
"1",
")",
"return",
"ids"
] |
This function reduces nearby basis functions of the same sign
to a single representative basis function for that feature.
|
[
"This",
"function",
"reduces",
"nearby",
"basis",
"functions",
"of",
"the",
"same",
"sign",
"to",
"a",
"single",
"representative",
"basis",
"function",
"for",
"that",
"feature",
"."
] |
[
"\"\"\"\n This function reduces nearby basis functions of the same sign\n to a single representative basis function for that feature.\n \"\"\""
] |
[
{
"param": "ids",
"type": null
},
{
"param": "metric",
"type": null
},
{
"param": "verbose",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "ids",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "metric",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "verbose",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def reduce_lists(ids, metric, verbose=False):
ids = list(ids)
temp = ids.copy()
for j in temp:
if (j-1) in ids and metric[j] > metric[j-1]:
if verbose:
print('j-1 removed from id list for', j)
ids.remove(j-1)
if (j+1) in ids and metric[j] > metric[j+1]:
if verbose:
print('j+1 removed from id list for', j)
ids.remove(j+1)
return ids
| 1,222 | 557 |
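A small made-up example for reduce_lists: an index is dropped when an adjacent index carries a larger metric value.

ids = [3, 4, 7]
metric = [0, 0, 0, 1.0, 2.5, 0, 0, 0.9]
print(reduce_lists(ids, metric))  # [4, 7] -- 3 is removed because metric[4] > metric[3]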
95d81a479b871106fc6b0ce39b66ec27dcd20282
|
lzskyline/DPDK_SURICATA-4_1_1
|
suricata-4.1.4/suricata-update/suricata/update/rule.py
|
[
"MIT"
] |
Python
|
parse_var_names
|
<not_specific>
|
def parse_var_names(var):
""" Parse out the variable names from a string. """
if var is None:
return []
return re.findall("\$([\w_]+)", var)
|
Parse out the variable names from a string.
|
Parse out the variable names from a string.
|
[
"Parse",
"out",
"the",
"variable",
"names",
"from",
"a",
"string",
"."
] |
def parse_var_names(var):
if var is None:
return []
return re.findall("\$([\w_]+)", var)
|
[
"def",
"parse_var_names",
"(",
"var",
")",
":",
"if",
"var",
"is",
"None",
":",
"return",
"[",
"]",
"return",
"re",
".",
"findall",
"(",
"\"\\$([\\w_]+)\"",
",",
"var",
")"
] |
Parse out the variable names from a string.
|
[
"Parse",
"out",
"the",
"variable",
"names",
"from",
"a",
"string",
"."
] |
[
"\"\"\" Parse out the variable names from a string. \"\"\""
] |
[
{
"param": "var",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "var",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def parse_var_names(var):
if var is None:
return []
return re.findall("\$([\w_]+)", var)
| 1,223 | 446 |
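Example for parse_var_names with a Suricata-style address string.

print(parse_var_names("$HOME_NET,$EXTERNAL_NET"))  # ['HOME_NET', 'EXTERNAL_NET']
print(parse_var_names(None))                       # []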
3a6af2c23fdedd3c230b268e6be2edc00ad86d1f
|
aprilsanchez/ictf-framework
|
database/support/mysql-connector-python-2.1.3/tests/__init__.py
|
[
"BSD-2-Clause-FreeBSD"
] |
Python
|
have_engine
|
<not_specific>
|
def have_engine(cnx, engine):
"""Check support for given storage engine
This function checks if the MySQL server accessed through cnx has
support for the storage engine.
Returns True or False.
"""
have = False
engine = engine.lower()
cur = cnx.cursor()
# Should use INFORMATION_SCHEMA, but play nice with v4.1
cur.execute("SHOW ENGINES")
rows = cur.fetchall()
for row in rows:
if row[0].lower() == engine:
if row[1].lower() == 'yes':
have = True
break
cur.close()
return have
|
Check support for given storage engine
This function checks if the MySQL server accessed through cnx has
support for the storage engine.
Returns True or False.
|
Check support for given storage engine
This function checks if the MySQL server accessed through cnx has
support for the storage engine.
Returns True or False.
|
[
"Check",
"support",
"for",
"given",
"storage",
"engine",
"This",
"function",
"checks",
"if",
"the",
"MySQL",
"server",
"accessed",
"through",
"cnx",
"has",
"support",
"for",
"the",
"storage",
"engine",
".",
"Returns",
"True",
"or",
"False",
"."
] |
def have_engine(cnx, engine):
have = False
engine = engine.lower()
cur = cnx.cursor()
cur.execute("SHOW ENGINES")
rows = cur.fetchall()
for row in rows:
if row[0].lower() == engine:
if row[1].lower() == 'yes':
have = True
break
cur.close()
return have
|
[
"def",
"have_engine",
"(",
"cnx",
",",
"engine",
")",
":",
"have",
"=",
"False",
"engine",
"=",
"engine",
".",
"lower",
"(",
")",
"cur",
"=",
"cnx",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"\"SHOW ENGINES\"",
")",
"rows",
"=",
"cur",
".",
"fetchall",
"(",
")",
"for",
"row",
"in",
"rows",
":",
"if",
"row",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"engine",
":",
"if",
"row",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"==",
"'yes'",
":",
"have",
"=",
"True",
"break",
"cur",
".",
"close",
"(",
")",
"return",
"have"
] |
Check support for given storage engine
This function checks if the MySQL server accessed through cnx has
support for the storage engine.
|
[
"Check",
"support",
"for",
"given",
"storage",
"engine",
"This",
"function",
"checks",
"if",
"the",
"MySQL",
"server",
"accessed",
"through",
"cnx",
"has",
"support",
"for",
"the",
"storage",
"engine",
"."
] |
[
"\"\"\"Check support for given storage engine\n\n This function checks if the MySQL server accessed through cnx has\n support for the storage engine.\n\n Returns True or False.\n \"\"\"",
"# Should use INFORMATION_SCHEMA, but play nice with v4.1"
] |
[
{
"param": "cnx",
"type": null
},
{
"param": "engine",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cnx",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "engine",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def have_engine(cnx, engine):
have = False
engine = engine.lower()
cur = cnx.cursor()
cur.execute("SHOW ENGINES")
rows = cur.fetchall()
for row in rows:
if row[0].lower() == engine:
if row[1].lower() == 'yes':
have = True
break
cur.close()
return have
| 1,224 | 224 |
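A hedged usage sketch for have_engine; the credentials are placeholders and assume the mysql.connector package is installed.

import mysql.connector

cnx = mysql.connector.connect(host="127.0.0.1", user="root", password="")  # placeholder credentials
if have_engine(cnx, "InnoDB"):
    print("InnoDB available")
cnx.close()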
66ff6aea078a5f38aa511bc1fa8389e0e406bd44
|
GiovanniFiorini/Obnoxious
|
heuristics/variable_depth_search.py
|
[
"MIT"
] |
Python
|
bookmark
|
tuple
|
def bookmark(best_facility: Optional[Facility], best_hazard: int, new_facility: Facility, new_hazard: int) -> tuple:
"""
Update if necessary the best facility and the best hazard
:param best_facility: the previous facility that minimize the total hazard for the town with total hazard
:param best_hazard: total hazard in the solution with best_facility
:param new_facility: the new facility that minimize the total hazard for the town with total hazard
:param new_hazard: the new total hazard in the solution with best_facility
:return: the current best facility and best hazard
"""
if best_hazard == 0 and not best_facility:
return new_facility, new_hazard
if new_hazard < best_hazard:
return new_facility, new_hazard
else:
return best_facility, best_hazard
|
Update if necessary the best facility and the best hazard
:param best_facility: the previous facility that minimize the total hazard for the town with total hazard
:param best_hazard: total hazard in the solution with best_facility
:param new_facility: the new facility that minimize the total hazard for the town with total hazard
:param new_hazard: the new total hazard in the solution with best_facility
:return: the current best facility and best hazard
|
Update if necessary the best facility and the best hazard
|
[
"Update",
"if",
"necessary",
"the",
"best",
"facility",
"and",
"the",
"best",
"hazard"
] |
def bookmark(best_facility: Optional[Facility], best_hazard: int, new_facility: Facility, new_hazard: int) -> tuple:
if best_hazard == 0 and not best_facility:
return new_facility, new_hazard
if new_hazard < best_hazard:
return new_facility, new_hazard
else:
return best_facility, best_hazard
|
[
"def",
"bookmark",
"(",
"best_facility",
":",
"Optional",
"[",
"Facility",
"]",
",",
"best_hazard",
":",
"int",
",",
"new_facility",
":",
"Facility",
",",
"new_hazard",
":",
"int",
")",
"->",
"tuple",
":",
"if",
"best_hazard",
"==",
"0",
"and",
"not",
"best_facility",
":",
"return",
"new_facility",
",",
"new_hazard",
"if",
"new_hazard",
"<",
"best_hazard",
":",
"return",
"new_facility",
",",
"new_hazard",
"else",
":",
"return",
"best_facility",
",",
"best_hazard"
] |
Update if necessary the best facility and the best hazard
|
[
"Update",
"if",
"necessary",
"the",
"best",
"facility",
"and",
"the",
"best",
"hazard"
] |
[
"\"\"\"\r\n Update if necessary the best facility and the best hazard\r\n\r\n :param best_facility: the previous facility that minimize the total hazard for the town with total hazard\r\n :param best_hazard: total hazard in the solution with best_facility\r\n :param new_facility: the new facility that minimize the total hazard for the town with total hazard\r\n :param new_hazard: the new total hazard in the solution with best_facility\r\n :return: the current best facility and best hazard\r\n \"\"\""
] |
[
{
"param": "best_facility",
"type": "Optional[Facility]"
},
{
"param": "best_hazard",
"type": "int"
},
{
"param": "new_facility",
"type": "Facility"
},
{
"param": "new_hazard",
"type": "int"
}
] |
{
"returns": [
{
"docstring": "the current best facility and best hazard",
"docstring_tokens": [
"the",
"current",
"best",
"facility",
"and",
"best",
"hazard"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "best_facility",
"type": "Optional[Facility]",
"docstring": "the previous facility that minimize the total hazard for the town with total hazard",
"docstring_tokens": [
"the",
"previous",
"facility",
"that",
"minimize",
"the",
"total",
"hazard",
"for",
"the",
"town",
"with",
"total",
"hazard"
],
"default": null,
"is_optional": null
},
{
"identifier": "best_hazard",
"type": "int",
"docstring": "total hazard in the solution with best_facility",
"docstring_tokens": [
"total",
"hazard",
"in",
"the",
"solution",
"with",
"best_facility"
],
"default": null,
"is_optional": null
},
{
"identifier": "new_facility",
"type": "Facility",
"docstring": "the new facility that minimize the total hazard for the town with total hazard",
"docstring_tokens": [
"the",
"new",
"facility",
"that",
"minimize",
"the",
"total",
"hazard",
"for",
"the",
"town",
"with",
"total",
"hazard"
],
"default": null,
"is_optional": null
},
{
"identifier": "new_hazard",
"type": "int",
"docstring": "the new total hazard in the solution with best_facility",
"docstring_tokens": [
"the",
"new",
"total",
"hazard",
"in",
"the",
"solution",
"with",
"best_facility"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def bookmark(best_facility: Optional[Facility], best_hazard: int, new_facility: Facility, new_hazard: int) -> tuple:
if best_hazard == 0 and not best_facility:
return new_facility, new_hazard
if new_hazard < best_hazard:
return new_facility, new_hazard
else:
return best_facility, best_hazard
| 1,225 | 73 |
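Illustrative calls for bookmark; the Facility class below is only a minimal stub, since the real one lives elsewhere in the project.

class Facility:  # minimal stub for the example
    def __init__(self, name):
        self.name = name

best_f, best_h = bookmark(None, 0, Facility("A"), 120)        # first candidate is kept
best_f, best_h = bookmark(best_f, best_h, Facility("B"), 90)  # lower hazard wins
print(best_f.name, best_h)  # B 90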
694d10b2c73447fe82ad7f9d803c159b416472cd
|
Bollos00/opensourcegames
|
code/utils/osg.py
|
[
"CC0-1.0"
] |
Python
|
git_repo
|
<not_specific>
|
def git_repo(repo):
"""
Tests if a repo URL is a git repo, then returns the repo url.
"""
# everything that starts with 'git://'
if repo.startswith('git://'):
return repo
# generic (https://*.git) or (http://*.git) ending on git
if (repo.startswith('https://') or repo.startswith('http://')) and repo.endswith('.git'):
return repo
# for all others we just check if they start with the typical urls of git services
services = ['https://git.tuxfamily.org/', 'http://git.pond.sub.org/', 'https://gitorious.org/',
'https://git.code.sf.net/p/']
if any(repo.startswith(service) for service in services):
return repo
# the rest is not recognized as a git url
return None
|
Tests if a repo URL is a git repo, then returns the repo url.
|
Tests if a repo URL is a git repo, then returns the repo url.
|
[
"Tests",
"if",
"a",
"repo",
"URL",
"is",
"a",
"git",
"repo",
"then",
"returns",
"the",
"repo",
"url",
"."
] |
def git_repo(repo):
if repo.startswith('git://'):
return repo
if (repo.startswith('https://') or repo.startswith('http://')) and repo.endswith('.git'):
return repo
services = ['https://git.tuxfamily.org/', 'http://git.pond.sub.org/', 'https://gitorious.org/',
'https://git.code.sf.net/p/']
if any(repo.startswith(service) for service in services):
return repo
return None
|
[
"def",
"git_repo",
"(",
"repo",
")",
":",
"if",
"repo",
".",
"startswith",
"(",
"'git://'",
")",
":",
"return",
"repo",
"if",
"(",
"repo",
".",
"startswith",
"(",
"'https://'",
")",
"or",
"repo",
".",
"startswith",
"(",
"'http://'",
")",
")",
"and",
"repo",
".",
"endswith",
"(",
"'.git'",
")",
":",
"return",
"repo",
"services",
"=",
"[",
"'https://git.tuxfamily.org/'",
",",
"'http://git.pond.sub.org/'",
",",
"'https://gitorious.org/'",
",",
"'https://git.code.sf.net/p/'",
"]",
"if",
"any",
"(",
"repo",
".",
"startswith",
"(",
"service",
")",
"for",
"service",
"in",
"services",
")",
":",
"return",
"repo",
"return",
"None"
] |
Tests if a repo URL is a git repo, then returns the repo url.
|
[
"Tests",
"if",
"a",
"repo",
"URL",
"is",
"a",
"git",
"repo",
"then",
"returns",
"the",
"repo",
"url",
"."
] |
[
"\"\"\"\n Tests if a repo URL is a git repo, then returns the repo url.\n \"\"\"",
"# everything that starts with 'git://'",
"# generic (https://*.git) or (http://*.git) ending on git",
"# for all others we just check if they start with the typical urls of git services",
"# the rest is not recognized as a git url"
] |
[
{
"param": "repo",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "repo",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def git_repo(repo):
if repo.startswith('git://'):
return repo
if (repo.startswith('https://') or repo.startswith('http://')) and repo.endswith('.git'):
return repo
services = ['https://git.tuxfamily.org/', 'http://git.pond.sub.org/', 'https://gitorious.org/',
'https://git.code.sf.net/p/']
if any(repo.startswith(service) for service in services):
return repo
return None
| 1,226 | 423 |
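Quick checks for git_repo; the URLs are examples only.

print(git_repo("git://example.org/game.git"))    # returned unchanged
print(git_repo("https://example.org/game.git"))  # recognised by the .git suffix
print(git_repo("https://example.org/game"))      # None -- not recognised as a git url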
c9dedc50027d4b09f0dc21592c3c318587fbb319
|
suddi/coding-challenges
|
python/hash_tables/ransom_note.py
|
[
"MIT"
] |
Python
|
solution1
|
<not_specific>
|
def solution1(magazine, ransom): # O(N^2)
"""
Similar to src.strings.making_anagrams, detect if 2 strings can be equated by
only removing some words.
>>> solution1('give me one grand today night', 'give one grand one today')
False
>>> solution1('give me one grand today night', 'give one grand today')
True
>>> solution1('give me one grand one today night', 'give one grand today')
True
"""
list_a = magazine.split(' ') # O(N)
list_b = ransom.split(' ') # O(N)
for word in list_b: # O(N)
if word in list_a: # O(1)
list_a.remove(word) # O(N)
else: # O(1)
return False # O(1)
return True # O(1)
|
Similar to src.strings.making_anagrams, detect if 2 strings can be equated by
only removing some words.
>>> solution1('give me one grand today night', 'give one grand one today')
False
>>> solution1('give me one grand today night', 'give one grand today')
True
>>> solution1('give me one grand one today night', 'give one grand today')
True
|
Similar to src.strings.making_anagrams, detect if 2 strings can be equated by
only removing some words.
|
[
"Similar",
"to",
"src",
".",
"strings",
".",
"making_anagrams",
"detect",
"if",
"2",
"strings",
"can",
"be",
"equated",
"by",
"only",
"removing",
"some",
"words",
"."
] |
def solution1(magazine, ransom):
list_a = magazine.split(' ')
list_b = ransom.split(' ')
for word in list_b:
if word in list_a:
list_a.remove(word)
else:
return False
return True
|
[
"def",
"solution1",
"(",
"magazine",
",",
"ransom",
")",
":",
"list_a",
"=",
"magazine",
".",
"split",
"(",
"' '",
")",
"list_b",
"=",
"ransom",
".",
"split",
"(",
"' '",
")",
"for",
"word",
"in",
"list_b",
":",
"if",
"word",
"in",
"list_a",
":",
"list_a",
".",
"remove",
"(",
"word",
")",
"else",
":",
"return",
"False",
"return",
"True"
] |
Similar to src.strings.making_anagrams, detect if 2 strings can be equated by
only removing some words.
|
[
"Similar",
"to",
"src",
".",
"strings",
".",
"making_anagrams",
"detect",
"if",
"2",
"strings",
"can",
"be",
"equated",
"by",
"only",
"removing",
"some",
"words",
"."
] |
[
"# O(N^2)",
"\"\"\"\n Similar to src.strings.making_anagrams, detect if 2 strings can be equated by\n only removing some words.\n\n >>> solution1('give me one grand today night', 'give one grand one today')\n False\n >>> solution1('give me one grand today night', 'give one grand today')\n True\n >>> solution1('give me one grand one today night', 'give one grand today')\n True\n \"\"\"",
"# O(N)",
"# O(N)",
"# O(N)",
"# O(1)",
"# O(N)",
"# O(1)",
"# O(1)",
"# O(1)"
] |
[
{
"param": "magazine",
"type": null
},
{
"param": "ransom",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "magazine",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ransom",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def solution1(magazine, ransom):
list_a = magazine.split(' ')
list_b = ransom.split(' ')
for word in list_b:
if word in list_a:
list_a.remove(word)
else:
return False
return True
| 1,227 | 350 |
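solution1 above is O(N^2) because list membership and removal are both linear; a hedged O(N) alternative using collections.Counter (not part of the original file) would be:

from collections import Counter

def solution1_counter(magazine, ransom):
    # multiset check: every ransom word must appear at least as often in the magazine
    available = Counter(magazine.split(' '))
    needed = Counter(ransom.split(' '))
    return all(available[word] >= count for word, count in needed.items())

print(solution1_counter('give me one grand today night', 'give one grand today'))  # True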
51a9695e44a31a672493db342d409947cb6fb420
|
Scoan/blender
|
release/scripts/startup/bl_operators/clip.py
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] |
Python
|
collection_in_collection
|
<not_specific>
|
def collection_in_collection(collection, collection_to_query):
"""Return true if collection is in any of the children or """
"""grandchildren of collection_to_query"""
for child in collection_to_query.children:
if collection == child:
return True
if collection_in_collection(collection, child):
return True
|
Return true if collection is in any of the children or
|
Return true if collection is in any of the children or
|
[
"Return",
"true",
"if",
"collection",
"is",
"in",
"any",
"of",
"the",
"children",
"or"
] |
def collection_in_collection(collection, collection_to_query):
for child in collection_to_query.children:
if collection == child:
return True
if collection_in_collection(collection, child):
return True
|
[
"def",
"collection_in_collection",
"(",
"collection",
",",
"collection_to_query",
")",
":",
"\"\"\"grandchildren of collection_to_query\"\"\"",
"for",
"child",
"in",
"collection_to_query",
".",
"children",
":",
"if",
"collection",
"==",
"child",
":",
"return",
"True",
"if",
"collection_in_collection",
"(",
"collection",
",",
"child",
")",
":",
"return",
"True"
] |
Return true if collection is in any of the children or
|
[
"Return",
"true",
"if",
"collection",
"is",
"in",
"any",
"of",
"the",
"children",
"or"
] |
[
"\"\"\"Return true if collection is in any of the children or \"\"\"",
"\"\"\"grandchildren of collection_to_query\"\"\""
] |
[
{
"param": "collection",
"type": null
},
{
"param": "collection_to_query",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "collection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "collection_to_query",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def collection_in_collection(collection, collection_to_query):
for child in collection_to_query.children:
if collection == child:
return True
if collection_in_collection(collection, child):
return True
| 1,228 | 138 |
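A tiny stub-based example for collection_in_collection; Node merely mimics Blender's collection.children attribute.

class Node:  # stand-in for a Blender collection
    def __init__(self, children=()):
        self.children = list(children)

leaf = Node()
root = Node(children=[Node(children=[leaf])])
print(collection_in_collection(leaf, root))  # True -- found among the grandchildren
print(collection_in_collection(root, leaf))  # None -- the function is merely falsy on a miss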
4b13faee04a2175e0bf1c40508994ebf6c3424a9
|
LiliaMudryk/skyscrapers
|
skyscrapers.py
|
[
"MIT"
] |
Python
|
read_input
|
<not_specific>
|
def read_input(path: str):
"""
Read game board file from path.
Return list of str.
"""
f_open = open(path,mode = "r",encoding="UTF-8")
lines_lst = []
for line in f_open:
line = line.strip()
lines_lst.append(line)
return lines_lst
|
Read game board file from path.
Return list of str.
|
Read game board file from path.
Return list of str.
|
[
"Read",
"game",
"board",
"file",
"from",
"path",
".",
"Return",
"list",
"of",
"str",
"."
] |
def read_input(path: str):
f_open = open(path,mode = "r",encoding="UTF-8")
lines_lst = []
for line in f_open:
line = line.strip()
lines_lst.append(line)
return lines_lst
|
[
"def",
"read_input",
"(",
"path",
":",
"str",
")",
":",
"f_open",
"=",
"open",
"(",
"path",
",",
"mode",
"=",
"\"r\"",
",",
"encoding",
"=",
"\"UTF-8\"",
")",
"lines_lst",
"=",
"[",
"]",
"for",
"line",
"in",
"f_open",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"lines_lst",
".",
"append",
"(",
"line",
")",
"return",
"lines_lst"
] |
Read game board file from path.
|
[
"Read",
"game",
"board",
"file",
"from",
"path",
"."
] |
[
"\"\"\"\n Read game board file from path.\n Return list of str.\n \"\"\""
] |
[
{
"param": "path",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def read_input(path: str):
f_open = open(path,mode = "r",encoding="UTF-8")
lines_lst = []
for line in f_open:
line = line.strip()
lines_lst.append(line)
return lines_lst
| 1,229 | 628 |
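read_input above never closes its file handle; a hedged variant with the same behaviour (invented name) using a context manager:

def read_input_safe(path: str):
    # identical output, but the file is closed when the block exits
    with open(path, mode="r", encoding="UTF-8") as f_open:
        return [line.strip() for line in f_open]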
4cd7e8fb5a8c462a9192f832fe4b0409cb868fcf
|
marcelarosalesj/metal
|
inventory/inventory/inventory/objects/port.py
|
[
"Apache-2.0"
] |
Python
|
list
|
<not_specific>
|
def list(cls, context, limit=None, marker=None, sort_key=None,
sort_dir=None, filters=None):
"""Return a list of Port objects.
:param cls: the :class:`Port`
:param context: Security context.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:param filters: Filters to apply.
:returns: a list of :class:`Port` object.
"""
db_ports = cls.dbapi.port_get_list(
filters=filters,
limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir)
return cls._from_db_object_list(context, db_ports)
|
Return a list of Port objects.
:param cls: the :class:`Port`
:param context: Security context.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:param filters: Filters to apply.
:returns: a list of :class:`Port` object.
|
Return a list of Port objects.
|
[
"Return",
"a",
"list",
"of",
"Port",
"objects",
"."
] |
def list(cls, context, limit=None, marker=None, sort_key=None,
sort_dir=None, filters=None):
db_ports = cls.dbapi.port_get_list(
filters=filters,
limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir)
return cls._from_db_object_list(context, db_ports)
|
[
"def",
"list",
"(",
"cls",
",",
"context",
",",
"limit",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"sort_key",
"=",
"None",
",",
"sort_dir",
"=",
"None",
",",
"filters",
"=",
"None",
")",
":",
"db_ports",
"=",
"cls",
".",
"dbapi",
".",
"port_get_list",
"(",
"filters",
"=",
"filters",
",",
"limit",
"=",
"limit",
",",
"marker",
"=",
"marker",
",",
"sort_key",
"=",
"sort_key",
",",
"sort_dir",
"=",
"sort_dir",
")",
"return",
"cls",
".",
"_from_db_object_list",
"(",
"context",
",",
"db_ports",
")"
] |
Return a list of Port objects.
|
[
"Return",
"a",
"list",
"of",
"Port",
"objects",
"."
] |
[
"\"\"\"Return a list of Port objects.\n\n :param cls: the :class:`Port`\n :param context: Security context.\n :param limit: maximum number of resources to return in a single result.\n :param marker: pagination marker for large data sets.\n :param sort_key: column to sort results by.\n :param sort_dir: direction to sort. \"asc\" or \"desc\".\n :param filters: Filters to apply.\n :returns: a list of :class:`Port` object.\n\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "context",
"type": null
},
{
"param": "limit",
"type": null
},
{
"param": "marker",
"type": null
},
{
"param": "sort_key",
"type": null
},
{
"param": "sort_dir",
"type": null
},
{
"param": "filters",
"type": null
}
] |
{
"returns": [
{
"docstring": "a list of :class:`Port` object.",
"docstring_tokens": [
"a",
"list",
"of",
":",
"class",
":",
"`",
"Port",
"`",
"object",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "context",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "limit",
"type": null,
"docstring": "maximum number of resources to return in a single result.",
"docstring_tokens": [
"maximum",
"number",
"of",
"resources",
"to",
"return",
"in",
"a",
"single",
"result",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "marker",
"type": null,
"docstring": "pagination marker for large data sets.",
"docstring_tokens": [
"pagination",
"marker",
"for",
"large",
"data",
"sets",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "sort_key",
"type": null,
"docstring": "column to sort results by.",
"docstring_tokens": [
"column",
"to",
"sort",
"results",
"by",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "sort_dir",
"type": null,
"docstring": "direction to sort.",
"docstring_tokens": [
"direction",
"to",
"sort",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "filters",
"type": null,
"docstring": "Filters to apply.",
"docstring_tokens": [
"Filters",
"to",
"apply",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def list(cls, context, limit=None, marker=None, sort_key=None,
sort_dir=None, filters=None):
db_ports = cls.dbapi.port_get_list(
filters=filters,
limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir)
return cls._from_db_object_list(context, db_ports)
| 1,230 | 740 |
4b5b06ab510f629e43550d05b2b3596377f284a9
|
lgirault/pants
|
src/python/pants/goal/goal.py
|
[
"Apache-2.0"
] |
Python
|
register
|
<not_specific>
|
def register(cls, name, description, options_registrar_cls=None):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
"""
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal
|
Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
|
Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
|
[
"Register",
"a",
"goal",
"description",
".",
"Otherwise",
"the",
"description",
"must",
"be",
"set",
"when",
"registering",
"some",
"task",
"on",
"the",
"goal",
"which",
"is",
"clunky",
"and",
"dependent",
"on",
"things",
"like",
"registration",
"order",
"of",
"tasks",
"in",
"the",
"goal",
".",
"A",
"goal",
"that",
"isn",
"'",
"t",
"explicitly",
"registered",
"with",
"a",
"description",
"will",
"fall",
"back",
"to",
"the",
"description",
"of",
"the",
"task",
"in",
"that",
"goal",
"with",
"the",
"same",
"name",
"(",
"if",
"any",
")",
".",
"So",
"singleton",
"goals",
"need",
"not",
"be",
"registered",
"explicitly",
".",
"This",
"method",
"is",
"primarily",
"useful",
"for",
"setting",
"a",
"description",
"on",
"a",
"generic",
"goal",
"like",
"'",
"compile",
"'",
"or",
"'",
"test",
"'",
"that",
"multiple",
"backends",
"will",
"register",
"tasks",
"on",
".",
":",
"API",
":",
"public",
":",
"param",
"string",
"name",
":",
"The",
"name",
"of",
"the",
"goal",
";",
"ie",
":",
"the",
"way",
"to",
"specify",
"it",
"on",
"the",
"command",
"line",
".",
":",
"param",
"string",
"description",
":",
"A",
"description",
"of",
"the",
"tasks",
"in",
"the",
"goal",
"do",
".",
":",
"param",
":",
"class",
":",
"pants",
".",
"option",
".",
"Optionable",
"options_registrar_cls",
":",
"A",
"class",
"for",
"registering",
"options",
"at",
"the",
"goal",
"scope",
".",
"Useful",
"for",
"registering",
"recursive",
"options",
"on",
"all",
"tasks",
"in",
"a",
"goal",
".",
":",
"return",
":",
"The",
"freshly",
"registered",
"goal",
".",
":",
"rtype",
":",
":",
"class",
":",
"`",
"_Goal",
"`"
] |
def register(cls, name, description, options_registrar_cls=None):
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal
|
[
"def",
"register",
"(",
"cls",
",",
"name",
",",
"description",
",",
"options_registrar_cls",
"=",
"None",
")",
":",
"goal",
"=",
"cls",
".",
"by_name",
"(",
"name",
")",
"goal",
".",
"_description",
"=",
"description",
"goal",
".",
"_options_registrar_cls",
"=",
"(",
"options_registrar_cls",
".",
"registrar_for_scope",
"(",
"name",
")",
"if",
"options_registrar_cls",
"else",
"None",
")",
"return",
"goal"
] |
Register a goal description.
|
[
"Register",
"a",
"goal",
"description",
"."
] |
[
"\"\"\"Register a goal description.\n\n Otherwise the description must be set when registering some task on the goal,\n which is clunky, and dependent on things like registration order of tasks in the goal.\n\n A goal that isn't explicitly registered with a description will fall back to the description\n of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')\n need not be registered explicitly. This method is primarily useful for setting a\n description on a generic goal like 'compile' or 'test', that multiple backends will\n register tasks on.\n\n :API: public\n\n :param string name: The name of the goal; ie: the way to specify it on the command line.\n :param string description: A description of the tasks in the goal do.\n :param :class:pants.option.Optionable options_registrar_cls: A class for registering options\n at the goal scope. Useful for registering recursive options on all tasks in a goal.\n :return: The freshly registered goal.\n :rtype: :class:`_Goal`\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "name",
"type": null
},
{
"param": "description",
"type": null
},
{
"param": "options_registrar_cls",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "description",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "options_registrar_cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def register(cls, name, description, options_registrar_cls=None):
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal
| 1,231 | 73 |
f319399c4dc81006f4609b31db08fd0eab35e4fa
|
jonathan-longe/RSBC-DataHub-API
|
python/common/middleware.py
|
[
"Apache-2.0"
] |
Python
|
create_disclosure_event
|
tuple
|
def create_disclosure_event(**args) -> tuple:
"""
After a review date is scheduled, the system will send disclosure
(police documents) to the applicant. Since a business rule states
that disclosure cannot be sent immediately, we use this method to
create a disclosure event that's added to the hold queue.
"""
event_type = "send_disclosure"
config = args.get('config')
vips_application = args.get('vips_application')
args['message'] = dict({
"event_version": config.PAYLOAD_VERSION_NUMBER,
"event_date_time": datetime.now().isoformat(),
"event_type": event_type,
event_type: {
"applicant_name": "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm']),
"email": vips_application['email'],
"prohibition_number": args.get('prohibition_number'),
}
})
return True, args
|
After a review date is scheduled, the system will send disclosure
(police documents) to the applicant. Since a business rule states
that disclosure cannot be sent immediately, we use this method to
create a disclosure event that's added to the hold queue.
|
After a review date is scheduled, the system will send disclosure
(police documents) to the applicant. Since a business rule states
that disclosure cannot be sent immediately, we use this method to
create a disclosure event that's added to the hold queue.
|
[
"After",
"a",
"review",
"date",
"is",
"scheduled",
"the",
"system",
"will",
"send",
"disclosure",
"(",
"police",
"documents",
")",
"to",
"the",
"applicant",
".",
"Since",
"a",
"business",
"rule",
"states",
"that",
"disclosure",
"cannot",
"be",
"sent",
"immediately",
"we",
"use",
"this",
"method",
"to",
"create",
"a",
"disclosure",
"event",
"that",
"'",
"s",
"added",
"to",
"the",
"hold",
"queue",
"."
] |
def create_disclosure_event(**args) -> tuple:
event_type = "send_disclosure"
config = args.get('config')
vips_application = args.get('vips_application')
args['message'] = dict({
"event_version": config.PAYLOAD_VERSION_NUMBER,
"event_date_time": datetime.now().isoformat(),
"event_type": event_type,
event_type: {
"applicant_name": "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm']),
"email": vips_application['email'],
"prohibition_number": args.get('prohibition_number'),
}
})
return True, args
|
[
"def",
"create_disclosure_event",
"(",
"**",
"args",
")",
"->",
"tuple",
":",
"event_type",
"=",
"\"send_disclosure\"",
"config",
"=",
"args",
".",
"get",
"(",
"'config'",
")",
"vips_application",
"=",
"args",
".",
"get",
"(",
"'vips_application'",
")",
"args",
"[",
"'message'",
"]",
"=",
"dict",
"(",
"{",
"\"event_version\"",
":",
"config",
".",
"PAYLOAD_VERSION_NUMBER",
",",
"\"event_date_time\"",
":",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"\"event_type\"",
":",
"event_type",
",",
"event_type",
":",
"{",
"\"applicant_name\"",
":",
"\"{} {}\"",
".",
"format",
"(",
"vips_application",
"[",
"'firstGivenNm'",
"]",
",",
"vips_application",
"[",
"'surnameNm'",
"]",
")",
",",
"\"email\"",
":",
"vips_application",
"[",
"'email'",
"]",
",",
"\"prohibition_number\"",
":",
"args",
".",
"get",
"(",
"'prohibition_number'",
")",
",",
"}",
"}",
")",
"return",
"True",
",",
"args"
] |
After a review date is scheduled, the system will send disclosure
(police documents) to the applicant.
|
[
"After",
"a",
"review",
"date",
"is",
"scheduled",
"the",
"system",
"will",
"send",
"disclosure",
"(",
"police",
"documents",
")",
"to",
"the",
"applicant",
"."
] |
[
"\"\"\"\n After a review date is scheduled, the system will send disclosure\n (police documents) to the applicant. Since a business rule states\n that disclosure cannot be sent immediately, we use this method to\n create a disclosure event that's added to the hold queue.\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
from datetime import datetime
def create_disclosure_event(**args) -> tuple:
event_type = "send_disclosure"
config = args.get('config')
vips_application = args.get('vips_application')
args['message'] = dict({
"event_version": config.PAYLOAD_VERSION_NUMBER,
"event_date_time": datetime.now().isoformat(),
"event_type": event_type,
event_type: {
"applicant_name": "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm']),
"email": vips_application['email'],
"prohibition_number": args.get('prohibition_number'),
}
})
return True, args
| 1,233 | 753 |
106fefa9f290800164866a2250a2233565e109fc
|
Sreyas-108/osc_server_app
|
app/utils/KMLUtils.py
|
[
"Apache-2.0"
] |
Python
|
convertZoomToRange
|
<not_specific>
|
def convertZoomToRange(zoom):
"""Converts zoom data obtained from Google map to Range value for KMl file."""
qrange = 35200000 / (2 ** zoom)
if qrange < 300:
return 300
return qrange
|
Converts zoom data obtained from Google map to Range value for KML file.
|
Converts zoom data obtained from Google map to Range value for KML file.
|
[
"Converts",
"zoom",
"data",
"obtained",
"from",
"Google",
"map",
"to",
"Range",
"value",
"for",
"KMl",
"file",
"."
] |
def convertZoomToRange(zoom):
qrange = 35200000 / (2 ** zoom)
if qrange < 300:
return 300
return qrange
|
[
"def",
"convertZoomToRange",
"(",
"zoom",
")",
":",
"qrange",
"=",
"35200000",
"/",
"(",
"2",
"**",
"zoom",
")",
"if",
"qrange",
"<",
"300",
":",
"return",
"300",
"return",
"qrange"
] |
Converts zoom data obtained from Google map to Range value for KML file.
|
[
"Converts",
"zoom",
"data",
"obtained",
"from",
"Google",
"map",
"to",
"Range",
"value",
"for",
"KMl",
"file",
"."
] |
[
"\"\"\"Converts zoom data obtained from Google map to Range value for KMl file.\"\"\""
] |
[
{
"param": "zoom",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "zoom",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def convertZoomToRange(zoom):
qrange = 35200000 / (2 ** zoom)
if qrange < 300:
return 300
return qrange
| 1,234 | 148 |
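Worked values for convertZoomToRange; the 300 floor takes over around zoom level 17.

print(convertZoomToRange(10))  # 34375.0
print(convertZoomToRange(16))  # 537.109375
print(convertZoomToRange(17))  # 300 -- raw value would be about 268.6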
f8d4f76ea14227c9a198e24c7b67ef8d177da722
|
roboterclubaachen/xpcc
|
scons/site_tools/xpcc.py
|
[
"BSD-3-Clause"
] |
Python
|
define_header
| null |
def define_header(env, defines, header, comment, template="define_template.hpp.in"):
"""
Shows Scons how to build a header file containing defines.
The headerfile will be created in the XPCC_BUILDPATH
and the XPCC_BUILDPATH will be added to the include
search path.
:param defines: dictionary containing key value pairs
:param header: name of the header, #include<${header}>
:param comment: say because of what this file was created
"""
include_guard = header.upper().replace('.', '_')
comment = textwrap.wrap(comment, 78-len("// "))
comment = "\n".join(["// " + c for c in comment])
#c = ""
#while len(comment) > 0:
# c += "// " + comment[:70] + '\n'
# comment = comment[70:]
define_list = ["#define %s %s" % (key.upper(), value) for key, value in defines.items()]
file = env.Template(
target = os.path.join(env['XPCC_BUILDPATH'], header),
source = os.path.join(env['XPCC_ROOTPATH'], 'templates', template),
substitutions = {'defines': '\n'.join(define_list),
'include_guard': include_guard,
'comment': comment})
env.AppendUnique(CPPPATH = env['XPCC_BUILDPATH'])
|
Shows Scons how to build a header file containing defines.
The headerfile will be created in the XPCC_BUILDPATH
and the XPCC_BUILDPATH will be added to the include
search path.
:param defines: dictionary containing key value pairs
:param header: name of the header, #include<${header}>
:param comment: say because of what this file was created
|
Shows Scons how to build a header file containing defines.
The headerfile will be created in the XPCC_BUILDPATH
and the XPCC_BUILDPATH will be added to the include
search path.
|
[
"Shows",
"Scons",
"how",
"to",
"build",
"a",
"header",
"file",
"containing",
"defines",
".",
"The",
"headerfile",
"will",
"be",
"created",
"in",
"the",
"XPCC_BUILDPATH",
"and",
"the",
"XPCC_BUILDPATH",
"will",
"be",
"added",
"to",
"the",
"include",
"search",
"path",
"."
] |
def define_header(env, defines, header, comment, template="define_template.hpp.in"):
include_guard = header.upper().replace('.', '_')
comment = textwrap.wrap(comment, 78-len("// "))
comment = "\n".join(["// " + c for c in comment])
define_list = ["#define %s %s" % (key.upper(), value) for key, value in defines.items()]
file = env.Template(
target = os.path.join(env['XPCC_BUILDPATH'], header),
source = os.path.join(env['XPCC_ROOTPATH'], 'templates', template),
substitutions = {'defines': '\n'.join(define_list),
'include_guard': include_guard,
'comment': comment})
env.AppendUnique(CPPPATH = env['XPCC_BUILDPATH'])
|
[
"def",
"define_header",
"(",
"env",
",",
"defines",
",",
"header",
",",
"comment",
",",
"template",
"=",
"\"define_template.hpp.in\"",
")",
":",
"include_guard",
"=",
"header",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"comment",
"=",
"textwrap",
".",
"wrap",
"(",
"comment",
",",
"78",
"-",
"len",
"(",
"\"// \"",
")",
")",
"comment",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"// \"",
"+",
"c",
"for",
"c",
"in",
"comment",
"]",
")",
"define_list",
"=",
"[",
"\"#define %s %s\"",
"%",
"(",
"key",
".",
"upper",
"(",
")",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"defines",
".",
"items",
"(",
")",
"]",
"file",
"=",
"env",
".",
"Template",
"(",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
"[",
"'XPCC_BUILDPATH'",
"]",
",",
"header",
")",
",",
"source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
"[",
"'XPCC_ROOTPATH'",
"]",
",",
"'templates'",
",",
"template",
")",
",",
"substitutions",
"=",
"{",
"'defines'",
":",
"'\\n'",
".",
"join",
"(",
"define_list",
")",
",",
"'include_guard'",
":",
"include_guard",
",",
"'comment'",
":",
"comment",
"}",
")",
"env",
".",
"AppendUnique",
"(",
"CPPPATH",
"=",
"env",
"[",
"'XPCC_BUILDPATH'",
"]",
")"
] |
Shows Scons how to build a header file containing defines.
|
[
"Shows",
"Scons",
"how",
"to",
"build",
"a",
"header",
"file",
"containing",
"defines",
"."
] |
[
"\"\"\"\n\t\tShows Scons how to build a header file containing defines.\n\t\tThe headerfile will be created in the XPCC_BUILDPATH\n\t\tand the XPCC_BUILDPATH will be added to the include\n\t\tsearch path.\n\t\t:param defines: dictionary containing key value pairs\n\t\t:param header: name of the header, #include<${header}>\n\t\t:param comment: say because of what this file was created\n\t\"\"\"",
"#c = \"\"",
"#while len(comment) > 0:",
"#\tc += \"// \" + comment[:70] + '\\n'",
"#\tcomment = comment[70:]"
] |
[
{
"param": "env",
"type": null
},
{
"param": "defines",
"type": null
},
{
"param": "header",
"type": null
},
{
"param": "comment",
"type": null
},
{
"param": "template",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "env",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "defines",
"type": null,
"docstring": "dictionary containing key value pairs",
"docstring_tokens": [
"dictionary",
"containing",
"key",
"value",
"pairs"
],
"default": null,
"is_optional": null
},
{
"identifier": "header",
"type": null,
"docstring": "name of the header, #include<${header}>",
"docstring_tokens": [
"name",
"of",
"the",
"header",
"#include<$",
"{",
"header",
"}",
">"
],
"default": null,
"is_optional": null
},
{
"identifier": "comment",
"type": null,
"docstring": "say because of what this file was created",
"docstring_tokens": [
"say",
"because",
"of",
"what",
"this",
"file",
"was",
"created"
],
"default": null,
"is_optional": null
},
{
"identifier": "template",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import textwrap
import os
def define_header(env, defines, header, comment, template="define_template.hpp.in"):
include_guard = header.upper().replace('.', '_')
comment = textwrap.wrap(comment, 78-len("// "))
comment = "\n".join(["// " + c for c in comment])
define_list = ["#define %s %s" % (key.upper(), value) for key, value in defines.items()]
file = env.Template(
target = os.path.join(env['XPCC_BUILDPATH'], header),
source = os.path.join(env['XPCC_ROOTPATH'], 'templates', template),
substitutions = {'defines': '\n'.join(define_list),
'include_guard': include_guard,
'comment': comment})
env.AppendUnique(CPPPATH = env['XPCC_BUILDPATH'])
| 1,235 | 954 |
3511507a90558d42ce444afc7a09bf9e44c718a2
|
serkkz/RenderPipeline
|
rpcore/effect.py
|
[
"MIT"
] |
Python
|
load
|
<not_specific>
|
def load(cls, filename, options):
""" Loads an effect from a given filename with the specified options.
This lookups in the global effect cache, and checks if a similar effect
(i.e. with the same hash) was already loaded, and in that case returns it.
Otherwise a new effect with the given options is created. """
effect_hash = cls._generate_hash(filename, options)
if effect_hash in cls._GLOBAL_CACHE:
return cls._GLOBAL_CACHE[effect_hash]
effect = cls()
effect.set_options(options)
if not effect.do_load(filename):
print("Effect", "Could not load effect!")
return None
return effect
|
Loads an effect from a given filename with the specified options.
This lookups in the global effect cache, and checks if a similar effect
(i.e. with the same hash) was already loaded, and in that case returns it.
Otherwise a new effect with the given options is created.
|
Loads an effect from a given filename with the specified options.
This lookups in the global effect cache, and checks if a similar effect
was already loaded, and in that case returns it.
Otherwise a new effect with the given options is created.
|
[
"Loads",
"an",
"effect",
"from",
"a",
"given",
"filename",
"with",
"the",
"specified",
"options",
".",
"This",
"lookups",
"in",
"the",
"global",
"effect",
"cache",
"and",
"checks",
"if",
"a",
"similar",
"effect",
"was",
"already",
"loaded",
"and",
"in",
"that",
"case",
"returns",
"it",
".",
"Otherwise",
"a",
"new",
"effect",
"with",
"the",
"given",
"options",
"is",
"created",
"."
] |
def load(cls, filename, options):
effect_hash = cls._generate_hash(filename, options)
if effect_hash in cls._GLOBAL_CACHE:
return cls._GLOBAL_CACHE[effect_hash]
effect = cls()
effect.set_options(options)
if not effect.do_load(filename):
print("Effect", "Could not load effect!")
return None
return effect
|
[
"def",
"load",
"(",
"cls",
",",
"filename",
",",
"options",
")",
":",
"effect_hash",
"=",
"cls",
".",
"_generate_hash",
"(",
"filename",
",",
"options",
")",
"if",
"effect_hash",
"in",
"cls",
".",
"_GLOBAL_CACHE",
":",
"return",
"cls",
".",
"_GLOBAL_CACHE",
"[",
"effect_hash",
"]",
"effect",
"=",
"cls",
"(",
")",
"effect",
".",
"set_options",
"(",
"options",
")",
"if",
"not",
"effect",
".",
"do_load",
"(",
"filename",
")",
":",
"print",
"(",
"\"Effect\"",
",",
"\"Could not load effect!\"",
")",
"return",
"None",
"return",
"effect"
] |
Loads an effect from a given filename with the specified options.
|
[
"Loads",
"an",
"effect",
"from",
"a",
"given",
"filename",
"with",
"the",
"specified",
"options",
"."
] |
[
"\"\"\" Loads an effect from a given filename with the specified options.\n This lookups in the global effect cache, and checks if a similar effect\n (i.e. with the same hash) was already loaded, and in that case returns it.\n Otherwise a new effect with the given options is created. \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "filename",
"type": null
},
{
"param": "options",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "options",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def load(cls, filename, options):
effect_hash = cls._generate_hash(filename, options)
if effect_hash in cls._GLOBAL_CACHE:
return cls._GLOBAL_CACHE[effect_hash]
effect = cls()
effect.set_options(options)
if not effect.do_load(filename):
print("Effect", "Could not load effect!")
return None
return effect
| 1,236 | 150 |
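A hedged usage sketch for the load record above. It assumes the rpcore package from this RenderPipeline fork is importable and that the classmethod lives on a class named Effect (consistent with rpcore/effect.py); the effect filename and option names shown are hypothetical.

    from rpcore.effect import Effect

    # Identical (filename, options) pairs hash to the same cache entry,
    # so a second call returns the cached effect instead of reloading it.
    effect = Effect.load("effects/my_effect.yaml", {"render_shadow": False})
    if effect is None:
        print("effect could not be loaded")  # do_load() returned False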
438dcdb03158ff1472de966fbe8dce3174818fd4
|
nmbooker/python-funbox
|
funbox/flogic.py
|
[
"MIT"
] |
Python
|
fnot
|
<not_specific>
|
def fnot(predicate):
"""Not the return value of predicate at call time.
>>> is_odd = lambda num: bool(num % 2)
>>> is_even = fnot(is_odd)
>>> is_odd(2)
False
>>> is_even(2)
True
"""
return lambda val: not predicate(val)
|
Not the return value of predicate at call time.
>>> is_odd = lambda num: bool(num % 2)
>>> is_even = fnot(is_odd)
>>> is_odd(2)
False
>>> is_even(2)
True
|
Not the return value of predicate at call time.
|
[
"Not",
"the",
"return",
"value",
"of",
"predicate",
"at",
"call",
"time",
"."
] |
def fnot(predicate):
return lambda val: not predicate(val)
|
[
"def",
"fnot",
"(",
"predicate",
")",
":",
"return",
"lambda",
"val",
":",
"not",
"predicate",
"(",
"val",
")"
] |
Not the return value of predicate at call time.
|
[
"Not",
"the",
"return",
"value",
"of",
"predicate",
"at",
"call",
"time",
"."
] |
[
"\"\"\"Not the return value of predicate at call time.\n\n >>> is_odd = lambda num: bool(num % 2)\n >>> is_even = fnot(is_odd)\n >>> is_odd(2)\n False\n >>> is_even(2)\n True\n \"\"\""
] |
[
{
"param": "predicate",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "predicate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def fnot(predicate):
return lambda val: not predicate(val)
| 1,237 | 163 |
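A self-contained run of the fnot record above; the two-line function is inlined verbatim from the code field so the snippet executes as-is.

    def fnot(predicate):
        # Negate the wrapped predicate's result at call time.
        return lambda val: not predicate(val)

    is_odd = lambda num: bool(num % 2)
    is_even = fnot(is_odd)          # wraps is_odd; nothing is evaluated yet
    assert is_odd(2) is False
    assert is_even(2) is True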
40021efa3bf4802ac13606d9a76229cd3a6d2fcf
|
patrickleweryharris/code-snippets
|
python/Cipher/caesar_cipher.py
|
[
"MIT"
] |
Python
|
encrypt_letter
|
<not_specific>
|
def encrypt_letter(letter):
""" (str) -> str
Precondition: len(letter) == 1 and letter.isupper()
Return letter encrypted by shifting 3 places to the right.
>>> encrypt_letter('V')
'Y'
"""
# Translate to a number in the range 0-25. 'A' translates to 0, 'B' to 1,
# and so on.
ord_diff = ord(letter) - ord('A')
# Apply the right shift; we use % to handle the end of the alphabet.
# The result is still in the range 0-25.
new_char_ord = (ord_diff + 3) % 26
# Convert back to a letter.
return chr(new_char_ord + ord('A'))
|
(str) -> str
Precondition: len(letter) == 1 and letter.isupper()
Return letter encrypted by shifting 3 places to the right.
>>> encrypt_letter('V')
'Y'
|
Return letter encrypted by shifting 3 places to the right.
|
[
"Return",
"letter",
"encrypted",
"by",
"shifting",
"3",
"places",
"to",
"the",
"right",
"."
] |
def encrypt_letter(letter):
ord_diff = ord(letter) - ord('A')
new_char_ord = (ord_diff + 3) % 26
return chr(new_char_ord + ord('A'))
|
[
"def",
"encrypt_letter",
"(",
"letter",
")",
":",
"ord_diff",
"=",
"ord",
"(",
"letter",
")",
"-",
"ord",
"(",
"'A'",
")",
"new_char_ord",
"=",
"(",
"ord_diff",
"+",
"3",
")",
"%",
"26",
"return",
"chr",
"(",
"new_char_ord",
"+",
"ord",
"(",
"'A'",
")",
")"
] |
(str) -> str
Precondition: len(letter) == 1 and letter.isupper()
|
[
"(",
"str",
")",
"-",
">",
"str",
"Precondition",
":",
"len",
"(",
"letter",
")",
"==",
"1",
"and",
"letter",
".",
"isupper",
"()"
] |
[
"\"\"\" (str) -> str\n\n Precondition: len(letter) == 1 and letter.isupper()\n\n Return letter encrypted by shifting 3 places to the right.\n\n >>> encrypt_letter('V')\n 'Y'\n \"\"\"",
"# Translate to a number in the range 0-25. 'A' translates to 0, 'B' to 1,",
"# and so on.",
"# Apply the right shift; we use % to handle the end of the alphabet.",
"# The result is still in the range 0-25.",
"# Convert back to a letter."
] |
[
{
"param": "letter",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "letter",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def encrypt_letter(letter):
ord_diff = ord(letter) - ord('A')
new_char_ord = (ord_diff + 3) % 26
return chr(new_char_ord + ord('A'))
| 1,238 | 227 |
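A self-contained run of the encrypt_letter record above, extended to a whole word; the function is inlined from the code field and the input word is a hypothetical example.

    def encrypt_letter(letter):
        ord_diff = ord(letter) - ord('A')      # map 'A'..'Z' onto 0..25
        new_char_ord = (ord_diff + 3) % 26     # shift 3 right, wrapping past 'Z'
        return chr(new_char_ord + ord('A'))    # map back to a letter

    # Precondition from the docstring: single uppercase letters only.
    print(''.join(encrypt_letter(c) for c in 'VENI'))   # -> YHQL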
be2be22b69c8382ed329bbf52f447beae8d2b0e3
|
jfy133/paleomix
|
paleomix/common/bedtools.py
|
[
"MIT"
] |
Python
|
sort_bed_by_bamfile
|
<not_specific>
|
def sort_bed_by_bamfile(bamfile, regions):
"""Orders a set of BED regions, such that processing matches
(as far as possible) the layout of the BAM file. This may be
used to ensure that extraction of regions occurs (close to)
linearly."""
if not regions:
return
references = bamfile.references
indices = dict(zip(references, range(len(references))))
def _by_bam_layout(region):
return (indices[region.contig], region.start, region.end)
regions.sort(key=_by_bam_layout)
|
Orders a set of BED regions, such that processing matches
(as far as possible) the layout of the BAM file. This may be
used to ensure that extraction of regions occurs (close to)
linearly.
|
Orders a set of BED regions, such that processing matches
(as far as possible) the layout of the BAM file. This may be
used to ensure that extraction of regions occurs (close to)
linearly.
|
[
"Orders",
"a",
"set",
"of",
"BED",
"regions",
"such",
"that",
"processing",
"matches",
"(",
"as",
"far",
"as",
"possible",
")",
"the",
"layout",
"of",
"the",
"BAM",
"file",
".",
"This",
"may",
"be",
"used",
"to",
"ensure",
"that",
"extraction",
"of",
"regions",
"occurs",
"(",
"close",
"to",
")",
"linearly",
"."
] |
def sort_bed_by_bamfile(bamfile, regions):
if not regions:
return
references = bamfile.references
indices = dict(zip(references, range(len(references))))
def _by_bam_layout(region):
return (indices[region.contig], region.start, region.end)
regions.sort(key=_by_bam_layout)
|
[
"def",
"sort_bed_by_bamfile",
"(",
"bamfile",
",",
"regions",
")",
":",
"if",
"not",
"regions",
":",
"return",
"references",
"=",
"bamfile",
".",
"references",
"indices",
"=",
"dict",
"(",
"zip",
"(",
"references",
",",
"range",
"(",
"len",
"(",
"references",
")",
")",
")",
")",
"def",
"_by_bam_layout",
"(",
"region",
")",
":",
"return",
"(",
"indices",
"[",
"region",
".",
"contig",
"]",
",",
"region",
".",
"start",
",",
"region",
".",
"end",
")",
"regions",
".",
"sort",
"(",
"key",
"=",
"_by_bam_layout",
")"
] |
Orders a set of BED regions, such that processing matches
(as far as possible) the layout of the BAM file.
|
[
"Orders",
"a",
"set",
"of",
"BED",
"regions",
"such",
"that",
"processing",
"matches",
"(",
"as",
"far",
"as",
"possible",
")",
"the",
"layout",
"of",
"the",
"BAM",
"file",
"."
] |
[
"\"\"\"Orders a set of BED regions, such that processing matches\n (as far as possible) the layout of the BAM file. This may be\n used to ensure that extraction of regions occurs (close to)\n linearly.\"\"\""
] |
[
{
"param": "bamfile",
"type": null
},
{
"param": "regions",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "bamfile",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "regions",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def sort_bed_by_bamfile(bamfile, regions):
if not regions:
return
references = bamfile.references
indices = dict(zip(references, range(len(references))))
def _by_bam_layout(region):
return (indices[region.contig], region.start, region.end)
regions.sort(key=_by_bam_layout)
| 1,240 | 453 |
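A self-contained sketch of the sort_bed_by_bamfile record above. The real caller passes a pysam-style BAM handle; here lightweight stand-ins model only the attributes the function actually touches (.references, .contig, .start, .end), so the Bam and Region names are illustrative.

    from collections import namedtuple

    def sort_bed_by_bamfile(bamfile, regions):
        if not regions:
            return
        references = bamfile.references
        indices = dict(zip(references, range(len(references))))
        def _by_bam_layout(region):
            return (indices[region.contig], region.start, region.end)
        regions.sort(key=_by_bam_layout)

    Bam = namedtuple('Bam', 'references')
    Region = namedtuple('Region', 'contig start end')

    regions = [Region('chr2', 5, 10), Region('chr1', 100, 200), Region('chr1', 1, 50)]
    sort_bed_by_bamfile(Bam(references=('chr1', 'chr2')), regions)
    print(regions)  # chr1 regions first (ordered by start), then chr2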
34ce995d1eff0a90493f22fa633fb0afce4f991b
|
josuav1/MPContribs
|
mpcontribs-users/mpcontribs/users/redox_thermo_csp/rest/energy_analysis.py
|
[
"MIT"
] |
Python
|
c_p_water_liquid
|
<not_specific>
|
def c_p_water_liquid(temp):
"""
Calculates the heat capacity of liquid water.
:return: cp_water
"""
# constants: Chase, NIST-JANAF Thermochemistry tables, Fourth Edition, 1998
shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
temp_frac = temp / 1000
c_p_water = shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2)) + (
shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_water
|
Calculates the heat capacity of liquid water.
:return: cp_water
|
Calculates the heat capacity of liquid water.
|
[
"Calculates",
"the",
"heat",
"capacity",
"of",
"liquid",
"water",
"."
] |
def c_p_water_liquid(temp):
shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
temp_frac = temp / 1000
c_p_water = shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2)) + (
shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_water
|
[
"def",
"c_p_water_liquid",
"(",
"temp",
")",
":",
"shomdat",
"=",
"[",
"-",
"203.6060",
",",
"1523.290",
",",
"-",
"3196.413",
",",
"2474.455",
",",
"3.855326",
"]",
"temp_frac",
"=",
"temp",
"/",
"1000",
"c_p_water",
"=",
"shomdat",
"[",
"0",
"]",
"+",
"(",
"shomdat",
"[",
"1",
"]",
"*",
"temp_frac",
")",
"+",
"(",
"shomdat",
"[",
"2",
"]",
"*",
"(",
"temp_frac",
"**",
"2",
")",
")",
"+",
"(",
"shomdat",
"[",
"3",
"]",
"*",
"(",
"temp_frac",
"**",
"3",
")",
")",
"+",
"(",
"shomdat",
"[",
"4",
"]",
"/",
"(",
"temp_frac",
"**",
"2",
")",
")",
"return",
"c_p_water"
] |
Calculates the heat capacity of liquid water.
|
[
"Calculates",
"the",
"heat",
"capacity",
"of",
"liquid",
"water",
"."
] |
[
"\"\"\"\n Calculates the heat capacity of liquid water.\n :return: cp_water\n \"\"\"",
"# constants: Chase, NIST-JANAF Thermochemistry tables, Fourth Edition, 1998"
] |
[
{
"param": "temp",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "temp",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def c_p_water_liquid(temp):
shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
temp_frac = temp / 1000
c_p_water = shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2)) + (
shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_water
| 1,241 | 645 |
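A self-contained run of the c_p_water_liquid record above; the Shomate fit it implements targets liquid water, and temp is assumed to be in kelvin.

    def c_p_water_liquid(temp):
        # Shomate coefficients, NIST-JANAF Thermochemistry tables, 4th ed. (1998).
        shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
        t = temp / 1000
        return (shomdat[0] + shomdat[1] * t + shomdat[2] * t ** 2
                + shomdat[3] * t ** 3 + shomdat[4] / t ** 2)

    # Around room temperature this lands near the textbook value of
    # roughly 75.3 J/(mol*K) for liquid water.
    print(c_p_water_liquid(298.15))   # ~75.4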
be1b43d8c02ae938e4e01727eaea4408dfe3f6c4
|
g4brielvs/mobility_data_retrieval
|
src/taxonomy.py
|
[
"MIT"
] |
Python
|
fix_taxonomy_from_file
|
<not_specific>
|
def fix_taxonomy_from_file(data=dict(), names=list()):
"""
Fixes a dictionary with default keys
Args:
src (str): path to the data structure file
"""
for i, item in enumerate(names, 1):
key = '{:02}00-nr'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
key = '{:02}99-na'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
return data
|
Fixes a dictionary with default keys
Args:
src (str): path to the data structure file
|
Fixes a dictionary with default keys
|
[
"Fixes",
"a",
"dictionary",
"with",
"default",
"keys"
] |
def fix_taxonomy_from_file(data=dict(), names=list()):
for i, item in enumerate(names, 1):
key = '{:02}00-nr'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
key = '{:02}99-na'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
return data
|
[
"def",
"fix_taxonomy_from_file",
"(",
"data",
"=",
"dict",
"(",
")",
",",
"names",
"=",
"list",
"(",
")",
")",
":",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"names",
",",
"1",
")",
":",
"key",
"=",
"'{:02}00-nr'",
".",
"format",
"(",
"i",
")",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"{",
"'nameSeq'",
":",
"i",
",",
"'txID'",
":",
"key",
"}",
"key",
"=",
"'{:02}99-na'",
".",
"format",
"(",
"i",
")",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"{",
"'nameSeq'",
":",
"i",
",",
"'txID'",
":",
"key",
"}",
"return",
"data"
] |
Fixes a dictionary with default keys
|
[
"Fixes",
"a",
"dictionary",
"with",
"default",
"keys"
] |
[
"\"\"\"\n Fixes a dictionary with default keys\n\n Args:\n src (str): path to the data structure file\n \"\"\""
] |
[
{
"param": "data",
"type": null
},
{
"param": "names",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "names",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "src",
"type": null,
"docstring": "path to the data structure file",
"docstring_tokens": [
"path",
"to",
"the",
"data",
"structure",
"file"
],
"default": null,
"is_optional": false
}
],
"others": []
}
|
def fix_taxonomy_from_file(data=dict(), names=list()):
for i, item in enumerate(names, 1):
key = '{:02}00-nr'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
key = '{:02}99-na'.format(i)
if key not in data:
data[key] = {'nameSeq' : i, 'txID' : key}
return data
| 1,244 | 509 |
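A self-contained run of the fix_taxonomy_from_file record above with hypothetical names. Note that the record's signature uses mutable defaults (data=dict(), names=list()), which Python evaluates once at definition time, so callers are safer passing fresh containers explicitly as done here.

    def fix_taxonomy_from_file(data=dict(), names=list()):
        for i, item in enumerate(names, 1):
            key = '{:02}00-nr'.format(i)
            if key not in data:
                data[key] = {'nameSeq': i, 'txID': key}
            key = '{:02}99-na'.format(i)
            if key not in data:
                data[key] = {'nameSeq': i, 'txID': key}
        return data

    # One 'xx00-nr' and one 'xx99-na' placeholder per name, unless present.
    out = fix_taxonomy_from_file({}, ['transit', 'taxi'])
    print(sorted(out))   # ['0100-nr', '0199-na', '0200-nr', '0299-na']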
ccd36be74be89f952cf30a6f17454afcb04d8eea
|
ggchappell/GraphR
|
genramsey.py
|
[
"MIT"
] |
Python
|
is_clique
|
<not_specific>
|
def is_clique(g, s):
"""Predicate. Return True if s is a clique in g.
s is a list or tuple whose items are vertices of g. We determine
whether s, considered as a set of vertices, induces a complete
subgraph of g. We return True if so.
This function is an induced-hereditary predicate.
Arguments:
g -- graph
s -- list or tuple of vertices of g; represents set of vertices
See isograph.py for our graph representation.
>>> g = [ [1], [0,2,3], [1,3], [1,2] ]
>>> s = [0,1]
>>> is_clique(g, s)
True
>>> s = [1,2,3]
>>> is_clique(g, s)
True
>>> s = [0,2]
>>> is_clique(g, s)
False
"""
for v in s:
for x in s:
if x != v and x not in g[v]:
return False
return True
|
Predicate. Return True if s is a clique in g.
s is a list or tuple whose items are vertices of g. We determine
whether s, considered as a set of vertices, induces a complete
subgraph of g. We return True if so.
This function is an induced-hereditary predicate.
Arguments:
g -- graph
s -- list or tuple of vertices of g; represents set of vertices
See isograph.py for our graph representation.
>>> g = [ [1], [0,2,3], [1,3], [1,2] ]
>>> s = [0,1]
>>> is_clique(g, s)
True
>>> s = [1,2,3]
>>> is_clique(g, s)
True
>>> s = [0,2]
>>> is_clique(g, s)
False
|
Predicate. Return True if s is a clique in g.
s is a list or tuple whose items are vertices of g. We determine
whether s, considered as a set of vertices, induces a complete
subgraph of g. We return True if so.
This function is an induced-hereditary predicate.
g -- graph
s -- list or tuple of vertices of g; represents set of vertices
See isograph.py for our graph representation.
|
[
"Predicate",
".",
"Return",
"True",
"if",
"s",
"is",
"a",
"clique",
"in",
"g",
".",
"s",
"is",
"a",
"list",
"or",
"tuple",
"whose",
"items",
"are",
"vertices",
"of",
"g",
".",
"We",
"determine",
"whether",
"s",
"considered",
"as",
"a",
"set",
"of",
"vertices",
"induces",
"a",
"complete",
"subgraph",
"of",
"g",
".",
"We",
"return",
"True",
"if",
"so",
".",
"This",
"function",
"is",
"an",
"induced",
"-",
"hereditary",
"predicate",
".",
"-",
"graph",
"s",
"--",
"list",
"or",
"tuple",
"of",
"vertices",
"of",
"g",
";",
"represents",
"set",
"of",
"vertices",
"See",
"isograph",
".",
"py",
"for",
"our",
"graph",
"representation",
"."
] |
def is_clique(g, s):
for v in s:
for x in s:
if x != v and x not in g[v]:
return False
return True
|
[
"def",
"is_clique",
"(",
"g",
",",
"s",
")",
":",
"for",
"v",
"in",
"s",
":",
"for",
"x",
"in",
"s",
":",
"if",
"x",
"!=",
"v",
"and",
"x",
"not",
"in",
"g",
"[",
"v",
"]",
":",
"return",
"False",
"return",
"True"
] |
Predicate.
|
[
"Predicate",
"."
] |
[
"\"\"\"Predicate. Return True if s is a clique in g.\n\n s is a list or tuple whose items are vertices of g. We determine\n whether s, considered as a set of vertices, induces a complete\n subgraph of g. We return True if so.\n\n This function is an induced-hereditary predicate.\n\n Arguments:\n g -- graph\n s -- list or tuple of vertices of g; represents set of vertices\n\n See isograph.py for our graph representation.\n\n >>> g = [ [1], [0,2,3], [1,3], [1,2] ]\n >>> s = [0,1]\n >>> is_clique(g, s)\n True\n >>> s = [1,2,3]\n >>> is_clique(g, s)\n True\n >>> s = [0,2]\n >>> is_clique(g, s)\n False\n\n \"\"\""
] |
[
{
"param": "g",
"type": null
},
{
"param": "s",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "g",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_clique(g, s):
for v in s:
for x in s:
if x != v and x not in g[v]:
return False
return True
| 1,245 | 526 |
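A self-contained run of the is_clique record above, reusing the adjacency-list graph from its doctest.

    def is_clique(g, s):
        # Every vertex in s must list every other vertex of s as a neighbour.
        for v in s:
            for x in s:
                if x != v and x not in g[v]:
                    return False
        return True

    g = [[1], [0, 2, 3], [1, 3], [1, 2]]   # g[i] = neighbours of vertex i
    print(is_clique(g, [0, 1]))       # True
    print(is_clique(g, [1, 2, 3]))    # True  (a triangle)
    print(is_clique(g, [0, 2]))       # False (0 and 2 are not adjacent)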
9940bde322d48436c5dd8db0137e299dfeac3212
|
travcunn/me-virtual-machine
|
snake/cli.py
|
[
"BSD-3-Clause"
] |
Python
|
create_compiler_parser
|
<not_specific>
|
def create_compiler_parser():
""" Create an ArgumentParser for the compiler. """
parser = argparse.ArgumentParser(
description='A snake language compiler.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"file",
help="file to be compiled."
)
parser.add_argument(
'-o', '--outfile', default=None, required=False,
help='output file'
)
return parser
|
Create an ArgumentParser for the compiler.
|
Create an ArgumentParser for the compiler.
|
[
"Create",
"an",
"ArgumentParser",
"for",
"the",
"compiler",
"."
] |
def create_compiler_parser():
parser = argparse.ArgumentParser(
description='A snake language compiler.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"file",
help="file to be compiled."
)
parser.add_argument(
'-o', '--outfile', default=None, required=False,
help='output file'
)
return parser
|
[
"def",
"create_compiler_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'A snake language compiler.'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"\"file\"",
",",
"help",
"=",
"\"file to be compiled.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--outfile'",
",",
"default",
"=",
"None",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'output file'",
")",
"return",
"parser"
] |
Create an ArgumentParser for the compiler.
|
[
"Create",
"an",
"ArgumentParser",
"for",
"the",
"compiler",
"."
] |
[
"\"\"\" Create an ArgumentParser for the compiler. \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import argparse
def create_compiler_parser():
parser = argparse.ArgumentParser(
description='A snake language compiler.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"file",
help="file to be compiled."
)
parser.add_argument(
'-o', '--outfile', default=None, required=False,
help='output file'
)
return parser
| 1,246 | 882 |
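A self-contained run of the create_compiler_parser record above; the command-line arguments fed to parse_args are hypothetical.

    import argparse

    def create_compiler_parser():
        parser = argparse.ArgumentParser(
            description='A snake language compiler.',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("file", help="file to be compiled.")
        parser.add_argument('-o', '--outfile', default=None, required=False,
                            help='output file')
        return parser

    # Parse an explicit argv list instead of sys.argv.
    args = create_compiler_parser().parse_args(['prog.snake', '-o', 'prog.out'])
    print(args.file, args.outfile)   # prog.snake prog.out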
4cb58e9d8b5efe7b607d0961538a9bb7003b13d7
|
massung/bioindex
|
bioindex/lib/s3.py
|
[
"BSD-3-Clause"
] |
Python
|
is_absolute
|
<not_specific>
|
def is_absolute(s3_key):
"""
True if the s3 key points to an absolute bucket location.
"""
return s3_key.startsWith('s3://')
|
True if the s3 key points to an absolute bucket location.
|
True if the s3 key points to an absolute bucket location.
|
[
"True",
"if",
"the",
"s3",
"key",
"points",
"to",
"an",
"absolute",
"bucket",
"location",
"."
] |
def is_absolute(s3_key):
    return s3_key.startswith('s3://')
|
[
"def",
"is_absolute",
"(",
"s3_key",
")",
":",
"return",
"s3_key",
".",
"startsWith",
"(",
"'s3://'",
")"
] |
True if the s3 key points to an absolute bucket location.
|
[
"True",
"if",
"the",
"s3",
"key",
"points",
"to",
"an",
"absolute",
"bucket",
"location",
"."
] |
[
"\"\"\"\n True if the s3 key points to an absolute bucket location.\n \"\"\""
] |
[
{
"param": "s3_key",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "s3_key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_absolute(s3_key):
    return s3_key.startswith('s3://')
| 1,247 | 1,010 |
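A self-contained check of the is_absolute record above. The upstream source calls str.startsWith, which does not exist in Python and raises AttributeError at runtime, so the sketch uses the working startswith; the bucket keys shown are hypothetical.

    def is_absolute(s3_key):
        # str.startswith is the real method; Python's str has no startsWith.
        return s3_key.startswith('s3://')

    print(is_absolute('s3://my-bucket/variants/part-00000'))  # True
    print(is_absolute('variants/part-00000'))                 # False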
ed22d8580dffd8b5e5e5ff171165db0a99eb8859
|
AMReX-Astro/wdmerger
|
analysis/wdmerger.py
|
[
"MIT"
] |
Python
|
make_movie
| null |
def make_movie(output_dir, img_list, mpg_filename):
"""Make an MPEG movie from a list of image files."""
import os
import shutil
import glob
cwd = os.getcwd()
# Get file extension; assume all files have the same format.
file_format = '.' + img_list[0].split('.')[-1]
# Copy them to a series of files that are indexed by
# monotonically increasing integers (this is needed for ffmpeg).
in_list = [output_dir + '/temp_' + '{:05d}'.format(i) + file_format for i, img in enumerate(img_list)]
for img_new, img_old in zip(in_list, img_list):
try:
shutil.copyfile(img_old, img_new)
except:
print("Error: source file " + img_old + " does not exist.")
pass
try:
os.system('ffmpeg -i ' + output_dir + '/temp_\%05d' + file_format + ' -b:v 20M ' + mpg_filename)
except:
print("Error: could not successfully make a movie with ffmpeg.")
pass
for img in in_list:
os.remove(img)
|
Make an MPEG movie from a list of image files.
|
Make an MPEG movie from a list of image files.
|
[
"Make",
"an",
"MPEG",
"movie",
"from",
"a",
"list",
"of",
"image",
"files",
"."
] |
def make_movie(output_dir, img_list, mpg_filename):
import os
import shutil
import glob
cwd = os.getcwd()
file_format = '.' + img_list[0].split('.')[-1]
in_list = [output_dir + '/temp_' + '{:05d}'.format(i) + file_format for i, img in enumerate(img_list)]
for img_new, img_old in zip(in_list, img_list):
try:
shutil.copyfile(img_old, img_new)
except:
print("Error: source file " + img_old + " does not exist.")
pass
try:
os.system('ffmpeg -i ' + output_dir + '/temp_\%05d' + file_format + ' -b:v 20M ' + mpg_filename)
except:
print("Error: could not successfully make a movie with ffmpeg.")
pass
for img in in_list:
os.remove(img)
|
[
"def",
"make_movie",
"(",
"output_dir",
",",
"img_list",
",",
"mpg_filename",
")",
":",
"import",
"os",
"import",
"shutil",
"import",
"glob",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"file_format",
"=",
"'.'",
"+",
"img_list",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"in_list",
"=",
"[",
"output_dir",
"+",
"'/temp_'",
"+",
"'{:05d}'",
".",
"format",
"(",
"i",
")",
"+",
"file_format",
"for",
"i",
",",
"img",
"in",
"enumerate",
"(",
"img_list",
")",
"]",
"for",
"img_new",
",",
"img_old",
"in",
"zip",
"(",
"in_list",
",",
"img_list",
")",
":",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"img_old",
",",
"img_new",
")",
"except",
":",
"print",
"(",
"\"Error: source file \"",
"+",
"img_old",
"+",
"\" does not exist.\"",
")",
"pass",
"try",
":",
"os",
".",
"system",
"(",
"'ffmpeg -i '",
"+",
"output_dir",
"+",
"'/temp_\\%05d'",
"+",
"file_format",
"+",
"' -b:v 20M '",
"+",
"mpg_filename",
")",
"except",
":",
"print",
"(",
"\"Error: could not successfully make a movie with ffmpeg.\"",
")",
"pass",
"for",
"img",
"in",
"in_list",
":",
"os",
".",
"remove",
"(",
"img",
")"
] |
Make an MPEG movie from a list of image files.
|
[
"Make",
"an",
"MPEG",
"movie",
"from",
"a",
"list",
"of",
"image",
"files",
"."
] |
[
"\"\"\"Make an MPEG movie from a list of image files.\"\"\"",
"# Get file extension; assume all files have the same format.",
"# Copy them to a series of files that are indexed by",
"# monotonically increasing integers (this is needed for ffmpeg)."
] |
[
{
"param": "output_dir",
"type": null
},
{
"param": "img_list",
"type": null
},
{
"param": "mpg_filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "output_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "img_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "mpg_filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import shutil
import os
def make_movie(output_dir, img_list, mpg_filename):
import os
import shutil
import glob
cwd = os.getcwd()
file_format = '.' + img_list[0].split('.')[-1]
in_list = [output_dir + '/temp_' + '{:05d}'.format(i) + file_format for i, img in enumerate(img_list)]
for img_new, img_old in zip(in_list, img_list):
try:
shutil.copyfile(img_old, img_new)
except:
print("Error: source file " + img_old + " does not exist.")
pass
try:
os.system('ffmpeg -i ' + output_dir + '/temp_\%05d' + file_format + ' -b:v 20M ' + mpg_filename)
except:
print("Error: could not successfully make a movie with ffmpeg.")
pass
for img in in_list:
os.remove(img)
| 1,248 | 508 |
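A hedged invocation sketch for the make_movie record above. It is not runnable as-is: it assumes make_movie is in scope, ffmpeg is on PATH, and PNG frames exist at the placeholder path shown.

    import glob

    # Hypothetical frame directory and output filename.
    frames = sorted(glob.glob('plots/density_*.png'))
    if frames:   # make_movie indexes img_list[0], so guard against no frames
        make_movie('plots', frames, 'density.mpg')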
99a052891e6cd7fe40131a5877548a2fc4be0fa5
|
dbmi-bgm/granite
|
granite/SVqcVCF.py
|
[
"MIT"
] |
Python
|
_genotype
| null |
def _genotype(vnt_obj, ID, var_type, stat_dict):
''' genotype information, update counts for ID '''
GT = vnt_obj.get_genotype_value(ID, 'GT').replace('|', '/')
if GT not in ['0/0', './.']: # sample has variant
stat_dict[ID][var_type]['total'] += 1
#end if
|
genotype information, update counts for ID
|
genotype information, update counts for ID
|
[
"genotype",
"information",
"update",
"counts",
"for",
"ID"
] |
def _genotype(vnt_obj, ID, var_type, stat_dict):
GT = vnt_obj.get_genotype_value(ID, 'GT').replace('|', '/')
if GT not in ['0/0', './.']:
stat_dict[ID][var_type]['total'] += 1
|
[
"def",
"_genotype",
"(",
"vnt_obj",
",",
"ID",
",",
"var_type",
",",
"stat_dict",
")",
":",
"GT",
"=",
"vnt_obj",
".",
"get_genotype_value",
"(",
"ID",
",",
"'GT'",
")",
".",
"replace",
"(",
"'|'",
",",
"'/'",
")",
"if",
"GT",
"not",
"in",
"[",
"'0/0'",
",",
"'./.'",
"]",
":",
"stat_dict",
"[",
"ID",
"]",
"[",
"var_type",
"]",
"[",
"'total'",
"]",
"+=",
"1"
] |
genotype information, update counts for ID
|
[
"genotype",
"information",
"update",
"counts",
"for",
"ID"
] |
[
"''' genotype information, update counts for ID '''",
"# sample has variant",
"#end if"
] |
[
{
"param": "vnt_obj",
"type": null
},
{
"param": "ID",
"type": null
},
{
"param": "var_type",
"type": null
},
{
"param": "stat_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "vnt_obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ID",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "var_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "stat_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _genotype(vnt_obj, ID, var_type, stat_dict):
GT = vnt_obj.get_genotype_value(ID, 'GT').replace('|', '/')
if GT not in ['0/0', './.']:
stat_dict[ID][var_type]['total'] += 1
| 1,249 | 307 |
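A self-contained run of the _genotype record above. The real caller passes a granite variant object; here a minimal stand-in models only get_genotype_value, and the sample ID, variant type, and genotypes are hypothetical.

    class FakeVariant:
        # Stand-in exposing only the method _genotype actually calls.
        def __init__(self, genotypes):
            self.genotypes = genotypes              # e.g. {'SAMPLE1': '0|1'}
        def get_genotype_value(self, ID, field):
            return self.genotypes[ID]               # 'GT' is the only field used

    def _genotype(vnt_obj, ID, var_type, stat_dict):
        GT = vnt_obj.get_genotype_value(ID, 'GT').replace('|', '/')
        if GT not in ['0/0', './.']:                # sample carries the variant
            stat_dict[ID][var_type]['total'] += 1

    stats = {'SAMPLE1': {'DEL': {'total': 0}}}
    _genotype(FakeVariant({'SAMPLE1': '0|1'}), 'SAMPLE1', 'DEL', stats)
    print(stats['SAMPLE1']['DEL']['total'])         # 1 (phased 0|1 counts too)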