code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def time_in_range(self):
"""Return true if current time is in the active range"""
curr = datetime.datetime.now().time()
if self.start_time <= self.end_time:
return self.start_time <= curr <= self.end_time
else:
return self.start_time <= curr or curr <= self.end_time | Return true if current time is in the active range | Below is the instruction that describes the task:
### Input:
Return true if current time is in the active range
### Response:
def time_in_range(self):
"""Return true if current time is in the active range"""
curr = datetime.datetime.now().time()
if self.start_time <= self.end_time:
return self.start_time <= curr <= self.end_time
else:
return self.start_time <= curr or curr <= self.end_time |
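The branch on `start_time <= end_time` is what lets the check handle active ranges that wrap past midnight. A minimal standalone sketch of the same comparison, with the instance attributes replaced by explicit `datetime.time` parameters so it runs on its own:

```python
import datetime

def time_in_range(start, end, current):
    """Return True if current falls inside [start, end], wrapping past midnight when start > end."""
    if start <= end:
        return start <= current <= end
    # Wrapped range such as 22:00-06:00: match either side of midnight.
    return start <= current or current <= end

print(time_in_range(datetime.time(22, 0), datetime.time(6, 0), datetime.time(23, 30)))  # True
print(time_in_range(datetime.time(22, 0), datetime.time(6, 0), datetime.time(12, 0)))   # False
```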
def load_config():
"""Load a keyring using the config file in the config root."""
filename = 'keyringrc.cfg'
keyring_cfg = os.path.join(platform.config_root(), filename)
if not os.path.exists(keyring_cfg):
return
config = configparser.RawConfigParser()
config.read(keyring_cfg)
_load_keyring_path(config)
# load the keyring class name, and then load this keyring
try:
if config.has_section("backend"):
keyring_name = config.get("backend", "default-keyring").strip()
else:
raise configparser.NoOptionError('backend', 'default-keyring')
except (configparser.NoOptionError, ImportError):
logger = logging.getLogger('keyring')
logger.warning("Keyring config file contains incorrect values.\n"
+ "Config file: %s" % keyring_cfg)
return
return load_keyring(keyring_name) | Load a keyring using the config file in the config root. | Below is the instruction that describes the task:
### Input:
Load a keyring using the config file in the config root.
### Response:
def load_config():
"""Load a keyring using the config file in the config root."""
filename = 'keyringrc.cfg'
keyring_cfg = os.path.join(platform.config_root(), filename)
if not os.path.exists(keyring_cfg):
return
config = configparser.RawConfigParser()
config.read(keyring_cfg)
_load_keyring_path(config)
# load the keyring class name, and then load this keyring
try:
if config.has_section("backend"):
keyring_name = config.get("backend", "default-keyring").strip()
else:
raise configparser.NoOptionError('backend', 'default-keyring')
except (configparser.NoOptionError, ImportError):
logger = logging.getLogger('keyring')
logger.warning("Keyring config file contains incorrect values.\n"
+ "Config file: %s" % keyring_cfg)
return
return load_keyring(keyring_name) |
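For reference, a small sketch of the `keyringrc.cfg` shape this loader expects, exercised with `configparser` directly; the backend path shown is only an example value, not something mandated by the function above:

```python
import configparser

sample = """
[backend]
default-keyring = keyring.backends.SecretService.Keyring
"""

config = configparser.RawConfigParser()
config.read_string(sample)
if config.has_section("backend"):
    # Mirrors the lookup performed by load_config() before calling load_keyring().
    print(config.get("backend", "default-keyring").strip())
```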
def sayHello(self, name="Not given", message="nothing"):
"""
Synchronous implementation of IHello.sayHello synchronous method.
The remote calling thread will be blocked until this is executed and
responds.
"""
print(
"Python.sayHello called by: {0} "
"with message: '{1}'".format(name, message)
)
return (
"PythonSync says: Howdy {0} "
"that's a nice runtime you got there".format(name)
) | Synchronous implementation of IHello.sayHello synchronous method.
The remote calling thread will be blocked until this is executed and
responds. | Below is the instruction that describes the task:
### Input:
Synchronous implementation of IHello.sayHello synchronous method.
The remote calling thread will be blocked until this is executed and
responds.
### Response:
def sayHello(self, name="Not given", message="nothing"):
"""
Synchronous implementation of IHello.sayHello synchronous method.
The remote calling thread will be blocked until this is executed and
responds.
"""
print(
"Python.sayHello called by: {0} "
"with message: '{1}'".format(name, message)
)
return (
"PythonSync says: Howdy {0} "
"that's a nice runtime you got there".format(name)
) |
def set_connection(host=None, database=None, user=None, password=None):
"""Set connection parameters. Call set_connection with no arguments to clear."""
c.CONNECTION['HOST'] = host
c.CONNECTION['DATABASE'] = database
c.CONNECTION['USER'] = user
c.CONNECTION['PASSWORD'] = password | Set connection parameters. Call set_connection with no arguments to clear. | Below is the instruction that describes the task:
### Input:
Set connection parameters. Call set_connection with no arguments to clear.
### Response:
def set_connection(host=None, database=None, user=None, password=None):
"""Set connection parameters. Call set_connection with no arguments to clear."""
c.CONNECTION['HOST'] = host
c.CONNECTION['DATABASE'] = database
c.CONNECTION['USER'] = user
c.CONNECTION['PASSWORD'] = password |
def __build_author_name_expr(author_name, author_email_address):
"""
Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
@param author_name: complete name of the originator of the message.
@param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
@return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message.
"""
assert author_name or author_email_address, 'Both arguments MUST NOT be null'
# Use the specified name of the author or the username of his email
# address.
author_name_expr = author_name or author_email_address[:author_email_address.find('@')]
# Escape the name of the author if it contains a space character.
if ' ' in author_name_expr:
author_name_expr = '"%s"' % author_name_expr
# Complete the name of the author with his email address when specified.
if author_email_address:
author_name_expr = '%s <%s>' % (author_name_expr, author_email_address)
return author_name_expr | Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
@param author_name: complete name of the originator of the message.
@param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
@return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message. | Below is the instruction that describes the task:
### Input:
Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
@param author_name: complete name of the originator of the message.
@param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
@return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message.
### Response:
def __build_author_name_expr(author_name, author_email_address):
"""
Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
@param author_name: complete name of the originator of the message.
@param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
@return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message.
"""
assert author_name or author_email_address, 'Both arguments MUST NOT be null'
# Use the specified name of the author or the username of his email
# address.
author_name_expr = author_name or author_email_address[:author_email_address.find('@')]
# Escape the name of the author if it contains a space character.
if ' ' in author_name_expr:
author_name_expr = '"%s"' % author_name_expr
# Complete the name of the author with his email address when specified.
if author_email_address:
author_name_expr = '%s <%s>' % (author_name_expr, author_email_address)
return author_name_expr |
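To illustrate the "From:" strings this produces, here is a trimmed re-statement of the same logic with two sample calls; the names and addresses are made up for the example:

```python
def build_author_name_expr(author_name, author_email_address):
    """Simplified mirror of the helper above, for illustration only."""
    assert author_name or author_email_address
    expr = author_name or author_email_address[:author_email_address.find('@')]
    if ' ' in expr:
        expr = '"%s"' % expr
    if author_email_address:
        expr = '%s <%s>' % (expr, author_email_address)
    return expr

print(build_author_name_expr('Ada Lovelace', 'ada@example.com'))  # "Ada Lovelace" <ada@example.com>
print(build_author_name_expr(None, 'ada@example.com'))            # ada <ada@example.com>
```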
def zip_entry_rollup(zipfile):
"""
returns a tuple of (files, dirs, size_uncompressed,
size_compressed). files+dirs will equal len(zipfile.infolist)
"""
files = dirs = 0
total_c = total_u = 0
for i in zipfile.infolist():
if i.filename[-1] == '/':
# I wonder if there's a better detection method than this
dirs += 1
else:
files += 1
total_c += i.compress_size
total_u += i.file_size
return files, dirs, total_c, total_u | returns a tuple of (files, dirs, size_uncompressed,
size_compressed). files+dirs will equal len(zipfile.infolist) | Below is the instruction that describes the task:
### Input:
returns a tuple of (files, dirs, size_uncompressed,
size_compressed). files+dirs will equal len(zipfile.infolist)
### Response:
def zip_entry_rollup(zipfile):
"""
returns a tuple of (files, dirs, size_uncompressed,
size_compressed). files+dirs will equal len(zipfile.infolist)
"""
files = dirs = 0
total_c = total_u = 0
for i in zipfile.infolist():
if i.filename[-1] == '/':
# I wonder if there's a better detection method than this
dirs += 1
else:
files += 1
total_c += i.compress_size
total_u += i.file_size
return files, dirs, total_c, total_u |
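A self-contained sketch of how the rollup behaves on an in-memory archive; the helper below mirrors the logic above rather than importing the original module, and note that, per the return statement, the size totals come back compressed-first:

```python
import io
import zipfile

def zip_entry_rollup(zf):
    """Count file and directory entries plus total (compressed, uncompressed) bytes."""
    files = dirs = 0
    total_c = total_u = 0
    for info in zf.infolist():
        if info.filename.endswith('/'):   # directory entries carry a trailing slash
            dirs += 1
        else:
            files += 1
            total_c += info.compress_size
            total_u += info.file_size
    return files, dirs, total_c, total_u

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
    zf.writestr('docs/', '')                        # explicit directory entry
    zf.writestr('docs/readme.txt', 'hello ' * 100)
with zipfile.ZipFile(buf) as zf:
    print(zip_entry_rollup(zf))                     # (1, 1, <small compressed size>, 600)
```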
def _get_reference(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._get_reference()
self.cubeA_body_id = self.sim.model.body_name2id("cubeA")
self.cubeB_body_id = self.sim.model.body_name2id("cubeB")
self.l_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
]
self.r_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
]
self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA")
self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB") | Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data. | Below is the instruction that describes the task:
### Input:
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
### Response:
def _get_reference(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._get_reference()
self.cubeA_body_id = self.sim.model.body_name2id("cubeA")
self.cubeB_body_id = self.sim.model.body_name2id("cubeB")
self.l_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
]
self.r_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
]
self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA")
self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB") |
def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app | Factory for a minimal application | Below is the instruction that describes the task:
### Input:
Factory for a minimal application
### Response:
def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app |
def validate_data(self):
"""
Validate specimen, sample, site, and location data.
"""
warnings = {}
spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {}
if self.specimens:
spec_warnings = self.validate_items(self.specimens, 'specimen')
if self.samples:
samp_warnings = self.validate_items(self.samples, 'sample')
if self.sites:
site_warnings = self.validate_items(self.sites, 'site')
if self.locations:
loc_warnings = self.validate_items(self.locations, 'location')
return spec_warnings, samp_warnings, site_warnings, loc_warnings | Validate specimen, sample, site, and location data. | Below is the instruction that describes the task:
### Input:
Validate specimen, sample, site, and location data.
### Response:
def validate_data(self):
"""
Validate specimen, sample, site, and location data.
"""
warnings = {}
spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {}
if self.specimens:
spec_warnings = self.validate_items(self.specimens, 'specimen')
if self.samples:
samp_warnings = self.validate_items(self.samples, 'sample')
if self.sites:
site_warnings = self.validate_items(self.sites, 'site')
if self.locations:
loc_warnings = self.validate_items(self.locations, 'location')
return spec_warnings, samp_warnings, site_warnings, loc_warnings |
def get_locations():
"""
Pull the accounts locations.
"""
arequest = requests.get(LOCATIONS_URL, headers=HEADERS)
status_code = str(arequest.status_code)
if status_code == '401':
_LOGGER.error("Token expired.")
return False
return arequest.json() | Pull the accounts locations. | Below is the instruction that describes the task:
### Input:
Pull the accounts locations.
### Response:
def get_locations():
"""
Pull the accounts locations.
"""
arequest = requests.get(LOCATIONS_URL, headers=HEADERS)
status_code = str(arequest.status_code)
if status_code == '401':
_LOGGER.error("Token expired.")
return False
return arequest.json() |
def guess_labels(self, doc):
"""
return a prediction of label names
"""
if doc.nb_pages <= 0:
return set()
self.label_guesser.total_nb_documents = len(self._docs_by_id.keys())
label_names = self.label_guesser.guess(doc)
labels = set()
for label_name in label_names:
label = self.labels[label_name]
labels.add(label)
return labels | return a prediction of label names | Below is the instruction that describes the task:
### Input:
return a prediction of label names
### Response:
def guess_labels(self, doc):
"""
return a prediction of label names
"""
if doc.nb_pages <= 0:
return set()
self.label_guesser.total_nb_documents = len(self._docs_by_id.keys())
label_names = self.label_guesser.guess(doc)
labels = set()
for label_name in label_names:
label = self.labels[label_name]
labels.add(label)
return labels |
def _at_extend(self, calculator, rule, scope, block):
"""
Implements @extend
"""
from scss.selector import Selector
selectors = calculator.apply_vars(block.argument)
rule.extends_selectors.extend(Selector.parse_many(selectors)) | Implements @extend | Below is the instruction that describes the task:
### Input:
Implements @extend
### Response:
def _at_extend(self, calculator, rule, scope, block):
"""
Implements @extend
"""
from scss.selector import Selector
selectors = calculator.apply_vars(block.argument)
rule.extends_selectors.extend(Selector.parse_many(selectors)) |
def wrap(x):
"""
Wraps an element or integer type by serializing it and base64 encoding
the resulting bytes.
"""
# Detect the type so we can call the proper serialization routine
if isinstance(x, G1Element):
return _wrap(x, serializeG1)
elif isinstance(x, G2Element):
return _wrap(x, serializeG2)
elif isinstance(x, GtElement):
return _wrap(x, serializeGt)
elif isinstance(x, str):
return x
elif isinstance(x, (int, long, BigInt)):
return hex(long(x))
# All other items
else:
raise NotImplementedError("Cannot unwrap {}; only types {} supported".
format(type(x),
[G1Element, G2Element, GtElement, int, long, BigInt]) ) | Wraps an element or integer type by serializing it and base64 encoding
the resulting bytes. | Below is the instruction that describes the task:
### Input:
Wraps an element or integer type by serializing it and base64 encoding
the resulting bytes.
### Response:
def wrap(x):
"""
Wraps an element or integer type by serializing it and base64 encoding
the resulting bytes.
"""
# Detect the type so we can call the proper serialization routine
if isinstance(x, G1Element):
return _wrap(x, serializeG1)
elif isinstance(x, G2Element):
return _wrap(x, serializeG2)
elif isinstance(x, GtElement):
return _wrap(x, serializeGt)
elif isinstance(x, str):
return x
elif isinstance(x, (int, long, BigInt)):
return hex(long(x))
# All other items
else:
raise NotImplementedError("Cannot unwrap {}; only types {} supported".
format(type(x),
[G1Element, G2Element, GtElement, int, long, BigInt]) ) |
def set_event_data(self, data, read_attrs):
""" Set event data with the specied attributes.
:param data: Event data table.
:param read_attrs: Attributes to put on the read group. This must include
the read_number, which must refer to a read present in the object. The
attributes should not include the standard read attributes:
* read_id
* start_time
* duration
* start_mux
Those will be pulled from the read information already present in the
object for the specified read.
"""
if self.handle.mode == 'r':
raise Exception('File is not open for writing.')
read_number = read_attrs['read_number']
read_group = '{}/Reads/Read_{}'.format(self.group_name, read_number)
read_info = self.handle.status.read_info
read_number_map = self.handle.status.read_number_map
index = read_number_map.get(read_number)
if index is None:
raise Exception('Cannot add event detection data for a read that does not exist.')
info = read_info[index]
read_attrs.update({'read_id': info.read_id,
'start_time': info.start_time,
'duration': info.duration,
'start_mux': info.start_mux,
'median_before': info.median_before})
attrs = self.handle.get_analysis_attributes(read_group)
if attrs is None:
self.handle.add_analysis_subgroup(self.group_name, 'Reads/Read_{}'.format(read_number),
attrs=read_attrs)
self.handle.add_analysis_dataset(read_group, 'Events', data)
else:
raise Exception('Event detection data already exists for this analysis and read.') | Set event data with the specified attributes.
:param data: Event data table.
:param read_attrs: Attributes to put on the read group. This must include
the read_number, which must refer to a read present in the object. The
attributes should not include the standard read attributes:
* read_id
* start_time
* duration
* start_mux
Those will be pulled from the read information already present in the
object for the specified read. | Below is the instruction that describes the task:
### Input:
Set event data with the specified attributes.
:param data: Event data table.
:param read_attrs: Attributes to put on the read group. This must include
the read_number, which must refer to a read present in the object. The
attributes should not include the standard read attributes:
* read_id
* start_time
* duration
* start_mux
Those will be pulled from the read information already present in the
object for the specified read.
### Response:
def set_event_data(self, data, read_attrs):
""" Set event data with the specied attributes.
:param data: Event data table.
:param read_attrs: Attributes to put on the read group. This must include
the read_number, which must refer to a read present in the object. The
attributes should not include the standard read attributes:
* read_id
* start_time
* duration
* start_mux
Those will be pulled from the read information already present in the
object for the specified read.
"""
if self.handle.mode == 'r':
raise Exception('File is not open for writing.')
read_number = read_attrs['read_number']
read_group = '{}/Reads/Read_{}'.format(self.group_name, read_number)
read_info = self.handle.status.read_info
read_number_map = self.handle.status.read_number_map
index = read_number_map.get(read_number)
if index is None:
raise Exception('Cannot add event detection data for a read that does not exist.')
info = read_info[index]
read_attrs.update({'read_id': info.read_id,
'start_time': info.start_time,
'duration': info.duration,
'start_mux': info.start_mux,
'median_before': info.median_before})
attrs = self.handle.get_analysis_attributes(read_group)
if attrs is None:
self.handle.add_analysis_subgroup(self.group_name, 'Reads/Read_{}'.format(read_number),
attrs=read_attrs)
self.handle.add_analysis_dataset(read_group, 'Events', data)
else:
raise Exception('Event detection data already exists for this analysis and read.') |
def _construct_w(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels].
"""
depthwise_weight_shape = self._kernel_shape + (self._input_channels,
self._channel_multiplier)
pointwise_input_size = self._channel_multiplier * self._input_channels
pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)
if "w_dw" not in self._initializers:
fan_in_shape = depthwise_weight_shape[:2]
self._initializers["w_dw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
if "w_pw" not in self._initializers:
fan_in_shape = pointwise_weight_shape[:3]
self._initializers["w_pw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w_dw = tf.get_variable(
"w_dw",
shape=depthwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_dw"],
partitioner=self._partitioners.get("w_dw", None),
regularizer=self._regularizers.get("w_dw", None))
w_pw = tf.get_variable(
"w_pw",
shape=pointwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_pw"],
partitioner=self._partitioners.get("w_pw", None),
regularizer=self._regularizers.get("w_pw", None))
return w_dw, w_pw | Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels]. | Below is the instruction that describes the task:
### Input:
Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels].
### Response:
def _construct_w(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels].
"""
depthwise_weight_shape = self._kernel_shape + (self._input_channels,
self._channel_multiplier)
pointwise_input_size = self._channel_multiplier * self._input_channels
pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)
if "w_dw" not in self._initializers:
fan_in_shape = depthwise_weight_shape[:2]
self._initializers["w_dw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
if "w_pw" not in self._initializers:
fan_in_shape = pointwise_weight_shape[:3]
self._initializers["w_pw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w_dw = tf.get_variable(
"w_dw",
shape=depthwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_dw"],
partitioner=self._partitioners.get("w_dw", None),
regularizer=self._regularizers.get("w_dw", None))
w_pw = tf.get_variable(
"w_pw",
shape=pointwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_pw"],
partitioner=self._partitioners.get("w_pw", None),
regularizer=self._regularizers.get("w_pw", None))
return w_dw, w_pw |
def _parse_geometry(geometry):
""" Parses given geometry into shapely object
:param geometry:
:return: Shapely polygon or multipolygon
:rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
:raises TypeError
"""
if isinstance(geometry, str):
geometry = shapely.wkt.loads(geometry)
elif isinstance(geometry, dict):
geometry = shapely.geometry.shape(geometry)
elif not isinstance(geometry, shapely.geometry.base.BaseGeometry):
raise TypeError('Unsupported geometry representation')
if not isinstance(geometry, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
raise ValueError('Supported geometry types are polygon and multipolygon, got {}'.format(type(geometry)))
return geometry | Parses given geometry into shapely object
:param geometry:
:return: Shapely polygon or multipolygon
:rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
:raises TypeError | Below is the instruction that describes the task:
### Input:
Parses given geometry into shapely object
:param geometry:
:return: Shapely polygon or multipolygon
:rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
:raises TypeError
### Response:
def _parse_geometry(geometry):
""" Parses given geometry into shapely object
:param geometry:
:return: Shapely polygon or multipolygon
:rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
:raises TypeError
"""
if isinstance(geometry, str):
geometry = shapely.wkt.loads(geometry)
elif isinstance(geometry, dict):
geometry = shapely.geometry.shape(geometry)
elif not isinstance(geometry, shapely.geometry.base.BaseGeometry):
raise TypeError('Unsupported geometry representation')
if not isinstance(geometry, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
raise ValueError('Supported geometry types are polygon and multipolygon, got {}'.format(type(geometry)))
return geometry |
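As a quick illustration of the two non-Shapely input forms the parser accepts (WKT text and a GeoJSON-style mapping), assuming `shapely` is installed; the coordinates are arbitrary:

```python
import shapely.wkt
import shapely.geometry

wkt = "POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"
geojson = {"type": "Polygon", "coordinates": [[(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)]]}

poly_from_wkt = shapely.wkt.loads(wkt)            # str branch
poly_from_dict = shapely.geometry.shape(geojson)  # dict branch
print(poly_from_wkt.area, poly_from_dict.area)    # 1.0 4.0
```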
def _add_arg_python(self, key, value=None, mask=False):
"""Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether no mask value.
"""
self._data[key] = value
if not value:
# both false boolean values (flags) and empty values should not be added.
pass
elif value is True:
# true boolean values are flags and should not contain a value
self._args.append('--{}'.format(key))
self._args_quoted.append('--{}'.format(key))
self._args_masked.append('--{}'.format(key))
else:
self._args.append('--{}={}'.format(key, value))
if mask:
# mask sensitive values
value = 'x' * len(str(value))
else:
# quote all values that would get displayed
value = self.quote(value)
self._args_quoted.append('--{}={}'.format(key, value))
self._args_masked.append('--{}={}'.format(key, value)) | Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether no mask value. | Below is the instruction that describes the task:
### Input:
Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether no mask value.
### Response:
def _add_arg_python(self, key, value=None, mask=False):
"""Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether no mask value.
"""
self._data[key] = value
if not value:
# both false boolean values (flags) and empty values should not be added.
pass
elif value is True:
# true boolean values are flags and should not contain a value
self._args.append('--{}'.format(key))
self._args_quoted.append('--{}'.format(key))
self._args_masked.append('--{}'.format(key))
else:
self._args.append('--{}={}'.format(key, value))
if mask:
# mask sensitive values
value = 'x' * len(str(value))
else:
# quote all values that would get displayed
value = self.quote(value)
self._args_quoted.append('--{}={}'.format(key, value))
self._args_masked.append('--{}={}'.format(key, value)) |
def _create_page_control(self):
""" Creates and connects the underlying paging widget.
"""
if self.custom_page_control:
control = self.custom_page_control()
elif self.kind == 'plain':
control = QtGui.QPlainTextEdit()
elif self.kind == 'rich':
control = QtGui.QTextEdit()
control.installEventFilter(self)
viewport = control.viewport()
viewport.installEventFilter(self)
control.setReadOnly(True)
control.setUndoRedoEnabled(False)
control.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
return control | Creates and connects the underlying paging widget. | Below is the instruction that describes the task:
### Input:
Creates and connects the underlying paging widget.
### Response:
def _create_page_control(self):
""" Creates and connects the underlying paging widget.
"""
if self.custom_page_control:
control = self.custom_page_control()
elif self.kind == 'plain':
control = QtGui.QPlainTextEdit()
elif self.kind == 'rich':
control = QtGui.QTextEdit()
control.installEventFilter(self)
viewport = control.viewport()
viewport.installEventFilter(self)
control.setReadOnly(True)
control.setUndoRedoEnabled(False)
control.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
return control |
def limit(self, limit_value, key_func=None, per_method=False,
methods=None, error_message=None, exempt_when=None):
"""
decorator to be used for rate limiting individual routes.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return:
"""
return self.__limit_decorator(limit_value, key_func, per_method=per_method,
methods=methods, error_message=error_message,
exempt_when=exempt_when) | decorator to be used for rate limiting individual routes.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return: | Below is the instruction that describes the task:
### Input:
decorator to be used for rate limiting individual routes.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return:
### Response:
def limit(self, limit_value, key_func=None, per_method=False,
methods=None, error_message=None, exempt_when=None):
"""
decorator to be used for rate limiting individual routes.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return:
"""
return self.__limit_decorator(limit_value, key_func, per_method=per_method,
methods=methods, error_message=error_message,
exempt_when=exempt_when) |
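This matches the public `limit` decorator of rate-limiting extensions such as Flask-Limiter; assuming that library, a rough usage sketch follows. The constructor details (especially how `key_func` is passed) vary between versions, so treat this as illustrative rather than canonical:

```python
from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)
limiter = Limiter(key_func=get_remote_address, app=app)  # constructor signature differs across versions

@app.route("/ping")
@limiter.limit("10 per minute")   # rate limit string; per_method, methods, error_message are optional
def ping():
    return "pong"
```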
def ruamel_structure(data, validator=None):
"""
Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML.
"""
if isinstance(data, dict):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedMap(
[
(ruamel_structure(key), ruamel_structure(value))
for key, value in data.items()
]
)
elif isinstance(data, list):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedSeq([ruamel_structure(item) for item in data])
elif isinstance(data, bool):
return u"yes" if data else u"no"
elif isinstance(data, (int, float)):
return str(data)
else:
if not is_string(data):
raise exceptions.CannotBuildDocumentFromInvalidData(
(
"Document must be built from a combination of:\n"
"string, int, float, bool or nonempty list/dict\n\n"
"Instead, found variable with type '{}': '{}'"
).format(type(data).__name__, data)
)
return data | Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML. | Below is the instruction that describes the task:
### Input:
Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML.
### Response:
def ruamel_structure(data, validator=None):
"""
Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML.
"""
if isinstance(data, dict):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedMap(
[
(ruamel_structure(key), ruamel_structure(value))
for key, value in data.items()
]
)
elif isinstance(data, list):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedSeq([ruamel_structure(item) for item in data])
elif isinstance(data, bool):
return u"yes" if data else u"no"
elif isinstance(data, (int, float)):
return str(data)
else:
if not is_string(data):
raise exceptions.CannotBuildDocumentFromInvalidData(
(
"Document must be built from a combination of:\n"
"string, int, float, bool or nonempty list/dict\n\n"
"Instead, found variable with type '{}': '{}'"
).format(type(data).__name__, data)
)
return data |
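A condensed sketch of the same conversion, dumped back out with ruamel.yaml to show the effect of the bool and number branches; it assumes ruamel.yaml is installed and skips the validator and empty-container checks:

```python
import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq

def to_ruamel(data):
    if isinstance(data, bool):                 # checked before int: bool is a subclass of int
        return "yes" if data else "no"
    if isinstance(data, dict):
        return CommentedMap((to_ruamel(k), to_ruamel(v)) for k, v in data.items())
    if isinstance(data, list):
        return CommentedSeq(to_ruamel(item) for item in data)
    if isinstance(data, (int, float)):
        return str(data)
    return data

doc = to_ruamel({"name": "demo", "enabled": True, "ports": [80, 443]})
YAML().dump(doc, sys.stdout)   # numbers come out as string scalars, the flag as yes
```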
def _copy_selection(self, *event):
"""Copies the current selection to the clipboard.
"""
if react_to_event(self.view, self.view.editor, event):
logger.debug("copy selection")
global_clipboard.copy(self.model.selection)
return True | Copies the current selection to the clipboard. | Below is the instruction that describes the task:
### Input:
Copies the current selection to the clipboard.
### Response:
def _copy_selection(self, *event):
"""Copies the current selection to the clipboard.
"""
if react_to_event(self.view, self.view.editor, event):
logger.debug("copy selection")
global_clipboard.copy(self.model.selection)
return True |
def getquals(args):
"""
%prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file
"""
p = OptionParser(getquals.__doc__)
p.add_option("--types", default="gene,mRNA,CDS",
type="str", dest="quals_ftypes",
help="Feature types from which to extract qualifiers")
p.add_option("--ignore", default="locus_tag,product,codon_start,translation",
type="str", dest="quals_ignore",
help="Qualifiers to exclude from parsing")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
quals_ftypes = opts.quals_ftypes.split(",")
quals_ignore = opts.quals_ignore.split(",")
locus = dict()
locus_tag = None
for rec in SeqIO.parse(gbkfile, "gb"):
for f in rec.features:
if f.type in quals_ftypes:
locus_tag = f.qualifiers[LT][0]
if locus_tag not in locus:
locus[locus_tag] = dict()
for ftype in quals_ftypes:
if ftype not in locus[locus_tag]:
locus[locus_tag][ftype] = []
if ftype == "CDS": # store the CDS protein_id
locus[locus_tag]["protein_id"] = []
quals = []
for qual in f.qualifiers:
if qual in quals_ignore:
continue
for qval in f.qualifiers[qual]:
quals.append((locus_tag, qual, qval))
if qual == "protein_id":
locus[locus_tag]["protein_id"].append(qval)
if len(quals) > 0:
locus[locus_tag][f.type].append(quals)
for locus_tag in locus:
print_locus_quals(locus_tag, locus, quals_ftypes) | %prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file | Below is the instruction that describes the task:
### Input:
%prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file
### Response:
def getquals(args):
"""
%prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file
"""
p = OptionParser(getquals.__doc__)
p.add_option("--types", default="gene,mRNA,CDS",
type="str", dest="quals_ftypes",
help="Feature types from which to extract qualifiers")
p.add_option("--ignore", default="locus_tag,product,codon_start,translation",
type="str", dest="quals_ignore",
help="Qualifiers to exclude from parsing")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
quals_ftypes = opts.quals_ftypes.split(",")
quals_ignore = opts.quals_ignore.split(",")
locus = dict()
locus_tag = None
for rec in SeqIO.parse(gbkfile, "gb"):
for f in rec.features:
if f.type in quals_ftypes:
locus_tag = f.qualifiers[LT][0]
if locus_tag not in locus:
locus[locus_tag] = dict()
for ftype in quals_ftypes:
if ftype not in locus[locus_tag]:
locus[locus_tag][ftype] = []
if ftype == "CDS": # store the CDS protein_id
locus[locus_tag]["protein_id"] = []
quals = []
for qual in f.qualifiers:
if qual in quals_ignore:
continue
for qval in f.qualifiers[qual]:
quals.append((locus_tag, qual, qval))
if qual == "protein_id":
locus[locus_tag]["protein_id"].append(qval)
if len(quals) > 0:
locus[locus_tag][f.type].append(quals)
for locus_tag in locus:
print_locus_quals(locus_tag, locus, quals_ftypes) |
def SetPlatformArchContext():
"""Add the running contexts to the config system."""
# Initialize the running platform context:
_CONFIG.AddContext("Platform:%s" % platform.system().title())
machine = platform.uname()[4]
if machine in ["x86_64", "AMD64", "i686"]:
# 32 bit binaries running on AMD64 will still have a i386 arch.
if platform.architecture()[0] == "32bit":
arch = "i386"
else:
arch = "amd64"
elif machine == "x86":
arch = "i386"
else:
arch = machine
_CONFIG.AddContext("Arch:%s" % arch) | Add the running contexts to the config system. | Below is the the instruction that describes the task:
### Input:
Add the running contexts to the config system.
### Response:
def SetPlatformArchContext():
"""Add the running contexts to the config system."""
# Initialize the running platform context:
_CONFIG.AddContext("Platform:%s" % platform.system().title())
machine = platform.uname()[4]
if machine in ["x86_64", "AMD64", "i686"]:
# 32 bit binaries running on AMD64 will still have a i386 arch.
if platform.architecture()[0] == "32bit":
arch = "i386"
else:
arch = "amd64"
elif machine == "x86":
arch = "i386"
else:
arch = machine
_CONFIG.AddContext("Arch:%s" % arch) |
def logBranch(self, indent=0, level=logging.DEBUG):
""" Logs the item and all descendants, one line per child
"""
if 0:
print(indent * " " + str(self))
else:
logger.log(level, indent * " " + str(self))
for childItems in self.childItems:
childItems.logBranch(indent + 1, level=level) | Logs the item and all descendants, one line per child | Below is the instruction that describes the task:
### Input:
Logs the item and all descendants, one line per child
### Response:
def logBranch(self, indent=0, level=logging.DEBUG):
""" Logs the item and all descendants, one line per child
"""
if 0:
print(indent * " " + str(self))
else:
logger.log(level, indent * " " + str(self))
for childItems in self.childItems:
childItems.logBranch(indent + 1, level=level) |
def rev_comp(seq):
"""Get reverse complement of sequence.
rev_comp will maintain the case of the sequence.
Parameters
----------
seq : str
nucleotide sequence. valid {a, c, t, g, n}
Returns
-------
rev_comp_seq : str
reverse complement of sequence
"""
rev_seq = seq[::-1]
rev_comp_seq = ''.join([base_pairing[s] for s in rev_seq])
return rev_comp_seq | Get reverse complement of sequence.
rev_comp will maintain the case of the sequence.
Parameters
----------
seq : str
nucleotide sequence. valid {a, c, t, g, n}
Returns
-------
rev_comp_seq : str
reverse complement of sequence | Below is the instruction that describes the task:
### Input:
Get reverse complement of sequence.
rev_comp will maintain the case of the sequence.
Parameters
----------
seq : str
nucleotide sequence. valid {a, c, t, g, n}
Returns
-------
rev_comp_seq : str
reverse complement of sequence
### Response:
def rev_comp(seq):
"""Get reverse complement of sequence.
rev_comp will maintain the case of the sequence.
Parameters
----------
seq : str
nucleotide sequence. valid {a, c, t, g, n}
Returns
-------
rev_comp_seq : str
reverse complement of sequence
"""
rev_seq = seq[::-1]
rev_comp_seq = ''.join([base_pairing[s] for s in rev_seq])
return rev_comp_seq |
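The function relies on a module-level `base_pairing` table that is not shown; below is a hypothetical table and a short run, to make the case-preserving behaviour concrete:

```python
# Hypothetical pairing table; the original module defines base_pairing elsewhere.
base_pairing = {'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'n': 'n',
                'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}

def rev_comp(seq):
    """Reverse-complement seq while preserving the case of each base."""
    return ''.join(base_pairing[s] for s in seq[::-1])

print(rev_comp('ATGCat'))  # atGCAT
```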
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream) | Read a local path, with special support for directories | Below is the instruction that describes the task:
### Input:
Read a local path, with special support for directories
### Response:
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream) |
def cmd_tcpscan(ip, port, iface, flags, sleeptime, timeout, show_all, verbose):
"""TCP Port Scanner.
Print the ports that generated a response with the SYN flag or (if show use -a) all the
ports that generated a response.
It's really basic compared with nmap, but who is comparing?
Example:
\b
# habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133
22 S -> SA
80 S -> SA
443 S -> SA
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
port_regex = r'^[0-9,-]+$'
if not re.match(port_regex, port):
logging.critical("Invalid port specification")
return False
ports = []
for p in str(port).split(','):
if '-' in p:
first, last = p.split('-')
for n in range(int(first), int(last)+1):
ports.append(n)
else:
ports.append(int(p))
out = "{port} {sflags} -> {rflags}"
pkts = IP(dst=ip)/TCP(flags=flags, dport=ports)
if sleeptime:
res = []
for pkt in pkts:
logging.info(pkt.summary())
_ = sr1(pkt)
if _:
logging.info(_.summary())
res.append((pkt, _))
else:
res, unans = sr(pkts, verbose=verbose)
for s,r in res:
if show_all or 'S' in r.sprintf(r"%TCP.flags%"):
print(out.format(
port=s[TCP].dport,
sflags=s.sprintf(r"%TCP.flags%"),
rflags=r.sprintf(r"%TCP.flags%")
)) | TCP Port Scanner.
Print the ports that generated a response with the SYN flag or (if show use -a) all the
ports that generated a response.
It's really basic compared with nmap, but who is comparing?
Example:
\b
# habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133
22 S -> SA
80 S -> SA
443 S -> SA | Below is the instruction that describes the task:
### Input:
TCP Port Scanner.
Print the ports that generated a response with the SYN flag or (if show use -a) all the
ports that generated a response.
It's really basic compared with nmap, but who is comparing?
Example:
\b
# habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133
22 S -> SA
80 S -> SA
443 S -> SA
### Response:
def cmd_tcpscan(ip, port, iface, flags, sleeptime, timeout, show_all, verbose):
"""TCP Port Scanner.
Print the ports that generated a response with the SYN flag or (if show use -a) all the
ports that generated a response.
It's really basic compared with nmap, but who is comparing?
Example:
\b
# habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133
22 S -> SA
80 S -> SA
443 S -> SA
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
port_regex = r'^[0-9,-]+$'
if not re.match(port_regex, port):
logging.critical("Invalid port specification")
return False
ports = []
for p in str(port).split(','):
if '-' in p:
first, last = p.split('-')
for n in range(int(first), int(last)+1):
ports.append(n)
else:
ports.append(int(p))
out = "{port} {sflags} -> {rflags}"
pkts = IP(dst=ip)/TCP(flags=flags, dport=ports)
if sleeptime:
res = []
for pkt in pkts:
logging.info(pkt.summary())
_ = sr1(pkt)
if _:
logging.info(_.summary())
res.append((pkt, _))
else:
res, unans = sr(pkts, verbose=verbose)
for s,r in res:
if show_all or 'S' in r.sprintf(r"%TCP.flags%"):
print(out.format(
port=s[TCP].dport,
sflags=s.sprintf(r"%TCP.flags%"),
rflags=r.sprintf(r"%TCP.flags%")
)) |
def options(argv=[]):
"""
A helper function that returns a dictionary of the default key-values pairs
"""
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0]) | A helper function that returns a dictionary of the default key-values pairs | Below is the instruction that describes the task:
### Input:
A helper function that returns a dictionary of the default key-values pairs
### Response:
def options(argv=[]):
"""
A helper function that returns a dictionary of the default key-values pairs
"""
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0]) |
def ber_code(self):
"""
This method gets ber code listed in Daft.
:return:
"""
try:
alt_text = self._ad_page_content.find(
'span', {'class': 'ber-hover'}
).find('img')['alt']
if ('exempt' in alt_text):
return 'exempt'
else:
alt_arr = alt_text.split()
if 'ber' in alt_arr[0].lower():
return alt_arr[1].lower()
else:
return None
except Exception as e:
if self._debug:
logging.error(
"Error getting the Ber Code. Error message: " + e.args[0])
return None | This method gets ber code listed in Daft.
:return: | Below is the instruction that describes the task:
### Input:
This method gets ber code listed in Daft.
:return:
### Response:
def ber_code(self):
"""
This method gets ber code listed in Daft.
:return:
"""
try:
alt_text = self._ad_page_content.find(
'span', {'class': 'ber-hover'}
).find('img')['alt']
if ('exempt' in alt_text):
return 'exempt'
else:
alt_arr = alt_text.split()
if 'ber' in alt_arr[0].lower():
return alt_arr[1].lower()
else:
return None
except Exception as e:
if self._debug:
logging.error(
"Error getting the Ber Code. Error message: " + e.args[0])
return None |
def is_img_id_valid(img_id):
"""
Checks if img_id is valid.
"""
t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True | Checks if img_id is valid. | Below is the the instruction that describes the task:
### Input:
Checks if img_id is valid.
### Response:
def is_img_id_valid(img_id):
"""
Checks if img_id is valid.
"""
t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True |
def mean_by_panel(self, length):
"""
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
"""
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex) | Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide. | Below is the instruction that describes the task:
### Input:
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
### Response:
def mean_by_panel(self, length):
"""
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
"""
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex) |
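The per-record work is just a reshape followed by a mean across panels; a small NumPy sketch of that step on a 12-sample record split into panels of length 4:

```python
import numpy as np

length = 4
v = np.arange(12, dtype=float)                  # one record: [0, 1, ..., 11] -> 3 panels of 4
panel_mean = v.reshape(-1, length).mean(axis=0)
print(panel_mean)                               # [4. 5. 6. 7.]
newindex = np.arange(length)                    # the index attached to the result, as above
```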
def check_str(obj):
""" Returns a string for various input types """
if isinstance(obj, str):
return obj
if isinstance(obj, float):
return str(int(obj))
else:
return str(obj) | Returns a string for various input types | Below is the instruction that describes the task:
### Input:
Returns a string for various input types
### Response:
def check_str(obj):
""" Returns a string for various input types """
if isinstance(obj, str):
return obj
if isinstance(obj, float):
return str(int(obj))
else:
return str(obj) |
def _parse_wsgi_headers(wsgi_environ):
"""
HTTP headers are presented in WSGI environment with 'HTTP_' prefix.
This method finds those headers, removes the prefix, converts
underscores to dashes, and converts to lower case.
:param wsgi_environ:
:return: returns a dictionary of headers
"""
prefix = 'HTTP_'
p_len = len(prefix)
# use .items() despite suspected memory pressure bc GC occasionally
# collects wsgi_environ.iteritems() during iteration.
headers = {
key[p_len:].replace('_', '-').lower():
val for (key, val) in wsgi_environ.items()
if key.startswith(prefix)}
return headers | HTTP headers are presented in WSGI environment with 'HTTP_' prefix.
This method finds those headers, removes the prefix, converts
underscores to dashes, and converts to lower case.
:param wsgi_environ:
:return: returns a dictionary of headers | Below is the instruction that describes the task:
### Input:
HTTP headers are presented in WSGI environment with 'HTTP_' prefix.
This method finds those headers, removes the prefix, converts
underscores to dashes, and converts to lower case.
:param wsgi_environ:
:return: returns a dictionary of headers
### Response:
def _parse_wsgi_headers(wsgi_environ):
"""
HTTP headers are presented in WSGI environment with 'HTTP_' prefix.
This method finds those headers, removes the prefix, converts
underscores to dashes, and converts to lower case.
:param wsgi_environ:
:return: returns a dictionary of headers
"""
prefix = 'HTTP_'
p_len = len(prefix)
# use .items() despite suspected memory pressure bc GC occasionally
# collects wsgi_environ.iteritems() during iteration.
headers = {
key[p_len:].replace('_', '-').lower():
val for (key, val) in wsgi_environ.items()
if key.startswith(prefix)}
return headers |
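A standalone sketch of the same dictionary comprehension on a hand-built WSGI environ, showing which keys survive and how they are renamed:

```python
def parse_wsgi_headers(wsgi_environ):
    """Extract HTTP_* keys from a WSGI environ as a lower-case, dash-separated header dict."""
    prefix = 'HTTP_'
    return {
        key[len(prefix):].replace('_', '-').lower(): val
        for key, val in wsgi_environ.items()
        if key.startswith(prefix)
    }

environ = {
    'REQUEST_METHOD': 'GET',           # not a header, ignored
    'HTTP_USER_AGENT': 'curl/8.0',
    'HTTP_X_REQUEST_ID': 'abc123',
}
print(parse_wsgi_headers(environ))
# {'user-agent': 'curl/8.0', 'x-request-id': 'abc123'}
```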
def consume_changes(self, start, end):
"""Clear the changed status of lines from start till end"""
left, right = self._get_changed(start, end)
if left < right:
del self.lines[left:right]
return left < right | Clear the changed status of lines from start till end | Below is the instruction that describes the task:
### Input:
Clear the changed status of lines from start till end
### Response:
def consume_changes(self, start, end):
"""Clear the changed status of lines from start till end"""
left, right = self._get_changed(start, end)
if left < right:
del self.lines[left:right]
return left < right |
async def deregister(self, check):
"""Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog.
"""
check_id = extract_attr(check, keys=["CheckID", "ID"])
response = await self._api.get("/v1/agent/check/deregister", check_id)
return response.status == 200 | Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog. | Below is the instruction that describes the task:
### Input:
Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog.
### Response:
async def deregister(self, check):
"""Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog.
"""
check_id = extract_attr(check, keys=["CheckID", "ID"])
response = await self._api.get("/v1/agent/check/deregister", check_id)
return response.status == 200 |
def set(ctx, key, value):
""" Set configuration parameters
"""
if key == "default_account" and value[0] == "@":
value = value[1:]
    ctx.bitshares.config[key] = value | Set configuration parameters | Below is the instruction that describes the task:
### Input:
Set configuration parameters
### Response:
def set(ctx, key, value):
""" Set configuration parameters
"""
if key == "default_account" and value[0] == "@":
value = value[1:]
ctx.bitshares.config[key] = value |
def price_change(self):
"""
This method returns any price change.
:return:
"""
try:
if self._data_from_search:
return self._data_from_search.find('div', {'class': 'price-changes-sr'}).text
else:
return self._ad_page_content.find('div', {'class': 'price-changes-sr'}).text
except Exception as e:
if self._debug:
logging.error(
"Error getting price_change. Error message: " + e.args[0])
return | This method returns any price change.
:return: | Below is the instruction that describes the task:
### Input:
This method returns any price change.
:return:
### Response:
def price_change(self):
"""
This method returns any price change.
:return:
"""
try:
if self._data_from_search:
return self._data_from_search.find('div', {'class': 'price-changes-sr'}).text
else:
return self._ad_page_content.find('div', {'class': 'price-changes-sr'}).text
except Exception as e:
if self._debug:
logging.error(
"Error getting price_change. Error message: " + e.args[0])
return |
def fromJSON(value):
"""loads the GP object from a JSON string """
j = json.loads(value)
v = GPString()
if "defaultValue" in j:
v.value = j['defaultValue']
else:
v.value = j['value']
if 'paramName' in j:
v.paramName = j['paramName']
elif 'name' in j:
v.paramName = j['name']
        return v | loads the GP object from a JSON string | Below is the instruction that describes the task:
### Input:
loads the GP object from a JSON string
### Response:
def fromJSON(value):
"""loads the GP object from a JSON string """
j = json.loads(value)
v = GPString()
if "defaultValue" in j:
v.value = j['defaultValue']
else:
v.value = j['value']
if 'paramName' in j:
v.paramName = j['paramName']
elif 'name' in j:
v.paramName = j['name']
return v |
def delete_image(self, identifier):
"""
::
DELETE /:login/images/:id
:param identifier: match on the listed image identifier
:type identifier: :py:class:`basestring` or :py:class:`dict`
A string or a dictionary containing an ``id`` key may be
passed in. Will raise an error if the response was an error.
"""
if isinstance(identifier, dict):
identifier = identifier.get('id', '')
j, r = self.request('DELETE', '/images/' + str(identifier))
r.raise_for_status()
return j | ::
DELETE /:login/images/:id
:param identifier: match on the listed image identifier
:type identifier: :py:class:`basestring` or :py:class:`dict`
A string or a dictionary containing an ``id`` key may be
passed in. Will raise an error if the response was an error. | Below is the instruction that describes the task:
### Input:
::
DELETE /:login/images/:id
:param identifier: match on the listed image identifier
:type identifier: :py:class:`basestring` or :py:class:`dict`
A string or a dictionary containing an ``id`` key may be
passed in. Will raise an error if the response was an error.
### Response:
def delete_image(self, identifier):
"""
::
DELETE /:login/images/:id
:param identifier: match on the listed image identifier
:type identifier: :py:class:`basestring` or :py:class:`dict`
A string or a dictionary containing an ``id`` key may be
passed in. Will raise an error if the response was an error.
"""
if isinstance(identifier, dict):
identifier = identifier.get('id', '')
j, r = self.request('DELETE', '/images/' + str(identifier))
r.raise_for_status()
return j |
def remove_entry(self, entry):
"""
Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry`
"""
if not isinstance(entry, Entry):
raise TypeError("entry param must be of type Entry.")
if not entry in self.entries:
            raise ValueError("Entry doesn't exist / not bound to this database.")
entry.group.entries.remove(entry)
self.entries.remove(entry) | Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry` | Below is the instruction that describes the task:
### Input:
Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry`
### Response:
def remove_entry(self, entry):
"""
Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry`
"""
if not isinstance(entry, Entry):
raise TypeError("entry param must be of type Entry.")
if not entry in self.entries:
            raise ValueError("Entry doesn't exist / not bound to this database.")
entry.group.entries.remove(entry)
self.entries.remove(entry) |
def addText(self, text):
"""append text in the chosen color"""
# move to the end of the doc
self.moveCursor(QtGui.QTextCursor.End)
# insert the text
self.setTextColor(self._currentColor)
        self.textCursor().insertText(text) | append text in the chosen color | Below is the instruction that describes the task:
### Input:
append text in the chosen color
### Response:
def addText(self, text):
"""append text in the chosen color"""
# move to the end of the doc
self.moveCursor(QtGui.QTextCursor.End)
# insert the text
self.setTextColor(self._currentColor)
self.textCursor().insertText(text) |
def robot_files(self):
'''Return a list of all folders, and test suite files (.txt, .robot)
'''
result = []
for name in os.listdir(self.path):
fullpath = os.path.join(self.path, name)
if os.path.isdir(fullpath):
result.append(RobotFactory(fullpath, parent=self))
else:
if ((name.endswith(".txt") or name.endswith(".robot")) and
(name not in ("__init__.txt", "__init__.robot"))):
result.append(RobotFactory(fullpath, parent=self))
        return result | Return a list of all folders, and test suite files (.txt, .robot) | Below is the instruction that describes the task:
### Input:
Return a list of all folders, and test suite files (.txt, .robot)
### Response:
def robot_files(self):
'''Return a list of all folders, and test suite files (.txt, .robot)
'''
result = []
for name in os.listdir(self.path):
fullpath = os.path.join(self.path, name)
if os.path.isdir(fullpath):
result.append(RobotFactory(fullpath, parent=self))
else:
if ((name.endswith(".txt") or name.endswith(".robot")) and
(name not in ("__init__.txt", "__init__.robot"))):
result.append(RobotFactory(fullpath, parent=self))
return result |
def load_preferences(session, config, valid_paths,
cull_disabled=False, openid=None,
cull_backends=None):
""" Every rule for every filter for every context for every user.
Any preferences in the DB that are for contexts that are disabled in the
config are omitted here.
If the `openid` argument is None, then this is an expensive query that
loads, practically, the whole database. However, if an openid string is
submitted, then only the preferences of that user are returned (and this is
less expensive).
"""
cull_backends = cull_backends or []
query = session.query(fmn.lib.models.Preference)
if openid:
query = query.filter(fmn.lib.models.Preference.openid==openid)
preferences = query.all()
return [
preference.__json__(reify=True)
for preference in preferences
if (
preference.context.name in config['fmn.backends']
and preference.context.name not in cull_backends
and (not cull_disabled or preference.enabled)
)
] | Every rule for every filter for every context for every user.
Any preferences in the DB that are for contexts that are disabled in the
config are omitted here.
If the `openid` argument is None, then this is an expensive query that
loads, practically, the whole database. However, if an openid string is
submitted, then only the preferences of that user are returned (and this is
less expensive). | Below is the instruction that describes the task:
### Input:
Every rule for every filter for every context for every user.
Any preferences in the DB that are for contexts that are disabled in the
config are omitted here.
If the `openid` argument is None, then this is an expensive query that
loads, practically, the whole database. However, if an openid string is
submitted, then only the preferences of that user are returned (and this is
less expensive).
### Response:
def load_preferences(session, config, valid_paths,
cull_disabled=False, openid=None,
cull_backends=None):
""" Every rule for every filter for every context for every user.
Any preferences in the DB that are for contexts that are disabled in the
config are omitted here.
If the `openid` argument is None, then this is an expensive query that
loads, practically, the whole database. However, if an openid string is
submitted, then only the preferences of that user are returned (and this is
less expensive).
"""
cull_backends = cull_backends or []
query = session.query(fmn.lib.models.Preference)
if openid:
query = query.filter(fmn.lib.models.Preference.openid==openid)
preferences = query.all()
return [
preference.__json__(reify=True)
for preference in preferences
if (
preference.context.name in config['fmn.backends']
and preference.context.name not in cull_backends
and (not cull_disabled or preference.enabled)
)
] |
def GetRowHeaders(self) -> list:
"""
Call IUIAutomationTablePattern::GetCurrentRowHeaders.
Return list, a list of `Control` subclasses, representing all the row headers in a table.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders
"""
eleArray = self.pattern.GetCurrentRowHeaders()
if eleArray:
controls = []
for i in range(eleArray.Length):
ele = eleArray.GetElement(i)
con = Control.CreateControlFromElement(element=ele)
if con:
controls.append(con)
return controls
return [] | Call IUIAutomationTablePattern::GetCurrentRowHeaders.
Return list, a list of `Control` subclasses, representing all the row headers in a table.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders | Below is the instruction that describes the task:
### Input:
Call IUIAutomationTablePattern::GetCurrentRowHeaders.
Return list, a list of `Control` subclasses, representing all the row headers in a table.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders
### Response:
def GetRowHeaders(self) -> list:
"""
Call IUIAutomationTablePattern::GetCurrentRowHeaders.
Return list, a list of `Control` subclasses, representing all the row headers in a table.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders
"""
eleArray = self.pattern.GetCurrentRowHeaders()
if eleArray:
controls = []
for i in range(eleArray.Length):
ele = eleArray.GetElement(i)
con = Control.CreateControlFromElement(element=ele)
if con:
controls.append(con)
return controls
return [] |
def get_instance(self, payload):
"""
Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
"""
return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance | Below is the instruction that describes the task:
### Input:
Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
"""
return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def delete_collection_pod_security_policy(self, **kwargs):
"""
delete collection of PodSecurityPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_pod_security_policy(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_pod_security_policy_with_http_info(**kwargs)
else:
(data) = self.delete_collection_pod_security_policy_with_http_info(**kwargs)
return data | delete collection of PodSecurityPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_pod_security_policy(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
delete collection of PodSecurityPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_pod_security_policy(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_collection_pod_security_policy(self, **kwargs):
"""
delete collection of PodSecurityPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_pod_security_policy(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_pod_security_policy_with_http_info(**kwargs)
else:
(data) = self.delete_collection_pod_security_policy_with_http_info(**kwargs)
return data |
def contains_entry(self, key, value):
"""
Returns whether the multimap contains an entry with the value.
:param key: (object), the specified key.
:param value: (object), the specified value.
:return: (bool), ``true`` if this multimap contains the key-value tuple.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(multi_map_contains_entry_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id()) | Returns whether the multimap contains an entry with the value.
:param key: (object), the specified key.
:param value: (object), the specified value.
:return: (bool), ``true`` if this multimap contains the key-value tuple. | Below is the instruction that describes the task:
### Input:
Returns whether the multimap contains an entry with the value.
:param key: (object), the specified key.
:param value: (object), the specified value.
:return: (bool), ``true`` if this multimap contains the key-value tuple.
### Response:
def contains_entry(self, key, value):
"""
Returns whether the multimap contains an entry with the value.
:param key: (object), the specified key.
:param value: (object), the specified value.
:return: (bool), ``true`` if this multimap contains the key-value tuple.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(multi_map_contains_entry_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id()) |
def parse_and_normalize_url_date(date_str):
"""Parse a ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
"""
if date_str is None:
return None
try:
return d1_common.date_time.dt_from_iso8601_str(date_str)
except d1_common.date_time.iso8601.ParseError as e:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Invalid date format for URL parameter. date="{}" error="{}"'.format(
date_str, str(e)
),
        ) | Parse an ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC. | Below is the instruction that describes the task:
### Input:
Parse an ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
### Response:
def parse_and_normalize_url_date(date_str):
    """Parse an ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
"""
if date_str is None:
return None
try:
return d1_common.date_time.dt_from_iso8601_str(date_str)
except d1_common.date_time.iso8601.ParseError as e:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Invalid date format for URL parameter. date="{}" error="{}"'.format(
date_str, str(e)
),
) |
def create_layout(self, size = None):
"""utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout"""
if not self.context:
# TODO - this is rather sloppy as far as exception goes
# should explain better
raise Exception("Can not create layout without existing context!")
layout = pangocairo.create_layout(self.context)
font_desc = pango.FontDescription(_font_desc)
if size: font_desc.set_absolute_size(size * pango.SCALE)
layout.set_font_description(font_desc)
return layout | utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout | Below is the instruction that describes the task:
### Input:
utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout
### Response:
def create_layout(self, size = None):
"""utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout"""
if not self.context:
# TODO - this is rather sloppy as far as exception goes
# should explain better
raise Exception("Can not create layout without existing context!")
layout = pangocairo.create_layout(self.context)
font_desc = pango.FontDescription(_font_desc)
if size: font_desc.set_absolute_size(size * pango.SCALE)
layout.set_font_description(font_desc)
return layout |
def merge_code(left_code, right_code):
"""
{ relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... }
"""
data = dict()
code_lines = (left_code and left_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
data[rel_line] = [(abs_line, dis), None]
code_lines = (right_code and right_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
found = data.get(rel_line, None)
if found is None:
found = [None, (abs_line, dis)]
data[rel_line] = found
else:
found[1] = (abs_line, dis)
return data | { relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... } | Below is the instruction that describes the task:
### Input:
{ relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... }
### Response:
def merge_code(left_code, right_code):
"""
{ relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... }
"""
data = dict()
code_lines = (left_code and left_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
data[rel_line] = [(abs_line, dis), None]
code_lines = (right_code and right_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
found = data.get(rel_line, None)
if found is None:
found = [None, (abs_line, dis)]
data[rel_line] = found
else:
found[1] = (abs_line, dis)
return data |
def light_to_gl(light, transform, lightN):
"""
Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg]
"""
# convert color to opengl
gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
assert len(gl_color) == 4
# cartesian translation from matrix
gl_position = vector_to_gl(transform[:3, 3])
# create the different position and color arguments
args = [(lightN, gl.GL_POSITION, gl_position),
(lightN, gl.GL_SPECULAR, gl_color),
(lightN, gl.GL_DIFFUSE, gl_color),
(lightN, gl.GL_AMBIENT, gl_color)]
return args | Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg] | Below is the instruction that describes the task:
### Input:
Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg]
### Response:
def light_to_gl(light, transform, lightN):
"""
Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg]
"""
# convert color to opengl
gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
assert len(gl_color) == 4
# cartesian translation from matrix
gl_position = vector_to_gl(transform[:3, 3])
# create the different position and color arguments
args = [(lightN, gl.GL_POSITION, gl_position),
(lightN, gl.GL_SPECULAR, gl_color),
(lightN, gl.GL_DIFFUSE, gl_color),
(lightN, gl.GL_AMBIENT, gl_color)]
return args |
async def pause(self, pause: bool = True):
"""
Pauses the current song.
Parameters
----------
pause : bool
Set to ``False`` to resume.
"""
self._paused = pause
await self.node.pause(self.channel.guild.id, pause) | Pauses the current song.
Parameters
----------
pause : bool
Set to ``False`` to resume. | Below is the instruction that describes the task:
### Input:
Pauses the current song.
Parameters
----------
pause : bool
Set to ``False`` to resume.
### Response:
async def pause(self, pause: bool = True):
"""
Pauses the current song.
Parameters
----------
pause : bool
Set to ``False`` to resume.
"""
self._paused = pause
await self.node.pause(self.channel.guild.id, pause) |
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
rot, x, y, z = self._quaternion.get_axis_angle()
up, forward, right = self._get_dim_vectors()
        self.transform.rotate(180 * rot / np.pi, (x, z, y)) | Rotate the transformation matrix based on camera parameters | Below is the instruction that describes the task:
### Input:
Rotate the transformation matrix based on camera parameters
### Response:
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
rot, x, y, z = self._quaternion.get_axis_angle()
up, forward, right = self._get_dim_vectors()
self.transform.rotate(180 * rot / np.pi, (x, z, y)) |
def _cbc_decrypt(self, final_key, crypted_content):
"""This method decrypts the database"""
# Just decrypt the content with the created key
aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv)
decrypted_content = aes.decrypt(crypted_content)
padding = decrypted_content[-1]
if sys.version > '3':
padding = decrypted_content[-1]
else:
padding = ord(decrypted_content[-1])
decrypted_content = decrypted_content[:len(decrypted_content)-padding]
        return decrypted_content | This method decrypts the database | Below is the instruction that describes the task:
### Input:
This method decrypts the database
### Response:
def _cbc_decrypt(self, final_key, crypted_content):
"""This method decrypts the database"""
# Just decrypt the content with the created key
aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv)
decrypted_content = aes.decrypt(crypted_content)
padding = decrypted_content[-1]
if sys.version > '3':
padding = decrypted_content[-1]
else:
padding = ord(decrypted_content[-1])
decrypted_content = decrypted_content[:len(decrypted_content)-padding]
return decrypted_content |
def add_sent(self, sent_obj):
''' Add a ttl.Sentence object to this document '''
if sent_obj is None:
raise Exception("Sentence object cannot be None")
elif sent_obj.ID is None:
# if sentID is None, create a new ID
sent_obj.ID = next(self.__idgen)
elif self.has_id(sent_obj.ID):
raise Exception("Sentence ID {} exists".format(sent_obj.ID))
self.__sent_map[sent_obj.ID] = sent_obj
self.__sents.append(sent_obj)
        return sent_obj | Add a ttl.Sentence object to this document | Below is the instruction that describes the task:
### Input:
Add a ttl.Sentence object to this document
### Response:
def add_sent(self, sent_obj):
''' Add a ttl.Sentence object to this document '''
if sent_obj is None:
raise Exception("Sentence object cannot be None")
elif sent_obj.ID is None:
# if sentID is None, create a new ID
sent_obj.ID = next(self.__idgen)
elif self.has_id(sent_obj.ID):
raise Exception("Sentence ID {} exists".format(sent_obj.ID))
self.__sent_map[sent_obj.ID] = sent_obj
self.__sents.append(sent_obj)
return sent_obj |
def save(self, set_cookie, **params):
"""Update cookies if the session has been changed."""
if set(self.store.items()) ^ set(self.items()):
value = dict(self.items())
value = json.dumps(value)
value = self.encrypt(value)
if not isinstance(value, str):
value = value.encode(self.encoding)
set_cookie(self.key, value, **self.params)
return True
        return False | Update cookies if the session has been changed. | Below is the instruction that describes the task:
### Input:
Update cookies if the session has been changed.
### Response:
def save(self, set_cookie, **params):
"""Update cookies if the session has been changed."""
if set(self.store.items()) ^ set(self.items()):
value = dict(self.items())
value = json.dumps(value)
value = self.encrypt(value)
if not isinstance(value, str):
value = value.encode(self.encoding)
set_cookie(self.key, value, **self.params)
return True
return False |
async def teardown_client(self, client_id):
"""Release all resources held by a client.
This method must be called and awaited whenever a client is
disconnected. It ensures that all of the client's resources are
properly released and any devices they have connected to are
disconnected cleanly.
Args:
client_id (str): The client that we should tear down.
Raises:
ArgumentError: The client_id is unknown.
"""
client_info = self._client_info(client_id)
self.adapter.remove_monitor(client_info['monitor'])
conns = client_info['connections']
for conn_string, conn_id in conns.items():
try:
self._logger.debug("Disconnecting client %s from conn %s at teardown", client_id, conn_string)
await self.adapter.disconnect(conn_id)
except: #pylint:disable=bare-except; This is a finalization method that should not raise unexpectedly
self._logger.exception("Error disconnecting device during teardown_client: conn_string=%s", conn_string)
del self._clients[client_id] | Release all resources held by a client.
This method must be called and awaited whenever a client is
disconnected. It ensures that all of the client's resources are
properly released and any devices they have connected to are
disconnected cleanly.
Args:
client_id (str): The client that we should tear down.
Raises:
ArgumentError: The client_id is unknown. | Below is the instruction that describes the task:
### Input:
Release all resources held by a client.
This method must be called and awaited whenever a client is
disconnected. It ensures that all of the client's resources are
properly released and any devices they have connected to are
disconnected cleanly.
Args:
client_id (str): The client that we should tear down.
Raises:
ArgumentError: The client_id is unknown.
### Response:
async def teardown_client(self, client_id):
"""Release all resources held by a client.
This method must be called and awaited whenever a client is
disconnected. It ensures that all of the client's resources are
properly released and any devices they have connected to are
disconnected cleanly.
Args:
client_id (str): The client that we should tear down.
Raises:
ArgumentError: The client_id is unknown.
"""
client_info = self._client_info(client_id)
self.adapter.remove_monitor(client_info['monitor'])
conns = client_info['connections']
for conn_string, conn_id in conns.items():
try:
self._logger.debug("Disconnecting client %s from conn %s at teardown", client_id, conn_string)
await self.adapter.disconnect(conn_id)
except: #pylint:disable=bare-except; This is a finalization method that should not raise unexpectedly
self._logger.exception("Error disconnecting device during teardown_client: conn_string=%s", conn_string)
del self._clients[client_id] |
def set_data_length(self, length):
# type: (int) -> None
'''
A method to set the length of the data that this UDF File Entry
points to.
Parameters:
length - The new length for the data.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
len_diff = length - self.info_len
if len_diff > 0:
# If we are increasing the length, update the last alloc_desc up
# to the max of 0x3ffff800, and throw an exception if we overflow.
new_len = self.alloc_descs[-1][0] + len_diff
if new_len > 0x3ffff800:
raise pycdlibexception.PyCdlibInvalidInput('Cannot increase the size of a UDF file beyond the current descriptor')
self.alloc_descs[-1][0] = new_len
elif len_diff < 0:
# We are decreasing the length. It's possible we are removing one
# or more alloc_descs, so run through the list updating all of the
# descriptors and remove any we no longer need.
len_left = length
alloc_descs_needed = 0
index = 0
while len_left > 0:
this_len = min(len_left, 0x3ffff800)
alloc_descs_needed += 1
self.alloc_descs[index][0] = this_len
index += 1
len_left -= this_len
self.alloc_descs = self.alloc_descs[:alloc_descs_needed]
self.info_len = length | A method to set the length of the data that this UDF File Entry
points to.
Parameters:
length - The new length for the data.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
A method to set the length of the data that this UDF File Entry
points to.
Parameters:
length - The new length for the data.
Returns:
Nothing.
### Response:
def set_data_length(self, length):
# type: (int) -> None
'''
A method to set the length of the data that this UDF File Entry
points to.
Parameters:
length - The new length for the data.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
len_diff = length - self.info_len
if len_diff > 0:
# If we are increasing the length, update the last alloc_desc up
# to the max of 0x3ffff800, and throw an exception if we overflow.
new_len = self.alloc_descs[-1][0] + len_diff
if new_len > 0x3ffff800:
raise pycdlibexception.PyCdlibInvalidInput('Cannot increase the size of a UDF file beyond the current descriptor')
self.alloc_descs[-1][0] = new_len
elif len_diff < 0:
# We are decreasing the length. It's possible we are removing one
# or more alloc_descs, so run through the list updating all of the
# descriptors and remove any we no longer need.
len_left = length
alloc_descs_needed = 0
index = 0
while len_left > 0:
this_len = min(len_left, 0x3ffff800)
alloc_descs_needed += 1
self.alloc_descs[index][0] = this_len
index += 1
len_left -= this_len
self.alloc_descs = self.alloc_descs[:alloc_descs_needed]
self.info_len = length |
def rsi(self, n, array=False):
        """RSI indicator"""
result = talib.RSI(self.close, n)
if array:
return result
        return result[-1] | RSI indicator | Below is the instruction that describes the task:
### Input:
RSI indicator
### Response:
def rsi(self, n, array=False):
        """RSI indicator"""
result = talib.RSI(self.close, n)
if array:
return result
return result[-1] |
def cidr_notation(ip_address, netmask):
"""
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
"""
try:
inet_aton(ip_address)
except:
raise Exception("Invalid ip address '%s'" % ip_address)
try:
inet_aton(netmask)
except:
raise Exception("Invalid netmask '%s'" % netmask)
ip_address_split = ip_address.split('.')
netmask_split = netmask.split('.')
# calculate network start
net_start = [str(int(ip_address_split[x]) & int(netmask_split[x]))
for x in range(0,4)]
return '.'.join(net_start) + '/' + get_net_size(netmask_split) | Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html | Below is the instruction that describes the task:
### Input:
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
### Response:
def cidr_notation(ip_address, netmask):
"""
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
"""
try:
inet_aton(ip_address)
except:
raise Exception("Invalid ip address '%s'" % ip_address)
try:
inet_aton(netmask)
except:
raise Exception("Invalid netmask '%s'" % netmask)
ip_address_split = ip_address.split('.')
netmask_split = netmask.split('.')
# calculate network start
net_start = [str(int(ip_address_split[x]) & int(netmask_split[x]))
for x in range(0,4)]
return '.'.join(net_start) + '/' + get_net_size(netmask_split) |
def subscribe_topics(self):
"""subscribe to all registered device and node topics"""
base = self.topic
subscribe = self.mqtt.subscribe
# device topics
subscribe(b"/".join((base, b"$stats/interval/set")))
subscribe(b"/".join((self.settings.MQTT_BASE_TOPIC, b"$broadcast/#")))
# node topics
nodes = self.nodes
for node in nodes:
for topic in node.subscribe:
topic = b"/".join((base, topic))
# print('MQTT SUBSCRIBE: {}'.format(topic))
subscribe(topic)
                self.topic_callbacks[topic] = node.callback | subscribe to all registered device and node topics | Below is the instruction that describes the task:
### Input:
subscribe to all registered device and node topics
### Response:
def subscribe_topics(self):
"""subscribe to all registered device and node topics"""
base = self.topic
subscribe = self.mqtt.subscribe
# device topics
subscribe(b"/".join((base, b"$stats/interval/set")))
subscribe(b"/".join((self.settings.MQTT_BASE_TOPIC, b"$broadcast/#")))
# node topics
nodes = self.nodes
for node in nodes:
for topic in node.subscribe:
topic = b"/".join((base, topic))
# print('MQTT SUBSCRIBE: {}'.format(topic))
subscribe(topic)
self.topic_callbacks[topic] = node.callback |
def stop(cls, app_id):
"""
Stops an app by issuing a PUT request to the /apps/ID/stop endpoint.
"""
conn = Qubole.agent()
        return conn.put(cls.element_path(app_id) + "/stop") | Stops an app by issuing a PUT request to the /apps/ID/stop endpoint. | Below is the instruction that describes the task:
### Input:
Stops an app by issuing a PUT request to the /apps/ID/stop endpoint.
### Response:
def stop(cls, app_id):
"""
Stops an app by issuing a PUT request to the /apps/ID/stop endpoint.
"""
conn = Qubole.agent()
return conn.put(cls.element_path(app_id) + "/stop") |
def playlist_create(
self,
name,
description='',
*,
make_public=False,
songs=None
):
"""Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information.
"""
share_state = 'PUBLIC' if make_public else 'PRIVATE'
playlist = self._call(
mc_calls.PlaylistsCreate,
name,
description,
share_state
).body
if songs:
playlist = self.playlist_songs_add(songs, playlist)
return playlist | Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information. | Below is the instruction that describes the task:
### Input:
Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information.
### Response:
def playlist_create(
self,
name,
description='',
*,
make_public=False,
songs=None
):
"""Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information.
"""
share_state = 'PUBLIC' if make_public else 'PRIVATE'
playlist = self._call(
mc_calls.PlaylistsCreate,
name,
description,
share_state
).body
if songs:
playlist = self.playlist_songs_add(songs, playlist)
return playlist |
def loadfn(fn, *args, **kwargs):
"""
Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load.
"""
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "object_hook" not in kwargs:
kwargs["object_hook"] = object_hook
with zopen(fn, "rb") as fp:
return msgpack.load(fp, *args, **kwargs)
else:
with zopen(fn) as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Loader" not in kwargs:
kwargs["Loader"] = Loader
return yaml.load(fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyDecoder
return json.load(fp, *args, **kwargs) | Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load. | Below is the instruction that describes the task:
### Input:
Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load.
### Response:
def loadfn(fn, *args, **kwargs):
"""
Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load.
"""
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "object_hook" not in kwargs:
kwargs["object_hook"] = object_hook
with zopen(fn, "rb") as fp:
return msgpack.load(fp, *args, **kwargs)
else:
with zopen(fn) as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Loader" not in kwargs:
kwargs["Loader"] = Loader
return yaml.load(fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyDecoder
return json.load(fp, *args, **kwargs) |
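A small usage sketch for the loader above; it assumes `loadfn` is importable from the surrounding serialization module and only exercises the plain-JSON branch (no msgpack or YAML involved).

import json
import os
import tempfile

# Write a small JSON file, then round-trip it through loadfn.
path = os.path.join(tempfile.mkdtemp(), 'settings.json')
with open(path, 'w') as f:
    json.dump({'alpha': 1, 'beta': [1, 2, 3]}, f)

data = loadfn(path)   # name contains neither "yaml" nor "mpk", so json.load is used
print(data['beta'])   # [1, 2, 3]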
def Update(self, env, args=None):
"""
Update an environment with the option variables.
env - the environment to update.
"""
values = {}
# first set the defaults:
for option in self.options:
if not option.default is None:
values[option.key] = option.default
# next set the value specified in the options file
for filename in self.files:
if os.path.exists(filename):
dir = os.path.split(os.path.abspath(filename))[0]
if dir:
sys.path.insert(0, dir)
try:
values['__name__'] = filename
with open(filename, 'r') as f:
contents = f.read()
exec(contents, {}, values)
finally:
if dir:
del sys.path[0]
del values['__name__']
# set the values specified on the command line
if args is None:
args = self.args
for arg, value in args.items():
added = False
for option in self.options:
if arg in list(option.aliases) + [ option.key ]:
values[option.key] = value
added = True
if not added:
self.unknown[arg] = value
# put the variables in the environment:
# (don't copy over variables that are not declared as options)
for option in self.options:
try:
env[option.key] = values[option.key]
except KeyError:
pass
# Call the convert functions:
for option in self.options:
if option.converter and option.key in values:
value = env.subst('${%s}'%option.key)
try:
try:
env[option.key] = option.converter(value)
except TypeError:
env[option.key] = option.converter(value, env)
except ValueError as x:
raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x))
# Finally validate the values:
for option in self.options:
if option.validator and option.key in values:
option.validator(option.key, env.subst('${%s}'%option.key), env) | Update an environment with the option variables.
env - the environment to update. | Below is the the instruction that describes the task:
### Input:
Update an environment with the option variables.
env - the environment to update.
### Response:
def Update(self, env, args=None):
"""
Update an environment with the option variables.
env - the environment to update.
"""
values = {}
# first set the defaults:
for option in self.options:
if not option.default is None:
values[option.key] = option.default
# next set the value specified in the options file
for filename in self.files:
if os.path.exists(filename):
dir = os.path.split(os.path.abspath(filename))[0]
if dir:
sys.path.insert(0, dir)
try:
values['__name__'] = filename
with open(filename, 'r') as f:
contents = f.read()
exec(contents, {}, values)
finally:
if dir:
del sys.path[0]
del values['__name__']
# set the values specified on the command line
if args is None:
args = self.args
for arg, value in args.items():
added = False
for option in self.options:
if arg in list(option.aliases) + [ option.key ]:
values[option.key] = value
added = True
if not added:
self.unknown[arg] = value
# put the variables in the environment:
# (don't copy over variables that are not declared as options)
for option in self.options:
try:
env[option.key] = values[option.key]
except KeyError:
pass
# Call the convert functions:
for option in self.options:
if option.converter and option.key in values:
value = env.subst('${%s}'%option.key)
try:
try:
env[option.key] = option.converter(value)
except TypeError:
env[option.key] = option.converter(value, env)
except ValueError as x:
raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x))
# Finally validate the values:
for option in self.options:
if option.validator and option.key in values:
option.validator(option.key, env.subst('${%s}'%option.key), env) |
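A hedged sketch of how `Update` is typically driven from a build script; it assumes it runs inside an SConstruct, where `Environment` and `ARGUMENTS` are in scope and the options object provides an `Add` method, as both the older Options class above and the newer Variables class do.

# Inside an SConstruct; Variables, Environment and ARGUMENTS are provided by SCons.
opts = Variables('custom.py', ARGUMENTS)
opts.Add('CC', help='C compiler to use', default='gcc')
env = Environment()
opts.Update(env)   # defaults, then custom.py values, then command-line overrides
print(env['CC'])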
def Sonnad_Goudar_2006(Re, eD):
r'''Calculates Darcy friction factor using the method in Sonnad and Goudar
(2006) [2]_ as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right)
.. math::
S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re)
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2
Examples
--------
>>> Sonnad_Goudar_2006(1E5, 1E-4)
0.0185971269898162
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Travis, Quentin B., and Larry W. Mays."Relationship between
Hazen-William and Colebrook-White Roughness Values." Journal of
Hydraulic Engineering 133, no. 11 (November 2007): 1270-73.
doi:10.1061/(ASCE)0733-9429(2007)133:11(1270).
'''
S = 0.124*eD*Re + log(0.4587*Re)
return (.8686*log(.4587*Re/S**(S/(S+1))))**-2 | r'''Calculates Darcy friction factor using the method in Sonnad and Goudar
(2006) [2]_ as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right)
.. math::
S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re)
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2
Examples
--------
>>> Sonnad_Goudar_2006(1E5, 1E-4)
0.0185971269898162
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Travis, Quentin B., and Larry W. Mays."Relationship between
Hazen-William and Colebrook-White Roughness Values." Journal of
Hydraulic Engineering 133, no. 11 (November 2007): 1270-73.
doi:10.1061/(ASCE)0733-9429(2007)133:11(1270). | Below is the the instruction that describes the task:
### Input:
r'''Calculates Darcy friction factor using the method in Sonnad and Goudar
(2006) [2]_ as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right)
.. math::
S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re)
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2
Examples
--------
>>> Sonnad_Goudar_2006(1E5, 1E-4)
0.0185971269898162
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Travis, Quentin B., and Larry W. Mays."Relationship between
Hazen-William and Colebrook-White Roughness Values." Journal of
Hydraulic Engineering 133, no. 11 (November 2007): 1270-73.
doi:10.1061/(ASCE)0733-9429(2007)133:11(1270).
### Response:
def Sonnad_Goudar_2006(Re, eD):
r'''Calculates Darcy friction factor using the method in Sonnad and Goudar
(2006) [2]_ as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right)
.. math::
S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re)
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2
Examples
--------
>>> Sonnad_Goudar_2006(1E5, 1E-4)
0.0185971269898162
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Travis, Quentin B., and Larry W. Mays."Relationship between
Hazen-William and Colebrook-White Roughness Values." Journal of
Hydraulic Engineering 133, no. 11 (November 2007): 1270-73.
doi:10.1061/(ASCE)0733-9429(2007)133:11(1270).
'''
S = 0.124*eD*Re + log(0.4587*Re)
return (.8686*log(.4587*Re/S**(S/(S+1))))**-2 |
def build_image_from_inherited_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions):
"""
Builds an image with installed requirements from the inherited image. (Or just tags the image
if there are no requirements.)
See :meth:`build_image` for parameters descriptions.
:rtype: docker.models.images.Image
"""
base_name, base_tag = self.get_inherit_image()
if requirements_option == RequirementsOptions.no_requirements:
image = self.get_image(base_name, base_tag)
image.tag(image_name, image_tag) # so ``build_image`` doesn't have to be called next time
return image
dockerfile = self.get_install_requirements_dockerfile(base_name, base_tag, repo_path, requirements_option)
self.get_or_build_image(image_name, image_tag, dockerfile, build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag) | Builds an image with installed requirements from the inherited image. (Or just tags the image
if there are no requirements.)
See :meth:`build_image` for parameters descriptions.
:rtype: docker.models.images.Image | Below is the the instruction that describes the task:
### Input:
Builds an image with installed requirements from the inherited image. (Or just tags the image
if there are no requirements.)
See :meth:`build_image` for parameters descriptions.
:rtype: docker.models.images.Image
### Response:
def build_image_from_inherited_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions):
"""
Builds an image with installed requirements from the inherited image. (Or just tags the image
if there are no requirements.)
See :meth:`build_image` for parameters descriptions.
:rtype: docker.models.images.Image
"""
base_name, base_tag = self.get_inherit_image()
if requirements_option == RequirementsOptions.no_requirements:
image = self.get_image(base_name, base_tag)
image.tag(image_name, image_tag) # so ``build_image`` doesn't have to be called next time
return image
dockerfile = self.get_install_requirements_dockerfile(base_name, base_tag, repo_path, requirements_option)
self.get_or_build_image(image_name, image_tag, dockerfile, build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag) |
def save_aggregate_report_to_elasticsearch(aggregate_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed aggregate report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
"""
logger.debug("Saving aggregate report to Elasticsearch")
aggregate_report = aggregate_report.copy()
metadata = aggregate_report["report_metadata"]
org_name = metadata["org_name"]
report_id = metadata["report_id"]
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"],
aggregate_report["end_date"]]
org_name_query = Q(dict(match=dict(org_name=org_name)))
report_id_query = Q(dict(match=dict(report_id=report_id)))
domain_query = Q(dict(match={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_range=begin_date)))
end_date_query = Q(dict(match=dict(date_range=end_date)))
search = Search(index="dmarc_aggregate*")
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
"with a date range of {3} UTC to {4} UTC already "
"exists in "
"Elasticsearch".format(report_id,
org_name,
domain,
begin_date_human,
end_date_human))
published_policy = _PublishedPolicy(
domain=aggregate_report["policy_published"]["domain"],
adkim=aggregate_report["policy_published"]["adkim"],
aspf=aggregate_report["policy_published"]["aspf"],
p=aggregate_report["policy_published"]["p"],
sp=aggregate_report["policy_published"]["sp"],
pct=aggregate_report["policy_published"]["pct"],
fo=aggregate_report["policy_published"]["fo"]
)
for record in aggregate_report["records"]:
agg_doc = _AggregateReportDoc(
xml_schemea=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
org_email=metadata["org_email"],
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
source_country=record["source"]["country"],
source_reverse_dns=record["source"]["reverse_dns"],
source_base_domain=record["source"]["base_domain"],
message_count=record["count"],
disposition=record["policy_evaluated"]["disposition"],
dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
spf_aligned=record["policy_evaluated"]["spf"] == "pass",
header_from=record["identifiers"]["header_from"],
envelope_from=record["identifiers"]["envelope_from"],
envelope_to=record["identifiers"]["envelope_to"]
)
for override in record["policy_evaluated"]["policy_override_reasons"]:
agg_doc.add_policy_override(type_=override["type"],
comment=override["comment"])
for dkim_result in record["auth_results"]["dkim"]:
agg_doc.add_dkim_result(domain=dkim_result["domain"],
selector=dkim_result["selector"],
result=dkim_result["result"])
for spf_result in record["auth_results"]["spf"]:
agg_doc.add_spf_result(domain=spf_result["domain"],
scope=spf_result["scope"],
result=spf_result["result"])
index = "dmarc_aggregate"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
agg_doc.meta.index = index
try:
agg_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__())) | Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed aggregate report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved | Below is the the instruction that describes the task:
### Input:
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed aggregate report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
### Response:
def save_aggregate_report_to_elasticsearch(aggregate_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed aggregate report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
"""
logger.debug("Saving aggregate report to Elasticsearch")
aggregate_report = aggregate_report.copy()
metadata = aggregate_report["report_metadata"]
org_name = metadata["org_name"]
report_id = metadata["report_id"]
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"],
aggregate_report["end_date"]]
org_name_query = Q(dict(match=dict(org_name=org_name)))
report_id_query = Q(dict(match=dict(report_id=report_id)))
domain_query = Q(dict(match={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_range=begin_date)))
end_date_query = Q(dict(match=dict(date_range=end_date)))
search = Search(index="dmarc_aggregate*")
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
"with a date range of {3} UTC to {4} UTC already "
"exists in "
"Elasticsearch".format(report_id,
org_name,
domain,
begin_date_human,
end_date_human))
published_policy = _PublishedPolicy(
domain=aggregate_report["policy_published"]["domain"],
adkim=aggregate_report["policy_published"]["adkim"],
aspf=aggregate_report["policy_published"]["aspf"],
p=aggregate_report["policy_published"]["p"],
sp=aggregate_report["policy_published"]["sp"],
pct=aggregate_report["policy_published"]["pct"],
fo=aggregate_report["policy_published"]["fo"]
)
for record in aggregate_report["records"]:
agg_doc = _AggregateReportDoc(
xml_schemea=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
org_email=metadata["org_email"],
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
source_country=record["source"]["country"],
source_reverse_dns=record["source"]["reverse_dns"],
source_base_domain=record["source"]["base_domain"],
message_count=record["count"],
disposition=record["policy_evaluated"]["disposition"],
dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
spf_aligned=record["policy_evaluated"]["spf"] == "pass",
header_from=record["identifiers"]["header_from"],
envelope_from=record["identifiers"]["envelope_from"],
envelope_to=record["identifiers"]["envelope_to"]
)
for override in record["policy_evaluated"]["policy_override_reasons"]:
agg_doc.add_policy_override(type_=override["type"],
comment=override["comment"])
for dkim_result in record["auth_results"]["dkim"]:
agg_doc.add_dkim_result(domain=dkim_result["domain"],
selector=dkim_result["selector"],
result=dkim_result["result"])
for spf_result in record["auth_results"]["spf"]:
agg_doc.add_spf_result(domain=spf_result["domain"],
scope=spf_result["scope"],
result=spf_result["result"])
index = "dmarc_aggregate"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
agg_doc.meta.index = index
try:
agg_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__())) |
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a bool. If that fails, convert it to ``None``.
"""
for d in dict_list:
# d[key] = True if d[key] == "Y" else False
d[key] = 1 if d[key] == "Y" else 0 | Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a bool. If that fails, convert it to ``None``. | Below is the the instruction that describes the task:
### Input:
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a bool. If that fails, convert it to ``None``.
### Response:
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a bool. If that fails, convert it to ``None``.
"""
for d in dict_list:
# d[key] = True if d[key] == "Y" else False
d[key] = 1 if d[key] == "Y" else 0 |
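A minimal, runnable illustration of the in-place conversion above; note that the current implementation writes the integers 1 and 0 rather than True/False.

rows = [{'consent': 'Y'}, {'consent': 'N'}, {'consent': ''}]
dictlist_convert_to_bool(rows, 'consent')
print(rows)   # [{'consent': 1}, {'consent': 0}, {'consent': 0}]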
def _get_new_connection(self, conn_params):
"""Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '')
conn = self.Database.connect(**conn_params)
return conn | Opens a connection to the database. | Below is the the instruction that describes the task:
### Input:
Opens a connection to the database.
### Response:
def _get_new_connection(self, conn_params):
"""Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '')
conn = self.Database.connect(**conn_params)
return conn |
def load(self):
""" Loads the user's account details and
Raises
parseException
"""
pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
# Verifies account exists
if not "great to see you again" in pg.content:
logging.getLogger("neolib.user").info("Could not load user's bank. Most likely does not have an account.", {'pg': pg})
raise noBankAcct
self.__loadDetails(pg) | Loads the user's account details and
Raises
parseException | Below is the the instruction that describes the task:
### Input:
Loads the user's account details and
Raises
parseException
### Response:
def load(self):
""" Loads the user's account details and
Raises
parseException
"""
pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
# Verifies account exists
if not "great to see you again" in pg.content:
logging.getLogger("neolib.user").info("Could not load user's bank. Most likely does not have an account.", {'pg': pg})
raise noBankAcct
self.__loadDetails(pg) |
def get_arctic_version(self, symbol, as_of=None):
"""
Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version
"""
return self._read_metadata(symbol, as_of=as_of).get('arctic_version', 0) | Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version | Below is the the instruction that describes the task:
### Input:
Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version
### Response:
def get_arctic_version(self, symbol, as_of=None):
"""
Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version
"""
return self._read_metadata(symbol, as_of=as_of).get('arctic_version', 0) |
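A hedged sketch of calling this against an Arctic VersionStore; the host and library names are placeholders and a reachable local MongoDB is assumed, following the usual Arctic quick-start flow.

from arctic import Arctic

store = Arctic('localhost')        # assumes MongoDB is running locally
store.initialize_library('demo')   # one-time library setup
library = store['demo']
library.write('SYM', [1, 2, 3])
print(library.get_arctic_version('SYM'))            # arctic version behind the latest write
print(library.get_arctic_version('SYM', as_of=1))   # same, pinned to version number 1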
def space_cluster(catalog, d_thresh, show=True):
"""
Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to be clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
"""
# Compute the distance matrix and linkage
dist_mat = dist_mat_km(catalog)
dist_vec = squareform(dist_mat)
Z = linkage(dist_vec, method='average')
# Cluster the linkage using the given threshold as the cutoff
indices = fcluster(Z, t=d_thresh, criterion='distance')
group_ids = list(set(indices))
indices = [(indices[i], i) for i in range(len(indices))]
if show:
# Plot the dendrogram...if it's not way too huge
dendrogram(Z, color_threshold=d_thresh,
distance_sort='ascending')
plt.show()
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
for group_id in group_ids:
group = Catalog()
for ind in indices:
if ind[0] == group_id:
group.append(catalog[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
groups.append(group)
return groups | Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to be clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False) | Below is the the instruction that describes the task:
### Input:
Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to be clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
### Response:
def space_cluster(catalog, d_thresh, show=True):
"""
Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to be clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
"""
# Compute the distance matrix and linkage
dist_mat = dist_mat_km(catalog)
dist_vec = squareform(dist_mat)
Z = linkage(dist_vec, method='average')
# Cluster the linkage using the given threshold as the cutoff
indices = fcluster(Z, t=d_thresh, criterion='distance')
group_ids = list(set(indices))
indices = [(indices[i], i) for i in range(len(indices))]
if show:
# Plot the dendrogram...if it's not way too huge
dendrogram(Z, color_threshold=d_thresh,
distance_sort='ascending')
plt.show()
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
for group_id in group_ids:
group = Catalog()
for ind in indices:
if ind[0] == group_id:
group.append(catalog[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
groups.append(group)
return groups |
def is_valid_number_for_region(numobj, region_code):
"""Tests whether a phone number is valid for a certain region.
Note this doesn't verify the number is actually in use, which is
impossible to tell by just looking at a number itself. If the country
calling code is not the same as the country calling code for the region,
this immediately exits with false. After this, the specific number pattern
rules for the region are examined. This is useful for determining for
example whether a particular number is valid for Canada, rather than just
a valid NANPA number.
Warning: In most cases, you want to use is_valid_number instead. For
example, this method will mark numbers from British Crown dependencies
such as the Isle of Man as invalid for the region "GB" (United Kingdom),
since it has its own region code, "IM", which may be undesirable.
Arguments:
numobj -- The phone number object that we want to validate.
region_code -- The region that we want to validate the phone number for.
Returns a boolean that indicates whether the number is of a valid pattern.
"""
country_code = numobj.country_code
if region_code is None:
return False
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper())
if (metadata is None or
(region_code != REGION_CODE_FOR_NON_GEO_ENTITY and
country_code != country_code_for_valid_region(region_code))):
# Either the region code was invalid, or the country calling code for
# this number does not match that of the region code.
return False
nsn = national_significant_number(numobj)
return (_number_type_helper(nsn, metadata) != PhoneNumberType.UNKNOWN) | Tests whether a phone number is valid for a certain region.
Note this doesn't verify the number is actually in use, which is
impossible to tell by just looking at a number itself. If the country
calling code is not the same as the country calling code for the region,
this immediately exits with false. After this, the specific number pattern
rules for the region are examined. This is useful for determining for
example whether a particular number is valid for Canada, rather than just
a valid NANPA number.
Warning: In most cases, you want to use is_valid_number instead. For
example, this method will mark numbers from British Crown dependencies
such as the Isle of Man as invalid for the region "GB" (United Kingdom),
since it has its own region code, "IM", which may be undesirable.
Arguments:
numobj -- The phone number object that we want to validate.
region_code -- The region that we want to validate the phone number for.
Returns a boolean that indicates whether the number is of a valid pattern. | Below is the the instruction that describes the task:
### Input:
Tests whether a phone number is valid for a certain region.
Note this doesn't verify the number is actually in use, which is
impossible to tell by just looking at a number itself. If the country
calling code is not the same as the country calling code for the region,
this immediately exits with false. After this, the specific number pattern
rules for the region are examined. This is useful for determining for
example whether a particular number is valid for Canada, rather than just
a valid NANPA number.
Warning: In most cases, you want to use is_valid_number instead. For
example, this method will mark numbers from British Crown dependencies
such as the Isle of Man as invalid for the region "GB" (United Kingdom),
since it has its own region code, "IM", which may be undesirable.
Arguments:
numobj -- The phone number object that we want to validate.
region_code -- The region that we want to validate the phone number for.
Returns a boolean that indicates whether the number is of a valid pattern.
### Response:
def is_valid_number_for_region(numobj, region_code):
"""Tests whether a phone number is valid for a certain region.
Note this doesn't verify the number is actually in use, which is
impossible to tell by just looking at a number itself. If the country
calling code is not the same as the country calling code for the region,
this immediately exits with false. After this, the specific number pattern
rules for the region are examined. This is useful for determining for
example whether a particular number is valid for Canada, rather than just
a valid NANPA number.
Warning: In most cases, you want to use is_valid_number instead. For
example, this method will mark numbers from British Crown dependencies
such as the Isle of Man as invalid for the region "GB" (United Kingdom),
since it has its own region code, "IM", which may be undesirable.
Arguments:
numobj -- The phone number object that we want to validate.
region_code -- The region that we want to validate the phone number for.
Returns a boolean that indicates whether the number is of a valid pattern.
"""
country_code = numobj.country_code
if region_code is None:
return False
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper())
if (metadata is None or
(region_code != REGION_CODE_FOR_NON_GEO_ENTITY and
country_code != country_code_for_valid_region(region_code))):
# Either the region code was invalid, or the country calling code for
# this number does not match that of the region code.
return False
nsn = national_significant_number(numobj)
return (_number_type_helper(nsn, metadata) != PhoneNumberType.UNKNOWN) |
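Assuming the surrounding python-phonenumbers package is installed, the module-level wrapper of this function can be exercised like this:

import phonenumbers

numobj = phonenumbers.parse("+44 20 7031 3000", None)   # a London (GB) number
print(phonenumbers.is_valid_number_for_region(numobj, "GB"))   # True
print(phonenumbers.is_valid_number_for_region(numobj, "US"))   # False: country code mismatch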
def result(self):
"""
Returns a ``string`` constant to indicate whether the team lost in
regulation, lost in overtime, or won.
"""
if self._result.lower() == 'w':
return WIN
if self._result.lower() == 'l' and \
self.overtime != 0:
return OVERTIME_LOSS
return LOSS | Returns a ``string`` constant to indicate whether the team lost in
regulation, lost in overtime, or won. | Below is the the instruction that describes the task:
### Input:
Returns a ``string`` constant to indicate whether the team lost in
regulation, lost in overtime, or won.
### Response:
def result(self):
"""
Returns a ``string`` constant to indicate whether the team lost in
regulation, lost in overtime, or won.
"""
if self._result.lower() == 'w':
return WIN
if self._result.lower() == 'l' and \
self.overtime != 0:
return OVERTIME_LOSS
return LOSS |
def _trunc(x, minval=None, maxval=None):
"""Truncate vector values to have values on range [minval, maxval]
"""
x = np.copy(x)
if minval is not None:
x[x < minval] = minval
if maxval is not None:
x[x > maxval] = maxval
return x | Truncate vector values to have values on range [minval, maxval] | Below is the the instruction that describes the task:
### Input:
Truncate vector values to have values on range [minval, maxval]
### Response:
def _trunc(x, minval=None, maxval=None):
"""Truncate vector values to have values on range [minval, maxval]
"""
x = np.copy(x)
if minval is not None:
x[x < minval] = minval
if maxval is not None:
x[x > maxval] = maxval
return x |
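A quick runnable check of the clipping behaviour; the helper copies its input, so the original array is left untouched.

import numpy as np

x = np.array([-2.0, 0.5, 3.0])
print(_trunc(x, minval=0.0, maxval=1.0))   # [0.  0.5 1. ]
print(x)                                   # unchanged: [-2.   0.5  3. ]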
def _get_task_with_policy(queue_name, task_id, owner):
"""Fetches the specified task and enforces ownership policy.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
Returns:
The valid WorkQueue task that is currently owned.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task.
"""
now = datetime.datetime.utcnow()
task = (
WorkQueue.query
.filter_by(queue_name=queue_name, task_id=task_id)
.with_lockmode('update')
.first())
if not task:
raise TaskDoesNotExistError('task_id=%r' % task_id)
# A positive delta means the current time is past the task's ETA, i.e. the lease has expired.
lease_delta = now - task.eta
if lease_delta > datetime.timedelta(0):
db.session.rollback()
raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % (
task.queue_name, task_id, lease_delta))
if task.last_owner != owner:
db.session.rollback()
raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % (
task.queue_name, task_id, task.last_owner))
return task | Fetches the specified task and enforces ownership policy.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
Returns:
The valid WorkQueue task that is currently owned.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task. | Below is the the instruction that describes the task:
### Input:
Fetches the specified task and enforces ownership policy.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
Returns:
The valid WorkQueue task that is currently owned.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task.
### Response:
def _get_task_with_policy(queue_name, task_id, owner):
"""Fetches the specified task and enforces ownership policy.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
Returns:
The valid WorkQueue task that is currently owned.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task.
"""
now = datetime.datetime.utcnow()
task = (
WorkQueue.query
.filter_by(queue_name=queue_name, task_id=task_id)
.with_lockmode('update')
.first())
if not task:
raise TaskDoesNotExistError('task_id=%r' % task_id)
# A positive delta means the current time is past the task's ETA, i.e. the lease has expired.
lease_delta = now - task.eta
if lease_delta > datetime.timedelta(0):
db.session.rollback()
raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % (
task.queue_name, task_id, lease_delta))
if task.last_owner != owner:
db.session.rollback()
raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % (
task.queue_name, task_id, task.last_owner))
return task |
def register_entity_to_group(self, entity, group):
'''
Add entity to a group.
If group does not exist, entity will be added as first member
entity is of type Entity
group is a string that is the name of the group
'''
if entity in self._entities:
if group in self._groups:
self._groups[group].append(entity)
else:
self._groups[group] = [entity]
else:
raise UnmanagedEntityError(entity) | Add entity to a group.
If group does not exist, entity will be added as first member
entity is of type Entity
group is a string that is the name of the group | Below is the the instruction that describes the task:
### Input:
Add entity to a group.
If group does not exist, entity will be added as first member
entity is of type Entity
group is a string that is the name of the group
### Response:
def register_entity_to_group(self, entity, group):
'''
Add entity to a group.
If group does not exist, entity will be added as first member
entity is of type Entity
group is a string that is the name of the group
'''
if entity in self._entities:
if group in self._groups:
self._groups[group].append(entity)
else:
self._groups[group] = [entity]
else:
raise UnmanagedEntityError(entity) |
def get_gaf_format(self):
"""Return a GAF 2.0-compatible string representation of the annotation.
Parameters
----------
Returns
-------
str
The formatted string.
"""
sep = '\t'
return sep.join(
[self.gene, self.db_ref, self.term.id, self.evidence,
'|'.join(self.db_ref), '|'.join(self.with_)]) | Return a GAF 2.0-compatible string representation of the annotation.
Parameters
----------
Returns
-------
str
The formatted string. | Below is the the instruction that describes the task:
### Input:
Return a GAF 2.0-compatible string representation of the annotation.
Parameters
----------
Returns
-------
str
The formatted string.
### Response:
def get_gaf_format(self):
"""Return a GAF 2.0-compatible string representation of the annotation.
Parameters
----------
Returns
-------
str
The formatted string.
"""
sep = '\t'
return sep.join(
[self.gene, self.db_ref, self.term.id, self.evidence,
'|'.join(self.db_ref), '|'.join(self.with_)]) |
def _translate_segment_glob(pattern):
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == '\\':
# Escape character, escape next character.
escape = True
elif char == '*':
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += '[^/]*'
elif char == '?':
# Single-character wildcard. Match any single character (except
# a slash).
regex += '[^/]'
elif char == '[':
# Braket expression wildcard. Except for the beginning
# exclamation mark, the whole braket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matchs ']', '[' and '!'.
# - "[]-]" matchs ']' and '-'.
# - "[!]a-]" matchs any character except ']', 'a' and '-'.
j = i
# Pass brack expression negation.
if j < end and pattern[j] == '!':
j += 1
# Pass first closing braket if it is at the beginning of the
# expression.
if j < end and pattern[j] == ']':
j += 1
# Find closing braket. Stop once we reach the end or find it.
while j < end and pattern[j] != ']':
j += 1
if j < end:
# Found end of braket expression. Increment j to be one past
# the closing braket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = '['
if pattern[i] == '!':
# Braket expression needs to be negated.
expr += '^'
i += 1
elif pattern[i] == '^':
# POSIX declares that the regex braket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += '\\^'
i += 1
# Build regex braket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace('\\', '\\\\')
# Add regex braket expression to regex result.
regex += expr
# Set i to one past the closing braket.
i = j
else:
# Failed to find closing braket, treat opening braket as a
# braket literal instead of as an expression.
regex += '\\['
else:
# Regular character, escape it for regex.
regex += re.escape(char)
return regex | Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`). | Below is the the instruction that describes the task:
### Input:
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
### Response:
def _translate_segment_glob(pattern):
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == '\\':
# Escape character, escape next character.
escape = True
elif char == '*':
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += '[^/]*'
elif char == '?':
# Single-character wildcard. Match any single character (except
# a slash).
regex += '[^/]'
elif char == '[':
# Braket expression wildcard. Except for the beginning
# exclamation mark, the whole braket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matchs ']', '[' and '!'.
# - "[]-]" matchs ']' and '-'.
# - "[!]a-]" matchs any character except ']', 'a' and '-'.
j = i
# Pass brack expression negation.
if j < end and pattern[j] == '!':
j += 1
# Pass first closing braket if it is at the beginning of the
# expression.
if j < end and pattern[j] == ']':
j += 1
# Find closing braket. Stop once we reach the end or find it.
while j < end and pattern[j] != ']':
j += 1
if j < end:
# Found end of braket expression. Increment j to be one past
# the closing braket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = '['
if pattern[i] == '!':
# Braket expression needs to be negated.
expr += '^'
i += 1
elif pattern[i] == '^':
# POSIX declares that the regex braket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += '\\^'
i += 1
# Build regex braket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace('\\', '\\\\')
# Add regex braket expression to regex result.
regex += expr
# Set i to one past the closing braket.
i = j
else:
# Failed to find closing braket, treat opening braket as a
# braket literal instead of as an expression.
regex += '\\['
else:
# Regular character, escape it for regex.
regex += re.escape(char)
return regex |
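A runnable illustration of the translation above; the function is module-private, so this only demonstrates what the produced regex looks like and that '*' never crosses a path separator.

import re

regex = _translate_segment_glob('*.tx[to]')
print(regex)                                        # [^/]*\.tx[to]
print(bool(re.fullmatch(regex, 'notes.txt')))       # True
print(bool(re.fullmatch(regex, 'dir/notes.txt')))   # False: '*' does not match '/'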
def GET_names( self, path_info ):
"""
Get all names in existence
If `all=true` is set, then include expired names.
Returns the list on success
Returns 400 on invalid arguments
Returns 502 on failure to get names
"""
include_expired = False
qs_values = path_info['qs_values']
page = qs_values.get('page', None)
if page is None:
log.error("Page required")
return self._reply_json({'error': 'page= argument required'}, status_code=400)
try:
page = int(page)
if page < 0:
raise ValueError("Page is negative")
except ValueError:
log.error("Invalid page")
return self._reply_json({'error': 'Invalid page= value'}, status_code=400)
if qs_values.get('all', '').lower() in ['1', 'true']:
include_expired = True
offset = page * 100
count = 100
blockstackd_url = get_blockstackd_url()
res = blockstackd_client.get_all_names(offset, count, include_expired=include_expired, hostport=blockstackd_url)
if json_is_error(res):
log.error("Failed to list all names (offset={}, count={}): {}".format(offset, count, res['error']))
return self._reply_json({'error': 'Failed to list all names'}, status_code=res.get('http_status', 502))
return self._reply_json(res) | Get all names in existence
If `all=true` is set, then include expired names.
Returns the list on success
Returns 400 on invalid arguments
Returns 502 on failure to get names | Below is the the instruction that describes the task:
### Input:
Get all names in existence
If `all=true` is set, then include expired names.
Returns the list on success
Returns 400 on invalid arguments
Returns 502 on failure to get names
### Response:
def GET_names( self, path_info ):
"""
Get all names in existence
If `all=true` is set, then include expired names.
Returns the list on success
Returns 400 on invalid arguments
Returns 502 on failure to get names
"""
include_expired = False
qs_values = path_info['qs_values']
page = qs_values.get('page', None)
if page is None:
log.error("Page required")
return self._reply_json({'error': 'page= argument required'}, status_code=400)
try:
page = int(page)
if page < 0:
raise ValueError("Page is negative")
except ValueError:
log.error("Invalid page")
return self._reply_json({'error': 'Invalid page= value'}, status_code=400)
if qs_values.get('all', '').lower() in ['1', 'true']:
include_expired = True
offset = page * 100
count = 100
blockstackd_url = get_blockstackd_url()
res = blockstackd_client.get_all_names(offset, count, include_expired=include_expired, hostport=blockstackd_url)
if json_is_error(res):
log.error("Failed to list all names (offset={}, count={}): {}".format(offset, count, res['error']))
return self._reply_json({'error': 'Failed to list all names'}, status_code=res.get('http_status', 502))
return self._reply_json(res) |
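A hedged client-side example of calling the route this handler serves; the host, port and the /v1/names mount path are assumptions about how the API node is configured, not details taken from the record.

import requests

base = 'http://localhost:6270'   # placeholder for a Blockstack API node
resp = requests.get(base + '/v1/names', params={'page': 0, 'all': 'true'})
if resp.ok:
    print(len(resp.json()), 'names on page 0')
else:
    print('error:', resp.json().get('error'))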
def _has_population_germline(rec):
"""Check if header defines population annotated germline samples for tumor only.
"""
for k in population_keys:
if k in rec.header.info:
return True
return False | Check if header defines population annotated germline samples for tumor only. | Below is the the instruction that describes the task:
### Input:
Check if header defines population annotated germline samples for tumor only.
### Response:
def _has_population_germline(rec):
"""Check if header defines population annotated germline samples for tumor only.
"""
for k in population_keys:
if k in rec.header.info:
return True
return False |
def printSegmentForCell(tm, cell):
"""Print segment information for this cell"""
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
print " ",
synapses = seg._synapses
for s in synapses:
print "%d:%g" %(s.presynapticCell,s.permanence),
print | Print segment information for this cell | Below is the the instruction that describes the task:
### Input:
Print segment information for this cell
### Response:
def printSegmentForCell(tm, cell):
"""Print segment information for this cell"""
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
print " ",
synapses = seg._synapses
for s in synapses:
print "%d:%g" %(s.presynapticCell,s.permanence),
print |
def read(self, want=0):
'''
Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
The lock usage means it must run on a different thread than
:meth:`fill`, ie. the main thread, otherwise will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number of bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes
'''
if self._finished:
if self._finished == 1:
self._finished += 1
return ""
return EOFError("EOF reached")
# Thread communication
self._want = want
self._add.set()
self._result.wait()
self._result.clear()
if want:
data = self._data[:want]
self._data = self._data[want:]
else:
data = self._data
self._data = bytes()
return data | Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
The lock usage means it must run on a different thread than
:meth:`fill`, ie. the main thread, otherwise will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number of bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
The lock usage means it must run on a different thread than
:meth:`fill`, ie. the main thread, otherwise will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number of bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes
### Response:
def read(self, want=0):
'''
Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
The lock usage means it must run on a different thread than
:meth:`fill`, ie. the main thread, otherwise will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes
'''
if self._finished:
if self._finished == 1:
self._finished += 1
return ""
return EOFError("EOF reached")
# Thread communication
self._want = want
self._add.set()
self._result.wait()
self._result.clear()
if want:
data = self._data[:want]
self._data = self._data[want:]
else:
data = self._data
self._data = bytes()
return data |
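A minimal, self-contained sketch of the two-Event handshake the docstring above describes, so the blocking behaviour is easier to follow. The class name, the simplified write() standing in for the original fill()/write() side, and the attribute layout are assumptions, not the original implementation.

import threading

class ChunkBuffer:  # hypothetical name; stands in for the original streaming class
    def __init__(self):
        self._data = bytes()
        self._want = 0
        self._add = threading.Event()     # reader signals "I want data"
        self._result = threading.Event()  # writer signals "data is ready"

    def write(self, chunk):  # runs on the producer thread
        self._add.wait()
        self._add.clear()
        self._data += chunk
        self._result.set()

    def read(self, want=0):  # runs on the consumer thread, mirrors the method above
        self._want = want    # kept to mirror the original; unused in this sketch
        self._add.set()
        self._result.wait()
        self._result.clear()
        if want:
            data, self._data = self._data[:want], self._data[want:]
        else:
            data, self._data = self._data, bytes()
        return data

buf = ChunkBuffer()
threading.Thread(target=buf.write, args=(b"streamed bytes",), daemon=True).start()
print(buf.read())  # b'streamed bytes'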
def get_serializer(name):
'''
Return the serialize function.
'''
try:
log.debug('Using %s as serializer', name)
return SERIALIZER_LOOKUP[name]
except KeyError:
msg = 'Serializer {} is not available'.format(name)
log.error(msg, exc_info=True)
raise InvalidSerializerException(msg) | Return the serialize function. | Below is the the instruction that describes the task:
### Input:
Return the serialize function.
### Response:
def get_serializer(name):
'''
Return the serialize function.
'''
try:
log.debug('Using %s as serializer', name)
return SERIALIZER_LOOKUP[name]
except KeyError:
msg = 'Serializer {} is not available'.format(name)
log.error(msg, exc_info=True)
raise InvalidSerializerException(msg) |
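For context, a hypothetical module setup around this helper might look as follows; the lookup contents, logger and exception class are assumptions, since only the function itself appears above. These names must live in the same module as get_serializer for the lookup to resolve.

import json
import logging
import pickle

log = logging.getLogger(__name__)

class InvalidSerializerException(Exception):
    """Raised when an unknown serializer name is requested (assumed definition)."""

SERIALIZER_LOOKUP = {        # assumed contents, for illustration only
    "json": json.dumps,
    "pickle": pickle.dumps,
}

serialize = get_serializer("json")
print(serialize({"status": "ok"}))   # '{"status": "ok"}'
# get_serializer("yaml") would log an error and raise InvalidSerializerException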
def _shrink_file(dicom_file_in, subsample_factor):
"""
Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep
"""
# Default meta_fields
# Required fields according to reference
dicom_file_out = dicom_file_in
# Load dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
# Create new dicom file
# Set new file meta information
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
# Copy transfer syntax
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
# Add the data elements
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring() # = byte array (see pydicom docs)
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
# noinspection PyPep8Naming
dicom_out[0x7fe0, 0x0010].VR = 'OB'
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
# Save dicom_file_out
# Make sure we have a directory
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info('Decompressing files')
# Save the file
dicom_out.save_as(dicom_file_out, write_like_original=False) | Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep | Below is the the instruction that describes the task:
### Input:
Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep
### Response:
def _shrink_file(dicom_file_in, subsample_factor):
"""
Anonymize a single dicomfile
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep
"""
# Default meta_fields
# Required fields according to reference
dicom_file_out = dicom_file_in
# Load dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
# Create new dicom file
# Set new file meta information
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
# Copy transfer syntax
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
# Add the data elements
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring() # = byte array (see pydicom docs)
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
# noinspection PyPep8Naming
dicom_out[0x7fe0, 0x0010].VR = 'OB'
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
# Save dicom_file_out
# Make sure we have a directory
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info('Decompressing files')
# Save the file
dicom_out.save_as(dicom_file_out, write_like_original=False) |
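Since dicom_file_out is assigned from dicom_file_in, the downsampling overwrites the input file in place. A hedged usage sketch (file paths are placeholders) would therefore copy the slice first:

import shutil

shutil.copy("slices/original.dcm", "slices/preview.dcm")  # keep the original untouched
_shrink_file("slices/preview.dcm", subsample_factor=2)    # halves Rows/Columns, doubles PixelSpacing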
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
"""Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
"""
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs) | Converts the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map | Below is the the instruction that describes the task:
### Input:
Converts the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
### Response:
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
"""Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
"""
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs) |
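The returned object is an ordinary werkzeug Map, so it can be bound and matched like any other rule map. A rough sketch, assuming _build_rules (not shown above) yields one werkzeug Rule per mapping:

from werkzeug.routing import Map, Rule

rules = [                                      # stand-in for what _build_rules might emit
    Rule("/users/", endpoint="user_list"),
    Rule("/users/<int:user_id>", endpoint="user_detail"),
]
urlmapping = Map(rules=rules, strict_slashes=False)

adapter = urlmapping.bind("example.com")
print(adapter.match("/users/42"))              # ('user_detail', {'user_id': 42})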
def _deserialize_datetime(self, data):
"""Take any values coming in as a datetime and deserialize them
"""
for key in data:
if isinstance(data[key], dict):
if data[key].get('type') == 'datetime':
data[key] = \
datetime.datetime.fromtimestamp(data[key]['value'])
return data | Take any values coming in as a datetime and deserialize them | Below is the the instruction that describes the task:
### Input:
Take any values coming in as a datetime and deserialize them
### Response:
def _deserialize_datetime(self, data):
"""Take any values coming in as a datetime and deserialize them
"""
for key in data:
if isinstance(data[key], dict):
if data[key].get('type') == 'datetime':
data[key] = \
datetime.datetime.fromtimestamp(data[key]['value'])
return data |
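A hedged illustration of the round trip. Because the method never touches self, it is exercised here as a plain function with None standing in for self; the payload shape follows the {"type": "datetime", "value": ...} convention checked in the code, and the field names are made up.

payload = {
    "name": "job-1",
    "started_at": {"type": "datetime", "value": 1700000000},
}
print(_deserialize_datetime(None, payload)["started_at"])
# -> a datetime.datetime built from the Unix timestamp (local time)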
def login(self, token, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
"""Login using GitHub access token.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param token: GitHub personal API token.
:type token: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the login request.
:rtype: dict
"""
params = {
'token': token,
}
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
return self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
) | Login using GitHub access token.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param token: GitHub personal API token.
:type token: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the login request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Login using GitHub access token.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param token: GitHub personal API token.
:type token: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the login request.
:rtype: dict
### Response:
def login(self, token, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
"""Login using GitHub access token.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param token: GitHub personal API token.
:type token: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the login request.
:rtype: dict
"""
params = {
'token': token,
}
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
return self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
) |
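A typical call path, assuming a recent hvac release where this method is exposed as client.auth.github, and assuming the Vault URL and environment variable below (both placeholders):

import os
import hvac

client = hvac.Client(url="https://vault.example.com:8200")
login_response = client.auth.github.login(token=os.environ["GITHUB_TOKEN"])
print(client.is_authenticated())             # True once the returned token is set on the client
print(login_response["auth"]["policies"])    # policies granted via the GitHub team mapping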
def _on_changed(self):
"""Slot for changed events"""
page = self._get_page()
if not page.flag_autosave:
page.flag_changed = True
self._update_gui_text_tabs() | Slot for changed events | Below is the the instruction that describes the task:
### Input:
Slot for changed events
### Response:
def _on_changed(self):
"""Slot for changed events"""
page = self._get_page()
if not page.flag_autosave:
page.flag_changed = True
self._update_gui_text_tabs() |
def date_parser(items):
""" datetime parser to help load smp files
Parameters
----------
items : str
the datetime string to parse
Returns
-------
dt : datetime.datetime
the parsed datetime
"""
try:
dt = datetime.strptime(items,"%d/%m/%Y %H:%M:%S")
except Exception as e:
try:
dt = datetime.strptime(items,"%m/%d/%Y %H:%M:%S")
except Exception as ee:
raise Exception("error parsing datetime string" +\
" {0}: \n{1}\n{2}".format(str(items),str(e),str(ee)))
return dt | datetime parser to help load smp files
Parameters
----------
items : str
the datetime string to parse
Returns
-------
dt : datetime.datetime
the parsed datetime | Below is the the instruction that describes the task:
### Input:
datetime parser to help load smp files
Parameters
----------
items : str
the datetime string to parse
Returns
-------
dt : datetime.datetime
the parsed datetime
### Response:
def date_parser(items):
""" datetime parser to help load smp files
Parameters
----------
items : str
the datetime string to parse
Returns
-------
dt : datetime.datetime
the parsed datetime
"""
try:
dt = datetime.strptime(items,"%d/%m/%Y %H:%M:%S")
except Exception as e:
try:
dt = datetime.strptime(items,"%m/%d/%Y %H:%M:%S")
except Exception as ee:
raise Exception("error parsing datetime string" +\
" {0}: \n{1}\n{2}".format(str(items),str(e),str(ee)))
return dt |
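Two hedged examples of the fallback between the two formats; note that an ambiguous string such as 01/02/2020 resolves day-first, because that format is tried first.

print(date_parser("25/12/2020 06:30:00"))   # datetime(2020, 12, 25, 6, 30) via %d/%m/%Y
print(date_parser("12/25/2020 06:30:00"))   # datetime(2020, 12, 25, 6, 30) via the %m/%d/%Y fallback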
def _send_content(self, content, connection):
""" Send a content array from the connection """
if connection:
if connection.async:
callback = connection.callbacks['remote']
if callback:
callback(self, self.parent_object, content)
self.current_connection.reset()
self.current_connection = None
else:
return (self, self.parent_object, content) | Send a content array from the connection | Below is the the instruction that describes the task:
### Input:
Send a content array from the connection
### Response:
def _send_content(self, content, connection):
""" Send a content array from the connection """
if connection:
if connection.async:
callback = connection.callbacks['remote']
if callback:
callback(self, self.parent_object, content)
self.current_connection.reset()
self.current_connection = None
else:
return (self, self.parent_object, content) |
def p_new_expr(self, p):
"""new_expr : member_expr
| NEW new_expr
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = self.asttypes.NewExpr(p[2])
p[0].setpos(p) | new_expr : member_expr
| NEW new_expr | Below is the the instruction that describes the task:
### Input:
new_expr : member_expr
| NEW new_expr
### Response:
def p_new_expr(self, p):
"""new_expr : member_expr
| NEW new_expr
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = self.asttypes.NewExpr(p[2])
p[0].setpos(p) |
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct) | Apply a function recursively to each element of a nested data struct. | Below is the the instruction that describes the task:
### Input:
Apply a function recursively to each element of a nested data struct.
### Response:
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct) |
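A small worked example based only on the code above:

struct = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(lambda x: x * 10, struct))
# {'a': [10, 20], 'b': {'c': 30}}

# Tuples are treated as leaves unless map_tuple=True is passed:
print(map_nested(lambda x: x * 10, {"d": (4, 5)}, map_tuple=True))
# {'d': (40, 50)}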
def setup(self):
"""
Set up the power system object by executing the following workflow:
* Sort the loaded models to meet the initialization sequence
* Create call strings for routines
* Call the ``setup`` function of the loaded models
* Assign addresses for the loaded models
* Call ``dae.setup`` to assign memory for the numerical dae structure
* Convert model parameters to the system base
Returns
-------
PowerSystem
The instance of the PowerSystem
"""
self.devman.sort_device()
self.call.setup()
self.model_setup()
self.xy_addr0()
self.dae.setup()
self.to_sysbase()
return self | Set up the power system object by executing the following workflow:
* Sort the loaded models to meet the initialization sequence
* Create call strings for routines
* Call the ``setup`` function of the loaded models
* Assign addresses for the loaded models
* Call ``dae.setup`` to assign memory for the numerical dae structure
* Convert model parameters to the system base
Returns
-------
PowerSystem
The instance of the PowerSystem | Below is the the instruction that describes the task:
### Input:
Set up the power system object by executing the following workflow:
* Sort the loaded models to meet the initialization sequence
* Create call strings for routines
* Call the ``setup`` function of the loaded models
* Assign addresses for the loaded models
* Call ``dae.setup`` to assign memory for the numerical dae structure
* Convert model parameters to the system base
Returns
-------
PowerSystem
The instance of the PowerSystem
### Response:
def setup(self):
"""
Set up the power system object by executing the following workflow:
* Sort the loaded models to meet the initialization sequence
* Create call strings for routines
* Call the ``setup`` function of the loaded models
* Assign addresses for the loaded models
* Call ``dae.setup`` to assign memory for the numerical dae structure
* Convert model parameters to the system base
Returns
-------
PowerSystem
The instance of the PowerSystem
"""
self.devman.sort_device()
self.call.setup()
self.model_setup()
self.xy_addr0()
self.dae.setup()
self.to_sysbase()
return self |
def parse_reports(self):
""" Find RSeQC infer_experiment reports and parse their data """
# Set up vars
self.infer_exp = dict()
regexes = {
'pe_sense': r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)",
'pe_antisense': r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)",
'se_sense': r"\"\+\+,--\": (\d\.\d+)",
'se_antisense': r"\+-,-\+\": (\d\.\d+)",
'failed': r"Fraction of reads failed to determine: (\d\.\d+)"
}
# Go through files and parse data using regexes
for f in self.find_log_files('rseqc/infer_experiment'):
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d[k] = float(r_search.group(1))
if len(d) > 0:
if f['s_name'] in self.infer_exp:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='infer_experiment')
self.infer_exp[f['s_name']] = d
# Filter to strip out ignored sample names
self.infer_exp = self.ignore_samples(self.infer_exp)
if len(self.infer_exp) > 0:
# Write to file
self.write_data_file(self.infer_exp, 'multiqc_rseqc_infer_experiment')
# Merge PE and SE for plot
pdata = dict()
for s_name, vals in self.infer_exp.items():
pdata[s_name] = dict()
for k, v in vals.items():
v *= 100.0 # Multiply to get percentage
if k[:2] == 'pe' or k[:2] == 'se':
k = k[3:]
pdata[s_name][k] = v + pdata[s_name].get(k, 0)
# Plot bar graph of groups
keys = OrderedDict()
keys['sense'] = {'name': "Sense"}
keys['antisense'] = {'name': "Antisense"}
keys['failed'] = {'name': "Undetermined"}
# Config for the plot
pconfig = {
'id': 'rseqc_infer_experiment_plot',
'title': 'RSeQC: Infer experiment',
'ylab': '% Tags',
'ymin': 0,
'ymax': 100,
'tt_percentages': False,
'ylab_format': '{value}%',
'cpswitch': False
}
self.add_section (
name = 'Infer experiment',
anchor = 'rseqc-infer_experiment',
description = '<a href="http://rseqc.sourceforge.net/#infer-experiment-py" target="_blank">Infer experiment</a>' \
" counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts." \
" It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).",
plot = bargraph.plot(pdata, keys, pconfig)
)
# Return number of samples found
return len(self.infer_exp) | Find RSeQC infer_experiment reports and parse their data | Below is the the instruction that describes the task:
### Input:
Find RSeQC infer_experiment reports and parse their data
### Response:
def parse_reports(self):
""" Find RSeQC infer_experiment reports and parse their data """
# Set up vars
self.infer_exp = dict()
regexes = {
'pe_sense': r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)",
'pe_antisense': r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)",
'se_sense': r"\"\+\+,--\": (\d\.\d+)",
'se_antisense': r"\+-,-\+\": (\d\.\d+)",
'failed': r"Fraction of reads failed to determine: (\d\.\d+)"
}
# Go through files and parse data using regexes
for f in self.find_log_files('rseqc/infer_experiment'):
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d[k] = float(r_search.group(1))
if len(d) > 0:
if f['s_name'] in self.infer_exp:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='infer_experiment')
self.infer_exp[f['s_name']] = d
# Filter to strip out ignored sample names
self.infer_exp = self.ignore_samples(self.infer_exp)
if len(self.infer_exp) > 0:
# Write to file
self.write_data_file(self.infer_exp, 'multiqc_rseqc_infer_experiment')
# Merge PE and SE for plot
pdata = dict()
for s_name, vals in self.infer_exp.items():
pdata[s_name] = dict()
for k, v in vals.items():
v *= 100.0 # Multiply to get percentage
if k[:2] == 'pe' or k[:2] == 'se':
k = k[3:]
pdata[s_name][k] = v + pdata[s_name].get(k, 0)
# Plot bar graph of groups
keys = OrderedDict()
keys['sense'] = {'name': "Sense"}
keys['antisense'] = {'name': "Antisense"}
keys['failed'] = {'name': "Undetermined"}
# Config for the plot
pconfig = {
'id': 'rseqc_infer_experiment_plot',
'title': 'RSeQC: Infer experiment',
'ylab': '% Tags',
'ymin': 0,
'ymax': 100,
'tt_percentages': False,
'ylab_format': '{value}%',
'cpswitch': False
}
self.add_section (
name = 'Infer experiment',
anchor = 'rseqc-infer_experiment',
description = '<a href="http://rseqc.sourceforge.net/#infer-experiment-py" target="_blank">Infer experiment</a>' \
" counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts." \
" It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).",
plot = bargraph.plot(pdata, keys, pconfig)
)
# Return number of samples found
return len(self.infer_exp) |
def cert_from_instance(instance):
""" Find certificates that are part of an instance
:param instance: An instance
:return: possibly empty list of certificates
"""
if instance.signature:
if instance.signature.key_info:
return cert_from_key_info(instance.signature.key_info,
ignore_age=True)
return [] | Find certificates that are part of an instance
:param instance: An instance
:return: possibly empty list of certificates | Below is the the instruction that describes the task:
### Input:
Find certificates that are part of an instance
:param instance: An instance
:return: possibly empty list of certificates
### Response:
def cert_from_instance(instance):
""" Find certificates that are part of an instance
:param instance: An instance
:return: possibly empty list of certificates
"""
if instance.signature:
if instance.signature.key_info:
return cert_from_key_info(instance.signature.key_info,
ignore_age=True)
return [] |
def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image | Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels). | Below is the the instruction that describes the task:
### Input:
Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
### Response:
def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image |
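A hedged usage sketch: the helper expects a file object, a TF1-style session (only used for the TensorFlow fallback path when OpenCV fails), and the archive filename for logging. The path below is a placeholder, not a real archive member.

import tensorflow as tf

path = "SUN397/a/abbey/example.jpg"                 # placeholder archive member name
with tf.compat.v1.Session() as session:
    with tf.io.gfile.GFile(path, "rb") as fobj:
        image = _decode_image(fobj, session, path)
print(image.shape)                                  # (height, width, 3)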
def scopes(self):
"""
Gets the Scopes API client.
Returns:
Scopes:
"""
if not self.__scopes:
self.__scopes = Scopes(self.__connection)
return self.__scopes | Gets the Scopes API client.
Returns:
Scopes: | Below is the the instruction that describes the task:
### Input:
Gets the Scopes API client.
Returns:
Scopes:
### Response:
def scopes(self):
"""
Gets the Scopes API client.
Returns:
Scopes:
"""
if not self.__scopes:
self.__scopes = Scopes(self.__connection)
return self.__scopes |
def add_default_parameter_values(self, sam_template):
"""
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
"""
parameter_definition = sam_template.get("Parameters", None)
if not parameter_definition or not isinstance(parameter_definition, dict):
return self.parameter_values
for param_name, value in parameter_definition.items():
if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value:
self.parameter_values[param_name] = value["Default"] | Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values | Below is the the instruction that describes the task:
### Input:
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
### Response:
def add_default_parameter_values(self, sam_template):
"""
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
"""
parameter_definition = sam_template.get("Parameters", None)
if not parameter_definition or not isinstance(parameter_definition, dict):
return self.parameter_values
for param_name, value in parameter_definition.items():
if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value:
self.parameter_values[param_name] = value["Default"] |
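A behaviour sketch that mirrors the docstring's own example. Since the real owning class is not shown, the def above is treated as a plain function and attached to a throwaway class; the class name is an assumption.

class _Demo:                                          # stand-in for the (unshown) owning class
    add_default_parameter_values = add_default_parameter_values

demo = _Demo()
demo.parameter_values = {"Param2": "new value"}
sam_template = {
    "Parameters": {
        "Param1": {"Type": "String", "Default": "default_value"},
        "Param2": {"Type": "String", "Default": "default_value"},
    }
}
demo.add_default_parameter_values(sam_template)
print(demo.parameter_values)
# {'Param2': 'new value', 'Param1': 'default_value'}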