code (string, 64–7.01k chars) | docstring (string, 2–15.8k chars) | text (string, 144–19.2k chars)
---|---|---
#vtb
def _get_struct_fillstyle(self, shape_number):
obj = _make_object("FillStyle")
obj.FillStyleType = style_type = unpack_ui8(self._src)
if style_type == 0x00:
if shape_number <= 2:
obj.Color = self._get_struct_rgb()
else:
obj.Color = self._get_struct_rgba()
if style_type in (0x10, 0x12, 0x13):
obj.GradientMatrix = self._get_struct_matrix()
if style_type in (0x10, 0x12):
obj.Gradient = self._get_struct_gradient(shape_number)
if style_type == 0x13:
obj.Gradient = self._get_struct_focalgradient(shape_number)
if style_type in (0x40, 0x41, 0x42, 0x43):
obj.BitmapId = unpack_ui16(self._src)
obj.BitmapMatrix = self._get_struct_matrix()
return obj
|
Get the values for the FILLSTYLE record.
|
### Input:
Get the values for the FILLSTYLE record.
### Response:
#vtb
def _get_struct_fillstyle(self, shape_number):
obj = _make_object("FillStyle")
obj.FillStyleType = style_type = unpack_ui8(self._src)
if style_type == 0x00:
if shape_number <= 2:
obj.Color = self._get_struct_rgb()
else:
obj.Color = self._get_struct_rgba()
if style_type in (0x10, 0x12, 0x13):
obj.GradientMatrix = self._get_struct_matrix()
if style_type in (0x10, 0x12):
obj.Gradient = self._get_struct_gradient(shape_number)
if style_type == 0x13:
obj.Gradient = self._get_struct_focalgradient(shape_number)
if style_type in (0x40, 0x41, 0x42, 0x43):
obj.BitmapId = unpack_ui16(self._src)
obj.BitmapMatrix = self._get_struct_matrix()
return obj
|
#vtb
def register_callback(self, callback):
if not callable(callback):
raise ValueError('callback is not callable')
self.callbacks.append(callback)
|
Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
changes.
|
### Input:
Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
changes.
### Response:
#vtb
def register_callback(self, callback):
if not callable(callback):
raise ValueError('callback is not callable')
self.callbacks.append(callback)
|
#vtb
def compute_consistency_score(returns_test, preds):
returns_test_cum = cum_returns(returns_test, starting_value=1.)
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i],
returns_test_cum.iloc[i],
kind='mean')
for i in range(len(returns_test_cum))]
return 100 - np.abs(50 - np.mean(q)) / .5
|
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
|
### Input:
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
### Response:
#vtb
def compute_consistency_score(returns_test, preds):
returns_test_cum = cum_returns(returns_test, starting_value=1.)
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i],
returns_test_cum.iloc[i],
kind='mean')
for i in range(len(returns_test_cum))]
return 100 - np.abs(50 - np.mean(q)) / .5
|
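The score is an average percentile rank across time steps, folded onto a 0-100 scale. A minimal self-contained sketch of the same logic with synthetic paths (kind='mean' is assumed for the stripped argument above):

import numpy as np
import scipy.stats as sp_stats

rng = np.random.default_rng(0)
preds = rng.normal(0.001, 0.01, (500, 50))   # 500 simulated return paths, 50 periods
cum_preds = np.cumprod(preds + 1, 1)
observed = np.cumprod(rng.normal(0.001, 0.01, 50) + 1)

q = [sp_stats.percentileofscore(cum_preds[:, i], observed[i], kind='mean')
     for i in range(len(observed))]
score = 100 - np.abs(50 - np.mean(q)) / .5   # near 100 when observed tracks the cone's median
print(score)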
#vtb
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('%s: added %s' % (caller(2), edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
|
Add edge to chart, and see if it extends or predicts another edge.
|
### Input:
Add edge to chart, and see if it extends or predicts another edge.
### Response:
#vtb
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('%s: added %s' % (caller(2), edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
|
#vtb
def _parse_fmt(fmt, color_key='color', ls_key='ls', marker_key='marker'):
# Modified from matplotlib's _process_plot_format; the body was lost in
# extraction. Recoverable error messages: 'Illegal format string; two
# linestyle symbols', 'Illegal format string; two marker symbols',
# 'Illegal format string; two color symbols', and
# 'Unrecognized character %c in format string' % c. The keyword defaults
# above are assumed.
return result
|
Modified from matplotlib's _process_plot_format function.
|
### Input:
Modified from matplotlib's _process_plot_format function.
### Response:
#vtb
def _parse_fmt(fmt, color_key='color', ls_key='ls', marker_key='marker'):
# Modified from matplotlib's _process_plot_format; the body was lost in
# extraction. Recoverable error messages: 'Illegal format string; two
# linestyle symbols', 'Illegal format string; two marker symbols',
# 'Illegal format string; two color symbols', and
# 'Unrecognized character %c in format string' % c. The keyword defaults
# above are assumed.
return result
|
#vtb
def writes(nb, format, **kwargs):
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format)
|
Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
|
### Input:
Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
### Response:
#vtb
def writes(nb, format, **kwargs):
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format)
|
#vtb
def migrate_v0_rules(self):
# NOTE: key/field literals below follow the docstring; exact strings were lost in extraction.
for iden, valu in self.core.slab.scanByFull(db=self.trigdb):
ruledict = s_msgpack.un(valu)
ver = ruledict.get('ver')
if ver != 0:
continue
user = ruledict.pop('user', None)
if user is None:
logger.warning('Skipping v0 rule %r with no user field.', iden)
continue
user = self.core.auth.getUserByName(user)
if user is None:
logger.warning('Skipping v0 rule %r with unknown user.', iden)
continue
ruledict['ver'] = 1
ruledict['iden'] = user.iden
newiden = s_common.ehex(iden)
self.core.slab.pop(iden, db=self.trigdb)
self.core.slab.put(newiden.encode(), s_msgpack.en(ruledict), db=self.trigdb)
|
Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
v0 had two differences: 'user' was a username, now replaced by the user's iden in the 'iden' field.
Also, 'iden' was stored as binary; now it is stored as a hex string.
|
### Input:
Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
v0 had two differences: 'user' was a username, now replaced by the user's iden in the 'iden' field.
Also, 'iden' was stored as binary; now it is stored as a hex string.
### Response:
#vtb
def migrate_v0_rules(self):
# NOTE: key/field literals below follow the docstring; exact strings were lost in extraction.
for iden, valu in self.core.slab.scanByFull(db=self.trigdb):
ruledict = s_msgpack.un(valu)
ver = ruledict.get('ver')
if ver != 0:
continue
user = ruledict.pop('user', None)
if user is None:
logger.warning('Skipping v0 rule %r with no user field.', iden)
continue
user = self.core.auth.getUserByName(user)
if user is None:
logger.warning('Skipping v0 rule %r with unknown user.', iden)
continue
ruledict['ver'] = 1
ruledict['iden'] = user.iden
newiden = s_common.ehex(iden)
self.core.slab.pop(iden, db=self.trigdb)
self.core.slab.put(newiden.encode(), s_msgpack.en(ruledict), db=self.trigdb)
|
#vtb
def make_document(self, titlestring):
root = etree.XML('<html xmlns="http://www.w3.org/1999/xhtml"/>')  # root markup assumed; literal lost in extraction
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
etree.SubElement(head,
'link',
{'rel': 'stylesheet',
'href': 'css/default.css',  # attribute values assumed
'type': 'text/css'})
return document
|
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
|
### Input:
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
### Response:
#vtb
def make_document(self, titlestring):
root = etree.XML('<html xmlns="http://www.w3.org/1999/xhtml"/>')  # root markup assumed; literal lost in extraction
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
etree.SubElement(head,
'link',
{'rel': 'stylesheet',
'href': 'css/default.css',  # attribute values assumed
'type': 'text/css'})
return document
|
#vtb
def delete(ctx, uri):
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.delete(uri))
|
DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh']}
|
### Input:
DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh']}
### Response:
#vtb
def delete(ctx, uri):
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.delete(uri))
|
#vtb
def unsign(wheelfile):
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close()
|
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
|
### Input:
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
### Response:
#vtb
def unsign(wheelfile):
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close()
|
#vtb
def serialize_data(data, compression=False, encryption=False, public_key=None):
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if encryption and public_key:
message = encryption.encrypt(message, public_key)
encoded_message = str.encode(message)
return encoded_message
|
Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
|
### Input:
Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
### Response:
#vtb
def serialize_data(data, compression=False, encryption=False, public_key=None):
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if encryption and public_key:
message = encryption.encrypt(message, public_key)
encoded_message = str.encode(message)
return encoded_message
|
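With compression and encryption disabled, the function reduces to JSON plus a UTF-8 encode (the compression branch assumes Python 2 str semantics). A sketch of the default path:

import json

payload = {'cmd': 'ping', 'seq': 1}
wire = json.dumps(payload).encode()   # equivalent to str.encode(message) above
print(wire)                           # b'{"cmd": "ping", "seq": 1}'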
#vtb
def get_paths(folder, ignore_endswith=ignore_endswith):
folder = pathlib.Path(folder).resolve()
files = folder.rglob("*")
for ie in ignore_endswith:
files = [ff for ff in files if not ff.name.endswith(ie)]
return sorted(files)
|
Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored.
|
### Input:
Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored.
### Response:
#vtb
def get_paths(folder, ignore_endswith=ignore_endswith):
folder = pathlib.Path(folder).resolve()
files = folder.rglob("*")
for ie in ignore_endswith:
files = [ff for ff in files if not ff.name.endswith(ie)]
return sorted(files)
|
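A hedged usage sketch, assuming get_paths above is in scope (the real ignore_endswith default is a module-level list not shown here):

import pathlib
import tempfile

tmp = pathlib.Path(tempfile.mkdtemp())
(tmp / "a.h5").touch()
(tmp / "b.bak").touch()
print(get_paths(tmp, ignore_endswith=[".bak"]))   # -> [PosixPath('.../a.h5')]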
#vtb
def diff_identifiers(a, b):
a_ids = set(a.identifiers)
b_ids = set(b.identifiers)
difference = []
for i in a_ids.difference(b_ids):
difference.append((i, True, False))
for i in b_ids.difference(a_ids):
difference.append((i, False, True))
return difference
|
Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples where identifiers in datasets differ
|
### Input:
Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples where identifiers in datasets differ
### Response:
#vtb
def diff_identifiers(a, b):
a_ids = set(a.identifiers)
b_ids = set(b.identifiers)
difference = []
for i in a_ids.difference(b_ids):
difference.append((i, True, False))
for i in b_ids.difference(a_ids):
difference.append((i, False, True))
return difference
|
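A minimal sketch with a stand-in for dtoolcore.DataSet, which only needs an identifiers attribute:

from collections import namedtuple

FakeDataSet = namedtuple("FakeDataSet", ["identifiers"])   # stand-in, not the real class

a = FakeDataSet(identifiers=["id1", "id2"])
b = FakeDataSet(identifiers=["id2", "id3"])
print(diff_identifiers(a, b))
# -> [('id1', True, False), ('id3', False, True)]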
#vtb
def __create_grid(self):
data_sizes, min_corner, max_corner = self.__get_data_size_derscription()
dimension = len(self.__data[0])
cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]
self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]
iterator = coordinate_iterator(dimension, self.__amount_intervals)
point_availability = [True] * len(self.__data)
self.__cell_map = {}
for index_cell in range(len(self.__cells)):
logical_location = iterator.get_coordinate()
iterator.increment()
self.__cells[index_cell].logical_location = logical_location[:]
cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)
self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)
self.__cells[index_cell].capture_points(self.__data, point_availability)
self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell]
|
!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
|
### Input:
!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
### Response:
#vtb
def __create_grid(self):
data_sizes, min_corner, max_corner = self.__get_data_size_derscription()
dimension = len(self.__data[0])
cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]
self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]
iterator = coordinate_iterator(dimension, self.__amount_intervals)
point_availability = [True] * len(self.__data)
self.__cell_map = {}
for index_cell in range(len(self.__cells)):
logical_location = iterator.get_coordinate()
iterator.increment()
self.__cells[index_cell].logical_location = logical_location[:]
cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)
self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)
self.__cells[index_cell].capture_points(self.__data, point_availability)
self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell]
|
#vtb
def hex_to_xy(self, h):
rgb = self.color.hex_to_rgb(h)
return self.rgb_to_xy(rgb[0], rgb[1], rgb[2])
|
Converts hexadecimal colors represented as a String to approximate CIE
1931 x and y coordinates.
|
### Input:
Converts hexadecimal colors represented as a String to approximate CIE
1931 x and y coordinates.
### Response:
#vtb
def hex_to_xy(self, h):
rgb = self.color.hex_to_rgb(h)
return self.rgb_to_xy(rgb[0], rgb[1], rgb[2])
|
#vtb
def get_backend():
backend = getattr(settings, 'SIMDITOR_IMAGE_BACKEND', None)  # setting name assumed
if backend == 'pillow':
from simditor.image import pillow_backend as backend
else:
from simditor.image import dummy_backend as backend
return backend
|
Get backend.
|
### Input:
Get backend.
### Response:
#vtb
def get_backend():
backend = getattr(settings, 'SIMDITOR_IMAGE_BACKEND', None)  # setting name assumed
if backend == 'pillow':
from simditor.image import pillow_backend as backend
else:
from simditor.image import dummy_backend as backend
return backend
|
#vtb
def get(msg_or_dict, key, default=_SENTINEL):
key, subkey = _resolve_subkeys(key)
if isinstance(msg_or_dict, message.Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections_abc.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
"get() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
if answer is _SENTINEL:
raise KeyError(key)
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer
|
Retrieve a key's value from a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
|
### Input:
Retrieve a key's value from a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
### Response:
#vtb
def get(msg_or_dict, key, default=_SENTINEL):
key, subkey = _resolve_subkeys(key)
if isinstance(msg_or_dict, message.Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections_abc.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
"get() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
if answer is _SENTINEL:
raise KeyError(key)
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer
|
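Because the Mapping branch mirrors the Message branch, the helper also works on plain dicts; a sketch assuming _resolve_subkeys splits "a.b" into ("a", "b"):

cfg = {"server": {"port": 8080}}
print(get(cfg, "server.port"))                 # -> 8080 (recurses into the subkey)
print(get(cfg, "server.host", default=None))   # -> None (default suppresses KeyError)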
#vtb
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
if (typeName, schemaVersion) in _legacyTypes:
return _legacyTypes[typeName, schemaVersion]
if dummyBases:
realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'  # key names assumed; literals lost in extraction
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem_%s_v%d' % (typeName, schemaVersion)),
realBases,
attributes)
assert result is not None, 'type() returned None for %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result
|
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
|
### Input:
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
### Response:
#vtb
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
if (typeName, schemaVersion) in _legacyTypes:
return _legacyTypes[typeName, schemaVersion]
if dummyBases:
realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'  # key names assumed; literals lost in extraction
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem_%s_v%d' % (typeName, schemaVersion)),
realBases,
attributes)
assert result is not None, 'type() returned None for %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result
|
#vtb
def set_coeffs(self, values, ls, ms):
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(int)  # np.int was removed from modern numpy
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
|
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
|
### Input:
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
### Response:
#vtb
def set_coeffs(self, values, ls, ms):
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(int)  # np.int was removed from modern numpy
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
|
#vtb
def iterate(self, iterable, element_timeout=None):
self._assert_active()
with self._queuelock:
self._thread_loop_ids[self._thread_num] += 1
loop_id = self._thread_loop_ids[self._thread_num]
return _IterableQueueIterator(
self._iter_queue, loop_id, self, iterable, element_timeout
)
|
Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
You can specify a timeout for the clients to adhere to.
|
### Input:
Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
You can specify a timeout for the clients to adhere to.
### Response:
#vtb
def iterate(self, iterable, element_timeout=None):
self._assert_active()
with self._queuelock:
self._thread_loop_ids[self._thread_num] += 1
loop_id = self._thread_loop_ids[self._thread_num]
return _IterableQueueIterator(
self._iter_queue, loop_id, self, iterable, element_timeout
)
|
#vtb
def remove_network(self, action, n_name, **kwargs):
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res
|
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
|
### Input:
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
### Response:
#vtb
def remove_network(self, action, n_name, **kwargs):
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res
|
#vtb
def get_partition_hash(self):
if self.has_partition_hash():
return unpack_from(FMT_BE_INT, self._buffer, PARTITION_HASH_OFFSET)[0]
return self.hash_code()
|
Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
:return: partition hash
|
### Input:
Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
:return: partition hash
### Response:
#vtb
def get_partition_hash(self):
if self.has_partition_hash():
return unpack_from(FMT_BE_INT, self._buffer, PARTITION_HASH_OFFSET)[0]
return self.hash_code()
|
#vtb
def traverse_setter(obj, attribute, value):
obj.traverse(lambda x: setattr(x, attribute, value))
|
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
|
### Input:
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
### Response:
#vtb
def traverse_setter(obj, attribute, value):
obj.traverse(lambda x: setattr(x, attribute, value))
|
#vtb
def restart_agent(self, agent_id, **kwargs):
host_medium = self.get_medium()
agent = host_medium.get_agent()
d = host_medium.get_document(agent_id)
d.addCallback(
lambda desc: agent.start_agent(desc.doc_id, **kwargs))
return d
|
tells the host agent running in this agency to restart the agent.
|
### Input:
tells the host agent running in this agency to restart the agent.
### Response:
#vtb
def restart_agent(self, agent_id, **kwargs):
host_medium = self.get_medium()
agent = host_medium.get_agent()
d = host_medium.get_document(agent_id)
d.addCallback(
lambda desc: agent.start_agent(desc.doc_id, **kwargs))
return d
|
#vtb
def cut_psf(psf_data, psf_size):
kernel = image_util.cut_edges(psf_data, psf_size)
kernel = kernel_norm(kernel)
return kernel
|
cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
|
### Input:
cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
### Response:
#vtb
def cut_psf(psf_data, psf_size):
kernel = image_util.cut_edges(psf_data, psf_size)
kernel = kernel_norm(kernel)
return kernel
|
#vtb
def get_rows(self):
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]  # attribute names assumed (pymrio-style); literals lost in extraction
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
return None
|
Returns the name of the rows of the extension
|
### Input:
Returns the name of the rows of the extension
### Response:
#vtb
def get_rows(self):
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]  # attribute names assumed (pymrio-style); literals lost in extraction
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
return None
|
#vtb
def headers(self):
headers = self.conn.issue_command("Headers")
res = []
for header in headers.split("\r"):
key, value = header.split(": ", 1)
for line in value.split("\n"):
res.append((_normalize_header(key), line))
return res
|
Returns a list of the last HTTP response headers.
Header keys are normalized to capitalized form, as in `User-Agent`.
|
### Input:
Returns a list of the last HTTP response headers.
Header keys are normalized to capitalized form, as in `User-Agent`.
### Response:
#vtb
def headers(self):
headers = self.conn.issue_command("Headers")
res = []
for header in headers.split("\r"):
key, value = header.split(": ", 1)
for line in value.split("\n"):
res.append((_normalize_header(key), line))
return res
|
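The parsing splits the raw blob on "\r", then on "key: value", and emits one pair per continuation line; a stand-alone sketch of that loop:

raw = "Content-Type: text/html\rSet-Cookie: a=1\nb=2"
pairs = []
for header in raw.split("\r"):
    key, value = header.split(": ", 1)
    for line in value.split("\n"):
        pairs.append((key, line))   # _normalize_header would canonicalize key here
print(pairs)
# -> [('Content-Type', 'text/html'), ('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]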
#vtb
def noise_plot(signal, noise, normalise=False, **kwargs):
import matplotlib.pyplot as plt
n_traces = 0
for tr in signal:
try:
noise.select(id=tr.id)[0]
except IndexError:
continue
n_traces += 1
fig, axes = plt.subplots(n_traces, 2, sharex=True)
if len(signal) > 1:
axes = axes.ravel()
i = 0
lines = []
labels = []
for tr in signal:
try:
noise_tr = noise.select(id=tr.id)[0]
except IndexError:
continue
ax1 = axes[i]
ax2 = axes[i + 1]
fft_len = fftpack.next_fast_len(
max(noise_tr.stats.npts, tr.stats.npts))
if not normalise:
signal_fft = fftpack.rfft(tr.data, fft_len)
noise_fft = fftpack.rfft(noise_tr.data, fft_len)
else:
signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
noise_fft = fftpack.rfft(
noise_tr.data / max(noise_tr.data), fft_len)
frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
noise_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]),
'k', label="noise")  # format string assumed; literal lost in extraction
signal_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]),
'r', label="signal")  # format string assumed
if "signal" not in labels:
labels.append("signal")
lines.append(signal_line)
if "noise" not in labels:
labels.append("noise")
lines.append(noise_line)
ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
ax2.plot(
frequencies,
(2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) -
(2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k')  # format string assumed
ax2.yaxis.tick_right()
ax2.set_ylim(bottom=0)
i += 2
axes[-1].set_xlabel("Frequency (Hz)")
axes[-2].set_xlabel("Frequency (Hz)")
axes[0].set_title("Spectra")
axes[1].set_title("Signal - noise")
plt.figlegend(lines, labels, 'upper left')  # legend location assumed
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs)
return fig
|
Plot signal and noise fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
:return: `matplotlib.pyplot.Figure`
|
### Input:
Plot signal and noise fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
:return: `matplotlib.pyplot.Figure`
### Response:
#vtb
def noise_plot(signal, noise, normalise=False, **kwargs):
import matplotlib.pyplot as plt
n_traces = 0
for tr in signal:
try:
noise.select(id=tr.id)[0]
except IndexError:
continue
n_traces += 1
fig, axes = plt.subplots(n_traces, 2, sharex=True)
if len(signal) > 1:
axes = axes.ravel()
i = 0
lines = []
labels = []
for tr in signal:
try:
noise_tr = noise.select(id=tr.id)[0]
except IndexError:
continue
ax1 = axes[i]
ax2 = axes[i + 1]
fft_len = fftpack.next_fast_len(
max(noise_tr.stats.npts, tr.stats.npts))
if not normalise:
signal_fft = fftpack.rfft(tr.data, fft_len)
noise_fft = fftpack.rfft(noise_tr.data, fft_len)
else:
signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
noise_fft = fftpack.rfft(
noise_tr.data / max(noise_tr.data), fft_len)
frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
noise_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]),
'k', label="noise")  # format string assumed; literal lost in extraction
signal_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]),
'r', label="signal")  # format string assumed
if "signal" not in labels:
labels.append("signal")
lines.append(signal_line)
if "noise" not in labels:
labels.append("noise")
lines.append(noise_line)
ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
ax2.plot(
frequencies,
(2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) -
(2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k')  # format string assumed
ax2.yaxis.tick_right()
ax2.set_ylim(bottom=0)
i += 2
axes[-1].set_xlabel("Frequency (Hz)")
axes[-2].set_xlabel("Frequency (Hz)")
axes[0].set_title("Spectra")
axes[1].set_title("Signal - noise")
plt.figlegend(lines, labels, 'upper left')  # legend location assumed
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs)
return fig
|
#vtb
def getAnalogActionData(self, action, unActionDataSize, ulRestrictToDevice):
fn = self.function_table.getAnalogActionData
pActionData = InputAnalogActionData_t()
result = fn(action, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData
|
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
action is something other than analog
|
### Input:
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
action is something other than analog
### Response:
#vtb
def getAnalogActionData(self, action, unActionDataSize, ulRestrictToDevice):
fn = self.function_table.getAnalogActionData
pActionData = InputAnalogActionData_t()
result = fn(action, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData
|
#vtb
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
assert check_argument_types()
value = obj
remainder = target
if not target:
return obj
while separator:
name, separator, remainder = remainder.partition(separator)
numeric = name.lstrip().isdigit()
try:
if numeric or (protect and name.startswith('_')):
raise AttributeError()
value = getattr(value, name)
if executable and callable(value):
value = value()
except AttributeError:
try:
value = value[int(name) if numeric else name]
except (KeyError, TypeError):
if default is nodefault:
raise LookupError("Could not resolve " + repr(target) + " on: " + repr(obj))
return default
return value
|
Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
|
### Input:
Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
### Response:
#vtb
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
assert check_argument_types()
value = obj
remainder = target
if not target:
return obj
while separator:
name, separator, remainder = remainder.partition(separator)
numeric = name.lstrip().isdigit()
try:
if numeric or (protect and name.startswith('_')):
raise AttributeError()
value = getattr(value, name)
if executable and callable(value):
value = value()
except AttributeError:
try:
value = value[int(name) if numeric else name]
except (KeyError, TypeError):
if default is nodefault:
raise LookupError("Could not resolve " + repr(target) + " on: " + repr(obj))
return default
return value
|
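A usage sketch, assuming traverse above is in scope: dict keys, list indices, and attributes all resolve through one dotted path.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

data = {"points": [Point(1, 2)]}
print(traverse(data, "points.0.x"))                # -> 1 (key, index, attribute)
print(traverse(data, "points.0.z", default=None))  # -> None instead of LookupError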
#vtb
def _set_dst_vtep_ip(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
# NOTE: the generated literals (choice tuple, tailf extension strings) were
# lost in extraction; unrecoverable arguments are omitted below and the
# pattern/namespace/module values are assumed from the YANG type and path.
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(...)""",  # full generated repr lost in extraction
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set()
|
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
|
### Input:
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
### Response:
#vtb
def _set_dst_vtep_ip(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
# NOTE: the generated literals (choice tuple, tailf extension strings) were
# lost in extraction; unrecoverable arguments are omitted below and the
# pattern/namespace/module values are assumed from the YANG type and path.
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(...)""",  # full generated repr lost in extraction
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set()
|
#vtb
def fix_e502(self, result):
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
|
Remove extraneous escape of newline.
|
### Input:
Remove extraneous escape of newline.
### Response:
#vtb
def fix_e502(self, result):
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
|
#vtb
def find_revision_number(self, revision=None):
self.create()
revision = self.expand_branch_name(revision)
output = self.context.capture('hg', 'id', '--rev=%s' % revision, '--num')  # hg invocation assumed; literals lost in extraction
if not (output and output.isdigit()):
msg = "Failed to find local revision number! ('hg id' gave unexpected output)"
raise ValueError(msg)
return int(output)
|
Find the local revision number of the given revision.
|
### Input:
Find the local revision number of the given revision.
### Response:
#vtb
def find_revision_number(self, revision=None):
self.create()
revision = self.expand_branch_name(revision)
output = self.context.capture('hg', 'id', '--rev=%s' % revision, '--num')  # hg invocation assumed; literals lost in extraction
if not (output and output.isdigit()):
msg = "Failed to find local revision number! ('hg id' gave unexpected output)"
raise ValueError(msg)
return int(output)
|
#vtb
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
lock=None, lock_key=None, retry=None, retry_on=None,
retry_method=None, schedule=None, batch=False,
max_queue_size=None):
def _delay(func):
def _delay_inner(*args, **kwargs):
return self.delay(func, args=args, kwargs=kwargs)
return _delay_inner
if schedule is not None:
unique = True
def _wrap(func):
if hard_timeout is not None:
func._task_hard_timeout = hard_timeout
if queue is not None:
func._task_queue = queue
if unique is not None:
func._task_unique = unique
if lock is not None:
func._task_lock = lock
if lock_key is not None:
func._task_lock_key = lock_key
if retry is not None:
func._task_retry = retry
if retry_on is not None:
func._task_retry_on = retry_on
if retry_method is not None:
func._task_retry_method = retry_method
if batch is not None:
func._task_batch = batch
if schedule is not None:
func._task_schedule = schedule
if max_queue_size is not None:
func._task_max_queue_size = max_queue_size
func.delay = _delay(func)
if schedule is not None:
serialized_func = serialize_func_name(func)
assert serialized_func not in self.periodic_task_funcs, \
"attempted duplicate registration of periodic task"
self.periodic_task_funcs[serialized_func] = func
return func
return _wrap if _fn is None else _wrap(_fn)
|
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
|
### Input:
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
### Response:
#vtb
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
lock=None, lock_key=None, retry=None, retry_on=None,
retry_method=None, schedule=None, batch=False,
max_queue_size=None):
def _delay(func):
def _delay_inner(*args, **kwargs):
return self.delay(func, args=args, kwargs=kwargs)
return _delay_inner
if schedule is not None:
unique = True
def _wrap(func):
if hard_timeout is not None:
func._task_hard_timeout = hard_timeout
if queue is not None:
func._task_queue = queue
if unique is not None:
func._task_unique = unique
if lock is not None:
func._task_lock = lock
if lock_key is not None:
func._task_lock_key = lock_key
if retry is not None:
func._task_retry = retry
if retry_on is not None:
func._task_retry_on = retry_on
if retry_method is not None:
func._task_retry_method = retry_method
if batch is not None:
func._task_batch = batch
if schedule is not None:
func._task_schedule = schedule
if max_queue_size is not None:
func._task_max_queue_size = max_queue_size
func.delay = _delay(func)
if schedule is not None:
serialized_func = serialize_func_name(func)
assert serialized_func not in self.periodic_task_funcs, \
"attempted duplicate registration of periodic task"
self.periodic_task_funcs[serialized_func] = func
return func
return _wrap if _fn is None else _wrap(_fn)
|
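A hypothetical usage sketch, assuming a TaskTiger-style instance named tiger:

@tiger.task(queue='emails', retry=True, hard_timeout=60)
def send_welcome_email(user_id):
    print('sending to', user_id)

send_welcome_email.delay(42)   # enqueues via the attached .delay helper
send_welcome_email(42)         # direct call still runs inline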
#vtb
def expire(self, time=None):
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next
|
Remove expired items from the cache.
|
### Input:
Remove expired items from the cache.
### Response:
#vtb
def expire(self, time=None):
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next
|
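This matches cachetools' TTLCache.expire(); a short demo assuming cachetools is installed:

import time
from cachetools import TTLCache

cache = TTLCache(maxsize=10, ttl=0.1)
cache['k'] = 'v'
time.sleep(0.2)
cache.expire()      # unlinks expired entries explicitly
print(len(cache))   # -> 0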
#vtb
def load_yaml(path):
with open(path, 'r') as f:
yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
if not yamldict:
raise LoadError('YAML file is empty: %s' % path)
return yamldict
|
Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
|
### Input:
Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
### Response:
#vtb
def load_yaml(path):
with open(path, 'r') as f:
yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
if not yamldict:
raise LoadError('YAML file is empty: %s' % path)
return yamldict
|
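A usage sketch assuming load_yaml above is in scope; key order from the file is preserved:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'config.yml')
with open(path, 'w') as f:
    f.write('b: 1\na: 2\n')
cfg = load_yaml(path)
print(list(cfg.keys()))   # -> ['b', 'a']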
#vtb
def update_privilege(self, obj, target):
if 'privilege' in obj['Metadata']:  # metadata key names assumed; literals lost in extraction
os.chmod(target, int(obj['Metadata']['privilege'], 8))
|
Get privileges from metadata of the source in s3, and apply them to target
|
### Input:
Get privileges from metadata of the source in s3, and apply them to target
### Response:
#vtb
def update_privilege(self, obj, target):
if 'privilege' in obj['Metadata']:  # metadata key names assumed; literals lost in extraction
os.chmod(target, int(obj['Metadata']['privilege'], 8))
|
#vtb
def find_children(self, tag=None, namespace=None):
results = []
if tag and namespace:
for element in self.children:
if element.tag == tag and element.namespace == namespace:
results.append(element)
elif tag and not namespace:
for element in self.children:
if element.tag == tag:
results.append(element)
elif namespace and not tag:
for element in self.children:
if element.namespace == namespace:
results.append(element)
else:
for element in self.children:
results.append(element)
return results
|
Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameters values
|
### Input:
Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameters values
### Response:
#vtb
def find_children(self, tag=None, namespace=None):
results = []
if tag and namespace:
for element in self.children:
if element.tag == tag and element.namespace == namespace:
results.append(element)
elif tag and not namespace:
for element in self.children:
if element.tag == tag:
results.append(element)
elif namespace and not tag:
for element in self.children:
if element.namespace == namespace:
results.append(element)
else:
for element in self.children:
results.append(element)
return results
|
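A minimal sketch with stand-in element objects, borrowing the function above as a method:

class El:
    def __init__(self, tag, namespace):
        self.tag, self.namespace = tag, namespace

class Holder:
    find_children = find_children   # reuse the function above
    def __init__(self):
        self.children = [El('a', 'ns1'), El('a', 'ns2'), El('b', 'ns1')]

h = Holder()
print(len(h.find_children(tag='a')))           # -> 2
print(len(h.find_children(namespace='ns1')))   # -> 2
print(len(h.find_children()))                  # -> 3 (no filters)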
#vtb
def samaccountname(self, base_dn, distinguished_name):
mappings = self.samaccountnames(base_dn, [distinguished_name])
try:
return mappings[distinguished_name]
except KeyError:
logging.info("%s - unable to retrieve object from AD by DistinguishedName",
distinguished_name)
|
Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param str distinguished_name: The DistinguishedName to look up
:return: The sAMAccountName for the object, or None if not found
:rtype: str
|
### Input:
Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param str distinguished_name: The DistinguishedName to look up
:return: The sAMAccountName for the object, or None if not found
:rtype: str
### Response:
#vtb
def samaccountname(self, base_dn, distinguished_name):
mappings = self.samaccountnames(base_dn, [distinguished_name])
try:
return mappings[distinguished_name]
except KeyError:
logging.info("%s - unable to retrieve object from AD by DistinguishedName",
distinguished_name)
|
#vtb
def avail_locations(call=None):
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option.'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')  # service and dict keys below assumed; literals lost in extraction
locations = conn.getLocations(id=50)
for location in locations:
ret[location['name']] = {
'id': location['id'],
'name': location['name'],
'location': location['longName'],
}
available = conn.getAvailableLocations(id=50)
for location in available:
if location.get('available', 0) == 0:
continue
ret[location['name']]['available'] = True
return ret
|
List all available locations
|
### Input:
List all available locations
### Response:
#vtb
def avail_locations(call=None):
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option.'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')  # service and dict keys below assumed; literals lost in extraction
locations = conn.getLocations(id=50)
for location in locations:
ret[location['name']] = {
'id': location['id'],
'name': location['name'],
'location': location['longName'],
}
available = conn.getAvailableLocations(id=50)
for location in available:
if location.get('available', 0) == 0:
continue
ret[location['name']]['available'] = True
return ret
|
#vtb
def validate_file(parser, arg):
if not os.path.isfile(arg):
parser.error("%s is not a file." % arg)
return arg
|
Validates that `arg` is a valid file.
|
### Input:
Validates that `arg` is a valid file.
### Response:
#vtb
def validate_file(parser, arg):
if not os.path.isfile(arg):
parser.error("%s is not a file." % arg)
return arg
|
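The helper plugs into argparse as a type callback; a short sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('config', type=lambda arg: validate_file(parser, arg))
args = parser.parse_args([__file__])   # parser.error() exits if the path is not a file
print(args.config)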
#vtb
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
for id_a, pgn_a in zip(id_x, pgn_x):
for id_b, pgn_b in zip(id_y, pgn_y):
if pgn_a == pgn_b:
yield (id_a, id_b)
|
Yield arbitration ids which has the same pgn.
|
### Input:
Yield arbitration ids which has the same pgn.
### Response:
#vtb
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
for id_a, pgn_a in zip(id_x, pgn_x):
for id_b, pgn_b in zip(id_y, pgn_y):
if pgn_a == pgn_b:
yield (id_a, id_b)
|
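A quick demo with J1939-style id/PGN lists, assuming the generator above is in scope:

id_x, pgn_x = [0x18FEF100, 0x18FEE000], [0xFEF1, 0xFEE0]
id_y, pgn_y = [0x1CFEF1FE], [0xFEF1]
print(list(ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)))
# -> one pair: the two ids sharing PGN 0xFEF1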
#vtb
def set_wheel_mode(self, ids):
self.set_control_mode(dict(zip(ids, itertools.repeat())))
|
Sets the specified motors to wheel mode.
|
### Input:
Sets the specified motors to wheel mode.
### Response:
#vtb
def set_wheel_mode(self, ids):
self.set_control_mode(dict(zip(ids, itertools.repeat())))
|
#vtb
def server_bind(self):
TCPServer.server_bind(self)
_, self.server_port = self.socket.getsockname()[:2]
|
Override of TCPServer.server_bind() that tracks bind-time assigned random ports.
|
### Input:
Override of TCPServer.server_bind() that tracks bind-time assigned random ports.
### Response:
#vtb
def server_bind(self):
TCPServer.server_bind(self)
_, self.server_port = self.socket.getsockname()[:2]
|
#vtb
def find_repo_by_path(i):
# NOTE: cfg/work/cache key names below are assumed (CK-style); string literals were lost in extraction.
p=i['path']
if p!='': p=os.path.normpath(p)
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({})
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
if p==qq['dict'].get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias': alias}
|
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
|
### Input:
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
### Response:
#vtb
def find_repo_by_path(i):
# NOTE: cfg/work/cache key names below are assumed (CK-style); string literals were lost in extraction.
p=i['path']
if p!='': p=os.path.normpath(p)
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({})
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
if p==qq['dict'].get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias': alias}
|
#vtb
def tokenize_annotated(doc, annotation):
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
|
Tokenize a document and add an annotation attribute to each token
|
### Input:
Tokenize a document and add an annotation attribute to each token
### Response:
#vtb
def tokenize_annotated(doc, annotation):
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
|
#vtb
def create_cfg_segment(filename, filecontent, description, auth, url):
payload = {"confFileName": filename,
"confFileType": "2",
"cfgFileParent": "-1",
"confFileDesc": description,
"content": filecontent}
f_url = url + "/imcrs/icc/confFile"
response = requests.post(f_url, data=(json.dumps(payload)), auth=auth, headers=HEADERS)
try:
if response.status_code == 201:
print("Template successfully created")
return response.status_code
elif response.status_code != 201:
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " create_cfg_segment: An Error has occurred"
|
Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>>
|
### Input:
Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>>
### Response:
#vtb
def create_cfg_segment(filename, filecontent, description, auth, url):
payload = {"confFileName": filename,
"confFileType": "2",
"cfgFileParent": "-1",
"confFileDesc": description,
"content": filecontent}
f_url = url + "/imcrs/icc/confFile"
response = requests.post(f_url, data=(json.dumps(payload)), auth=auth, headers=HEADERS)
try:
if response.status_code == 201:
print("Template successfully created")
return response.status_code
        elif response.status_code != 201:
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " create_cfg_segment: An Error has occured"
|
#vtb
def create_new_label_by_content_id(self, content_id, label_names, callback=None):
assert isinstance(label_names, list)
assert all(isinstance(ln, dict) and set(ln.keys()) == {"prefix", "name"} for ln in label_names)
return self._service_post_request("rest/api/content/{id}/label".format(id=content_id),
data=json.dumps(label_names), headers={"Content-Type": "application/json"},
callback=callback)
|
Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the label's content container.
:param label_names (list): A list of labels (strings) to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
|
### Input:
Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the label's content container.
:param label_names (list): A list of labels (strings) to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
### Response:
#vtb
def create_new_label_by_content_id(self, content_id, label_names, callback=None):
assert isinstance(label_names, list)
assert all(isinstance(ln, dict) and set(ln.keys()) == {"prefix", "name"} for ln in label_names)
return self._service_post_request("rest/api/content/{id}/label".format(id=content_id),
data=json.dumps(label_names), headers={"Content-Type": "application/json"},
callback=callback)
|
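A hedged usage sketch; `client` stands for whatever API object defines this method, and the content id is a placeholder:

labels = [{"prefix": "global", "name": "release-notes"},
          {"prefix": "global", "name": "v2"}]
client.create_new_label_by_content_id("12345", labels)  # each dict must carry exactly the keys {"prefix", "name"}
|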
#vtb
def urlToIds(url):
urlId = url.split("/")[-1]
convUrl = "https://join.skype.com/api/v2/conversation/"
json = SkypeConnection.externalCall("POST", convUrl, json={"shortId": urlId, "type": "wl"}).json()
return {"id": json.get("Resource"),
"long": json.get("Id"),
"blob": json.get("ChatBlob")}
|
Resolve a ``join.skype.com`` URL and return various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
|
### Input:
Resolve a ``join.skype.com`` URL and return various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
### Response:
#vtb
def urlToIds(url):
urlId = url.split("/")[-1]
convUrl = "https://join.skype.com/api/v2/conversation/"
json = SkypeConnection.externalCall("POST", convUrl, json={"shortId": urlId, "type": "wl"}).json()
return {"id": json.get("Resource"),
"long": json.get("Id"),
"blob": json.get("ChatBlob")}
|
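A usage sketch with a made-up short code (the method is static, so no instance is needed):

ids = SkypeConnection.urlToIds("https://join.skype.com/AbCdEfGh")  # hypothetical join URL
print(ids["id"], ids["long"], ids["blob"])
|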
#vtb
def copyCurrentLayout(self, sourceViewSUID, targetViewSUID, body, verbose=None):
response=api(url=self.___url++str(sourceViewSUID)++str(targetViewSUID)+, method="PUT", body=body, verbose=verbose)
return response
|
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist
|
### Input:
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist
### Response:
#vtb
def copyCurrentLayout(self, sourceViewSUID, targetViewSUID, body, verbose=None):
response=api(url=self.___url++str(sourceViewSUID)++str(targetViewSUID)+, method="PUT", body=body, verbose=verbose)
return response
|
#vtb
def log_error(msg, logger="TaskLogger"):
tasklogger = get_tasklogger(logger)
tasklogger.error(msg)
return tasklogger
|
Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
|
### Input:
Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
### Response:
#vtb
def log_error(msg, logger="TaskLogger"):
tasklogger = get_tasklogger(logger)
tasklogger.error(msg)
return tasklogger
|
#vtb
def remove_duplicates(vector_tuple):
array = np.column_stack(vector_tuple)
a = np.ascontiguousarray(array)
    unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))  # '' auto-names record fields so rows compare as units
b = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
return list(b.T)
|
Remove duplicate rows from N equally-sized arrays
|
### Input:
Remove duplicate rows from N equally-sized arrays
### Response:
#vtb
def remove_duplicates(vector_tuple):
array = np.column_stack(vector_tuple)
a = np.ascontiguousarray(array)
    unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))  # '' auto-names record fields so rows compare as units
b = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
return list(b.T)
|
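A quick demonstration; note that np.unique also sorts, so rows come back in lexicographic order:

import numpy as np

x = np.array([1, 2, 1, 3])
y = np.array([4, 5, 4, 6])
xs, ys = remove_duplicates((x, y))
# the duplicate row (1, 4) is dropped: xs -> [1 2 3], ys -> [4 5 6]
|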
#vtb
def update_storage_policy(policy, policy_dict, service_instance=None):
log.trace(, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError({0}\
.format(policy))
policy_ref = policies[0]
policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
log.trace()
for prop in [, ]:
setattr(policy_update_spec, prop, getattr(policy_ref, prop))
_apply_policy_config(policy_update_spec, policy_dict)
salt.utils.pbm.update_storage_policy(profile_manager, policy_ref,
policy_update_spec)
return {: True}
|
Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict"
|
### Input:
Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict"
### Response:
#vtb
def update_storage_policy(policy, policy_dict, service_instance=None):
log.trace(, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError({0}\
.format(policy))
policy_ref = policies[0]
policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
log.trace()
for prop in [, ]:
setattr(policy_update_spec, prop, getattr(policy_ref, prop))
_apply_policy_config(policy_update_spec, policy_dict)
salt.utils.pbm.update_storage_policy(profile_manager, policy_ref,
policy_update_spec)
return {: True}
|
#vtb
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET):
try:
result = self[obj]
except (TypeError, KeyError):
pass
else:
return result
result = not_hashed
if self._skip_this(obj, parent):
return
elif obj is None:
result =
elif isinstance(obj, strings):
result = prepare_string_for_hashing(
obj, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
elif isinstance(obj, numbers):
result = self._prep_number(obj)
elif isinstance(obj, MutableMapping):
result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, tuple):
result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, Iterable):
result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids)
else:
result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids)
if result is not_hashed:
self[UNPROCESSED].append(obj)
elif result is unprocessed:
pass
elif self.apply_hash:
if isinstance(obj, strings):
result_cleaned = result
else:
result_cleaned = prepare_string_for_hashing(
result, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
result = self.hasher(result_cleaned)
try:
self[obj] = result
except TypeError:
obj_id = get_id(obj)
self[obj_id] = result
return result
|
The main diff method
|
### Input:
The main diff method
### Response:
#vtb
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET):
try:
result = self[obj]
except (TypeError, KeyError):
pass
else:
return result
result = not_hashed
if self._skip_this(obj, parent):
return
elif obj is None:
result =
elif isinstance(obj, strings):
result = prepare_string_for_hashing(
obj, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
elif isinstance(obj, numbers):
result = self._prep_number(obj)
elif isinstance(obj, MutableMapping):
result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, tuple):
result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, Iterable):
result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids)
else:
result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids)
if result is not_hashed:
self[UNPROCESSED].append(obj)
elif result is unprocessed:
pass
elif self.apply_hash:
if isinstance(obj, strings):
result_cleaned = result
else:
result_cleaned = prepare_string_for_hashing(
result, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
result = self.hasher(result_cleaned)
try:
self[obj] = result
except TypeError:
obj_id = get_id(obj)
self[obj_id] = result
return result
|
#vtb
def _format_firewall_stdout(cmd_ret):
ret_dict = {: True,
: {}}
for line in cmd_ret[].splitlines():
if line.startswith():
continue
if line.startswith():
continue
ruleset_status = line.split()
ret_dict[][ruleset_status[0]] = bool(ruleset_status[1])
return ret_dict
|
Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
|
### Input:
Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
### Response:
#vtb
def _format_firewall_stdout(cmd_ret):
ret_dict = {: True,
: {}}
for line in cmd_ret[].splitlines():
if line.startswith():
continue
if line.startswith():
continue
ruleset_status = line.split()
ret_dict[][ruleset_status[0]] = bool(ruleset_status[1])
return ret_dict
|
#vtb
def _array_setitem_with_key_seq(self, array_name, index, key_seq, value):
table = self.array(array_name)[index]
key_so_far = tuple()
for key in key_seq[:-1]:
key_so_far += (key,)
new_table = self._array_make_sure_table_exists(array_name, index, key_so_far)
if new_table is not None:
table = new_table
else:
table = table[key]
table[key_seq[-1]] = value
|
Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
|
### Input:
Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
### Response:
#vtb
def _array_setitem_with_key_seq(self, array_name, index, key_seq, value):
table = self.array(array_name)[index]
key_so_far = tuple()
for key in key_seq[:-1]:
key_so_far += (key,)
new_table = self._array_make_sure_table_exists(array_name, index, key_so_far)
if new_table is not None:
table = new_table
else:
table = table[key]
table[key_seq[-1]] = value
|
#vtb
def get_turicreate_object_type(url):
from .._connect import main as _glconnect
ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url))
    if ret == 'sgraph':  # literals inferred from the docstring: the unity server reports graphs as 'sgraph'
        ret = 'graph'
return ret
|
Given url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
|
### Input:
Given url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
### Response:
#vtb
def get_turicreate_object_type(url):
from .._connect import main as _glconnect
ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url))
    if ret == 'sgraph':  # literals inferred from the docstring: the unity server reports graphs as 'sgraph'
        ret = 'graph'
return ret
|
#vtb
def unapply_patch(self, patch_name, force=False):
self._check(force)
patches = self.db.patches_after(Patch(patch_name))
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
self.unapplied(self.db.top_patch())
|
Unapply patches up to patch_name. patch_name will end up as top
patch
|
### Input:
Unapply patches up to patch_name. patch_name will end up as top
patch
### Response:
#vtb
def unapply_patch(self, patch_name, force=False):
self._check(force)
patches = self.db.patches_after(Patch(patch_name))
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
self.unapplied(self.db.top_patch())
|
#vtb
def compile_mako_files(self, app_config):
for subdir_name in self.SEARCH_DIRS:
subdir = subdir_name.format(
app_path=app_config.path,
app_name=app_config.name,
)
def recurse_path(path):
self.message(.format(path), 1)
if os.path.exists(path):
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
_, ext = os.path.splitext(filename)
if filename.startswith():
continue
elif os.path.isdir(filepath):
recurse_path(filepath)
elif ext.lower() in ( , , ):
self.message(.format(filepath), 2)
try:
get_template_for_path(filepath)
except TemplateSyntaxError:
if not self.options.get():
raise
recurse_path(subdir)
|
Compiles the Mako templates within the apps of this system
|
### Input:
Compiles the Mako templates within the apps of this system
### Response:
#vtb
def compile_mako_files(self, app_config):
for subdir_name in self.SEARCH_DIRS:
subdir = subdir_name.format(
app_path=app_config.path,
app_name=app_config.name,
)
def recurse_path(path):
self.message(.format(path), 1)
if os.path.exists(path):
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
_, ext = os.path.splitext(filename)
if filename.startswith():
continue
elif os.path.isdir(filepath):
recurse_path(filepath)
elif ext.lower() in ( , , ):
self.message(.format(filepath), 2)
try:
get_template_for_path(filepath)
except TemplateSyntaxError:
if not self.options.get():
raise
recurse_path(subdir)
|
#vtb
def orientation(self, value):
for values in self.__orientation:
if value in values:
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.")
|
setter of orientation property.
|
### Input:
setter of orientation property.
### Response:
#vtb
def orientation(self, value):
for values in self.__orientation:
if value in values:
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.")
|
#vtb
def drawcircle(self, x, y, r = 10, colour = None, label = None):
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
pilr = self.pilscale(r)
self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour)
        if label is not None:
self.loadlabelfont()
textwidth = self.draw.textsize(label, font = self.labelfont)[0]
self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont)
|
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image!
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
I will convert this into the right PIL coordinates.
|
### Input:
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image!
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
I will convert this into the right PIL coordinates.
### Response:
#vtb
def drawcircle(self, x, y, r = 10, colour = None, label = None):
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
pilr = self.pilscale(r)
self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour)
        if label is not None:
self.loadlabelfont()
textwidth = self.draw.textsize(label, font = self.labelfont)[0]
self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont)
|
#vtb
def get_context(self, data, accepted_media_type, renderer_context):
view = renderer_context[]
request = renderer_context[]
response = renderer_context[]
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, , request)
raw_data_put_form = self.get_raw_data_form(data, view, , request)
raw_data_patch_form = self.get_raw_data_form(data, view, , request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type =
if renderer:
renderer_content_type = % renderer.media_type
if renderer.charset:
renderer_content_type += % renderer.charset
response_headers[] = renderer_content_type
if getattr(view, , None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
: self.get_content(renderer, data, accepted_media_type, renderer_context),
: view,
: request,
: response,
: self.get_description(view, response.status_code),
: self.get_name(view),
: VERSION,
: paginator,
: self.get_breadcrumbs(request),
: view.allowed_methods,
: [renderer_cls.format for renderer_cls in view.renderer_classes],
: response_headers,
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_filter_form(data, view, request),
: raw_data_put_form,
: raw_data_post_form,
: raw_data_patch_form,
: raw_data_put_or_patch_form,
: bool(response.status_code != 403),
: api_settings
}
return context
|
Returns the context used to render.
|
### Input:
Returns the context used to render.
### Response:
#vtb
def get_context(self, data, accepted_media_type, renderer_context):
view = renderer_context[]
request = renderer_context[]
response = renderer_context[]
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, , request)
raw_data_put_form = self.get_raw_data_form(data, view, , request)
raw_data_patch_form = self.get_raw_data_form(data, view, , request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type =
if renderer:
renderer_content_type = % renderer.media_type
if renderer.charset:
renderer_content_type += % renderer.charset
response_headers[] = renderer_content_type
if getattr(view, , None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
: self.get_content(renderer, data, accepted_media_type, renderer_context),
: view,
: request,
: response,
: self.get_description(view, response.status_code),
: self.get_name(view),
: VERSION,
: paginator,
: self.get_breadcrumbs(request),
: view.allowed_methods,
: [renderer_cls.format for renderer_cls in view.renderer_classes],
: response_headers,
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_rendered_html_form(data, view, , request),
: self.get_filter_form(data, view, request),
: raw_data_put_form,
: raw_data_post_form,
: raw_data_patch_form,
: raw_data_put_or_patch_form,
: bool(response.status_code != 403),
: api_settings
}
return context
|
#vtb
def lookups(self):
if self._lookups is None:
from twilio.rest.lookups import Lookups
self._lookups = Lookups(self)
return self._lookups
|
Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups
|
### Input:
Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups
### Response:
#vtb
def lookups(self):
if self._lookups is None:
from twilio.rest.lookups import Lookups
self._lookups = Lookups(self)
return self._lookups
|
#vtb
def register_validator(flag_name,
checker,
message=,
flag_values=_flagvalues.FLAGS):
v = SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v)
|
Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
|
### Input:
Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
### Response:
#vtb
def register_validator(flag_name,
checker,
                       message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
v = SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v)
|
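A usage sketch against the public absl.flags API:

from absl import flags

flags.DEFINE_integer('port', 8080, 'Port to listen on.')
flags.register_validator(
    'port',
    lambda value: 1024 <= value <= 65535,
    message='--port must be in [1024, 65535].')
|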
#vtb
def calcDistMatchArr(matchArr, tKey, mKey):
matchArrSize = listvalues(matchArr)[0].size
    distInfo = {'posPairs': list(), 'eucDist': list()}
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
    for pos1 in range(matchArrSize-1):
        for pos2 in range(pos1+1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
    distSort = numpy.argsort(distInfo['eucDist'])
for key in list(viewkeys(distInfo)):
distInfo[key] = distInfo[key][distSort]
return distInfo
|
Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
}
|
### Input:
Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
}
### Response:
#vtb
def calcDistMatchArr(matchArr, tKey, mKey):
matchArrSize = listvalues(matchArr)[0].size
    distInfo = {'posPairs': list(), 'eucDist': list()}
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
    for pos1 in range(matchArrSize-1):
        for pos2 in range(pos1+1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
    distSort = numpy.argsort(distInfo['eucDist'])
for key in list(viewkeys(distInfo)):
distInfo[key] = distInfo[key][distSort]
return distInfo
|
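Worth noting: scipy's pdist returns the condensed distance matrix in pair order (0,1), (0,2), ..., (1,2), ..., which is exactly the nested-loop order used to build posPairs above, so the two arrays stay aligned before sorting. A small check:

import numpy as np
from scipy.spatial import distance

pts = np.array([[0.0, 0.0], [3.0, 4.0], [0.0, 1.0]])
print(distance.pdist(pts))  # pairs (0,1), (0,2), (1,2) -> [5.0, 1.0, 4.2426...]
|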
#vtb
def scale_dataset(self, dsid, variable, info):
variable = remove_empties(variable)
scale = variable.attrs.get(, np.array(1))
offset = variable.attrs.get(, np.array(0))
if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating):
if in variable.attrs:
variable = variable.where(
variable != variable.attrs[])
variable.attrs[] = np.nan
if in variable.attrs:
variable = variable.where(
variable <= variable.attrs[][1])
variable = variable.where(
variable >= variable.attrs[][0])
if in variable.attrs:
variable = variable.where(
variable <= variable.attrs[])
if in variable.attrs:
variable = variable.where(
variable >= variable.attrs[])
attrs = variable.attrs
variable = variable * scale + offset
variable.attrs = attrs
variable.attrs.update({: self.platform_name,
: self.sensor})
variable.attrs.setdefault(, )
ancillary_names = variable.attrs.get(, )
try:
variable.attrs[] = ancillary_names.split()
except AttributeError:
pass
if in variable.attrs:
variable.attrs[] = [int(val)
for val in variable.attrs[].split()]
if variable.attrs[][0] == 1:
variable.attrs[] = [0] + variable.attrs[]
variable = xr.DataArray(da.vstack((np.array(variable.attrs[]), variable.data)),
coords=variable.coords, dims=variable.dims, attrs=variable.attrs)
val, idx = np.unique(variable.attrs[], return_index=True)
variable.attrs[] = val
variable = variable[idx]
if in info:
variable.attrs.setdefault(, info[])
if self.sw_version == and dsid.name == :
variable = variable[1:, :]
return variable
|
Scale the data set, applying the attributes from the netCDF file
|
### Input:
Scale the data set, applying the attributes from the netCDF file
### Response:
#vtb
def scale_dataset(self, dsid, variable, info):
variable = remove_empties(variable)
scale = variable.attrs.get(, np.array(1))
offset = variable.attrs.get(, np.array(0))
if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating):
if in variable.attrs:
variable = variable.where(
variable != variable.attrs[])
variable.attrs[] = np.nan
if in variable.attrs:
variable = variable.where(
variable <= variable.attrs[][1])
variable = variable.where(
variable >= variable.attrs[][0])
if in variable.attrs:
variable = variable.where(
variable <= variable.attrs[])
if in variable.attrs:
variable = variable.where(
variable >= variable.attrs[])
attrs = variable.attrs
variable = variable * scale + offset
variable.attrs = attrs
variable.attrs.update({: self.platform_name,
: self.sensor})
variable.attrs.setdefault(, )
ancillary_names = variable.attrs.get(, )
try:
variable.attrs[] = ancillary_names.split()
except AttributeError:
pass
if in variable.attrs:
variable.attrs[] = [int(val)
for val in variable.attrs[].split()]
if variable.attrs[][0] == 1:
variable.attrs[] = [0] + variable.attrs[]
variable = xr.DataArray(da.vstack((np.array(variable.attrs[]), variable.data)),
coords=variable.coords, dims=variable.dims, attrs=variable.attrs)
val, idx = np.unique(variable.attrs[], return_index=True)
variable.attrs[] = val
variable = variable[idx]
if in info:
variable.attrs.setdefault(, info[])
if self.sw_version == and dsid.name == :
variable = variable[1:, :]
return variable
|
#vtb
def afterglow(self, src=None, event=None, dst=None, **kargs):
    if src is None:
        src = lambda x: x['IP'].src
    if event is None:
        event = lambda x: x['IP'].dport
    if dst is None:
        dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
import math
def normalize(n):
return 2 + math.log(n) / 4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr =
gr += "
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += % (repr(s), repr(s), n, n)
gr += "
for e in el:
n, _ = el[e]
        n = 1 + float(n - mine) / (maxe - mine)
gr += % (repr(e), repr(e), n, n)
for d in dl:
n = dl[d]
        n = 1 + float(n - mind) / (maxd - mind)
gr += % (repr(d), repr(d), n, n)
gr += "
for s in sl:
n, lst = sl[s]
for e in lst:
gr += % (repr(s), repr(e))
for e in el:
n, lst = el[e]
for d in lst:
gr += % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs)
|
Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst
|
### Input:
Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst
### Response:
#vtb
def afterglow(self, src=None, event=None, dst=None, **kargs):
    if src is None:
        src = lambda x: x['IP'].src
    if event is None:
        event = lambda x: x['IP'].dport
    if dst is None:
        dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
import math
def normalize(n):
return 2 + math.log(n) / 4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr =
gr += "
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += % (repr(s), repr(s), n, n)
gr += "
for e in el:
n, _ = el[e]
        n = 1 + float(n - mine) / (maxe - mine)
gr += % (repr(e), repr(e), n, n)
for d in dl:
n = dl[d]
        n = 1 + float(n - mind) / (maxd - mind)
gr += % (repr(d), repr(d), n, n)
gr += "
for s in sl:
n, lst = sl[s]
for e in lst:
gr += % (repr(s), repr(e))
for e in el:
n, lst = el[e]
for d in lst:
gr += % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs)
|
#vtb
def displayhook(value):
global _displayhooks
new_hooks = []
for hook_ref in _displayhooks:
hook = hook_ref()
if hook:
hook(value)
new_hooks.append(hook_ref)
_displayhooks = new_hooks
sys.__displayhook__(value)
|
Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant>
|
### Input:
Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant>
### Response:
#vtb
def displayhook(value):
global _displayhooks
new_hooks = []
for hook_ref in _displayhooks:
hook = hook_ref()
if hook:
hook(value)
new_hooks.append(hook_ref)
_displayhooks = new_hooks
sys.__displayhook__(value)
|
#vtb
def get_joystick_buttons(joy):
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickButtons(joy, count)
return result, count_value.value
|
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
|
### Input:
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
### Response:
#vtb
def get_joystick_buttons(joy):
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickButtons(joy, count)
return result, count_value.value
|
#vtb
def _get_start_revision(self, graph, benchmark, entry_name):
start_revision = min(six.itervalues(self.revisions))
        if graph.params.get('branch'):
            branch_suffix = '@' + graph.params.get('branch')  # assumed: asv's entry_name@branch convention
        else:
            branch_suffix = ''
for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
if re.match(regex, entry_name + branch_suffix):
if start_commit is None:
return None
if self.conf.branches == [None]:
key = (start_commit, None)
else:
                key = (start_commit, graph.params.get('branch'))
if key not in self._start_revisions:
spec = self.repo.get_new_range_spec(*key)
start_hash = self.repo.get_hash_from_name(start_commit)
for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
rev = self.revisions.get(commit)
if rev is not None:
self._start_revisions[key] = rev
break
else:
log.warning(("Commit {0} specified in `regressions_first_commits` "
"not found in branch").format(start_commit))
self._start_revisions[key] = -1
start_revision = max(start_revision, self._start_revisions[key] + 1)
return start_revision
|
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs in this order --- the starting commit
thus corresponds to a specific starting revision.
|
### Input:
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs in this order --- the starting commit
thus corresponds to a specific starting revision.
### Response:
#vtb
def _get_start_revision(self, graph, benchmark, entry_name):
start_revision = min(six.itervalues(self.revisions))
        if graph.params.get('branch'):
            branch_suffix = '@' + graph.params.get('branch')  # assumed: asv's entry_name@branch convention
        else:
            branch_suffix = ''
for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
if re.match(regex, entry_name + branch_suffix):
if start_commit is None:
return None
if self.conf.branches == [None]:
key = (start_commit, None)
else:
                key = (start_commit, graph.params.get('branch'))
if key not in self._start_revisions:
spec = self.repo.get_new_range_spec(*key)
start_hash = self.repo.get_hash_from_name(start_commit)
for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
rev = self.revisions.get(commit)
if rev is not None:
self._start_revisions[key] = rev
break
else:
log.warning(("Commit {0} specified in `regressions_first_commits` "
"not found in branch").format(start_commit))
self._start_revisions[key] = -1
start_revision = max(start_revision, self._start_revisions[key] + 1)
return start_revision
|
#vtb
def unescape_LDAP(ldap_string):
if ldap_string is None:
return None
if ESCAPE_CHARACTER not in ldap_string:
return ldap_string
escaped = False
result = ""
for character in ldap_string:
if not escaped and character == ESCAPE_CHARACTER:
escaped = True
else:
escaped = False
result += character
return result
|
Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string
|
### Input:
Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string
### Response:
#vtb
def unescape_LDAP(ldap_string):
if ldap_string is None:
return None
if ESCAPE_CHARACTER not in ldap_string:
return ldap_string
escaped = False
result = ""
for character in ldap_string:
if not escaped and character == ESCAPE_CHARACTER:
escaped = True
else:
escaped = False
result += character
return result
|
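Behaviour sketch, assuming ESCAPE_CHARACTER == '\\':

print(unescape_LDAP('cn=Doe\\, John'))  # cn=Doe, John
print(unescape_LDAP('path\\\\share'))   # path\share  (escaped backslash kept once)
|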
#vtb
def datetime(self, to_timezone=None, naive=False):
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
            dt = dt.replace(tzinfo=self._tz)  # assign the result; .replace() returns a new datetime
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt
|
Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
|
### Input:
Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
### Response:
#vtb
def datetime(self, to_timezone=None, naive=False):
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
            dt = dt.replace(tzinfo=self._tz)  # assign the result; .replace() returns a new datetime
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt
|
#vtb
def get_country_by_name(self, country_name) -> 'Country':
VALID_STR.validate(country_name, , exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
|
### Input:
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
### Response:
#vtb
def get_country_by_name(self, country_name) -> 'Country':
VALID_STR.validate(country_name, , exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
#vtb
def get_server_networks(self, network, public=False, private=False,
key=None):
return _get_server_networks(network, public=public, private=private,
key=key)
|
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
|
### Input:
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
### Response:
#vtb
def get_server_networks(self, network, public=False, private=False,
key=None):
return _get_server_networks(network, public=public, private=private,
key=key)
|
#vtb
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
if not is_type_text_string(header):
header = to_text_string(header)
return header
|
Get the information to put in the header.
|
### Input:
Get the information to put in the header.
### Response:
#vtb
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
if not is_type_text_string(header):
header = to_text_string(header)
return header
|
#vtb
def distL2(x1,y1,x2,y2):
xdiff = x2 - x1
ydiff = y2 - y1
return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5)
|
Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters
|
### Input:
Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters
### Response:
#vtb
def distL2(x1,y1,x2,y2):
xdiff = x2 - x1
ydiff = y2 - y1
return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5)
|
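The TSPLIB rounding is the int(x + .5) at the end; for example:

print(distL2(0, 0, 3, 4))  # 5 (exact)
print(distL2(0, 0, 1, 1))  # 1, since int(sqrt(2) + .5) == int(1.914...) == 1
|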
#vtb
def str_display_width(s):
s= str(s)
width = 0
len = s.__len__()
for i in range(0,len):
sublen = s[i].encode().__len__()
sublen = int(sublen/2 + 1/2)
width = width + sublen
return(width)
|
from elist.utils import *
str_display_width('a')
str_display_width('去')
|
### Input:
from elist.utils import *
str_display_width('a')
str_display_width('去')
### Response:
#vtb
def str_display_width(s):
s= str(s)
width = 0
len = s.__len__()
for i in range(0,len):
sublen = s[i].encode().__len__()
sublen = int(sublen/2 + 1/2)
width = width + sublen
return(width)
|
#vtb
def command_x(self, x, to=None):
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND])
|
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
|
### Input:
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
### Response:
#vtb
def command_x(self, x, to=None):
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND])
|
#vtb
def get_concurrency(self):
method =
endpoint = .format(
self.client.sauce_username)
return self.client.request(method, endpoint)
|
Check account concurrency limits.
|
### Input:
Check account concurrency limits.
### Response:
#vtb
def get_concurrency(self):
method =
endpoint = .format(
self.client.sauce_username)
return self.client.request(method, endpoint)
|
#vtb
def parent_images(self):
parents = []
for instr in self.structure:
        if instr['instruction'] != 'FROM':  # keys assumed from dockerfile-parse's structure entries
            continue
        image, _ = image_from(instr['value'])
if image is not None:
parents.append(image)
return parents
|
:return: list of parent images -- one image per stage's FROM instruction
|
### Input:
:return: list of parent images -- one image per stage's FROM instruction
### Response:
#vtb
def parent_images(self):
parents = []
for instr in self.structure:
        if instr['instruction'] != 'FROM':  # keys assumed from dockerfile-parse's structure entries
            continue
        image, _ = image_from(instr['value'])
if image is not None:
parents.append(image)
return parents
|
#vtb
def constructRows(self, items):
rows = []
for item in items:
row = dict((colname, col.extractValue(self, item))
for (colname, col) in self.columns.iteritems())
link = self.linkToItem(item)
if link is not None:
row[u] = link
rows.append(row)
return rows
|
Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns.
|
### Input:
Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns.
### Response:
#vtb
def constructRows(self, items):
rows = []
for item in items:
row = dict((colname, col.extractValue(self, item))
for (colname, col) in self.columns.iteritems())
link = self.linkToItem(item)
if link is not None:
row[u] = link
rows.append(row)
return rows
|
#vtb
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal
|
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
|
### Input:
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
### Response:
#vtb
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal
|
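A NumPy transcription of the same computation, handy for inspecting values without a TF session (a sketch, not part of the original library):

import numpy as np

def timing_signal_1d_np(length, channels, min_timescale=1.0,
                        max_timescale=1.0e4, start_index=0):
    position = np.arange(length, dtype=np.float32) + start_index
    num_timescales = channels // 2
    log_timescale_increment = (np.log(max_timescale / min_timescale)
                               / max(num_timescales - 1, 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    signal = np.pad(signal, [(0, 0), (0, channels % 2)])  # odd channel counts get a zero column
    return signal.reshape(1, length, channels)
|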
#vtb
def normalizeGlyphNote(value):
if not isinstance(value, basestring):
raise TypeError("Note must be a string, not %s."
% type(value).__name__)
return unicode(value)
|
Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
|
### Input:
Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
### Response:
#vtb
def normalizeGlyphNote(value):
if not isinstance(value, basestring):
raise TypeError("Note must be a string, not %s."
% type(value).__name__)
return unicode(value)
|
#vtb
def crack_secret_exponent_from_k(generator, signed_value, sig, k):
r, s = sig
return ((s * k - signed_value) * generator.inverse(r)) % generator.order()
|
Given a signature of a signed_value and a known k, return the secret exponent.
|
### Input:
Given a signature of a signed_value and a known k, return the secret exponent.
### Response:
#vtb
def crack_secret_exponent_from_k(generator, signed_value, sig, k):
r, s = sig
return ((s * k - signed_value) * generator.inverse(r)) % generator.order()
|
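The algebra: ECDSA signing computes s = k^(-1) * (z + r*d) mod n, so a known nonce k gives d = (s*k - z) * r^(-1) mod n, which is exactly the expression returned above. A toy check with small numbers (pow(r, -1, n) needs Python 3.8+):

n, d, k, r, z = 23, 7, 5, 11, 9                # toy order, secret exponent, nonce, r, message hash
s = pow(k, -1, n) * (z + r * d) % n            # "sign"
assert ((s * k - z) * pow(r, -1, n)) % n == d  # recover the secret exponent
|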
#vtb
def back_slash_to_front_converter(string):
try:
if not string or not isinstance(string, str):
return string
        return string.replace('\\', '/')
except Exception:
return string
|
Replacing all \ in the str to /
:param string: single string to modify
:type string: str
|
### Input:
Replacing all \ in the str to /
:param string: single string to modify
:type string: str
### Response:
#vtb
def back_slash_to_front_converter(string):
try:
if not string or not isinstance(string, str):
return string
        return string.replace('\\', '/')
except Exception:
return string
|
#vtb
def urlize(text, trim_url_limit=None, nofollow=False):
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
               and (x[:limit] + (len(x) >= limit and '...'
               or '')) or x
    words = _word_split_re.split(unicode(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            if middle.startswith('http://') or \
                    middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            if '@' in middle and not middle.startswith('www.') and \
                    not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
|
Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
|
### Input:
Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
### Response:
#vtb
def urlize(text, trim_url_limit=None, nofollow=False):
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
               and (x[:limit] + (len(x) >= limit and '...'
               or '')) or x
    words = _word_split_re.split(unicode(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            if middle.startswith('http://') or \
                    middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            if '@' in middle and not middle.startswith('www.') and \
                    not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
|
#vtb
def relabel(label_list, projections):
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException(.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == :
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels)
|
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
|
### Input:
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
### Response:
#vtb
def relabel(label_list, projections):
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == '':
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels)
|
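For reference, `relabel` depends on `find_missing_projections`, which is not shown in this row. A minimal sketch of such a helper, assuming the `WILDCARD_COMBINATION = ('**',)` convention used above (a hypothetical reconstruction, not audiomate's actual implementation):

WILDCARD_COMBINATION = ('**',)

def find_missing_projections(label_list, projections):
    # Collect every naturally sorted label combination occurring in the
    # label list that has neither an explicit nor a wildcard projection.
    unmapped = []
    for start, end, labels in label_list.ranges():
        combination = tuple(sorted(label.value for label in labels))
        if combination not in projections and WILDCARD_COMBINATION not in projections:
            if combination not in unmapped:
                unmapped.append(combination)
    return unmapped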
#vtb
def _find_server(account, servername=None):
servers = [s for s in account.resources() if 'server' in s.provides]
if servername is not None:
for server in servers:
if server.name == servername:
return server.connect()
raise SystemExit('Unknown server name: %s' % servername)
return utils.choose('Choose a server', servers, 'name').connect()
|
Find and return a PlexServer object.
|
### Input:
Find and return a PlexServer object.
### Response:
#vtb
def _find_server(account, servername=None):
servers = [s for s in account.resources() if 'server' in s.provides]
if servername is not None:
for server in servers:
if server.name == servername:
return server.connect()
raise SystemExit('Unknown server name: %s' % servername)
return utils.choose('Choose a server', servers, 'name').connect()
|
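A minimal usage sketch, assuming plexapi's `MyPlexAccount`; the credentials and server name are placeholders:

from plexapi.myplex import MyPlexAccount

# Sign in to plex.tv, then resolve a server by name (or choose interactively).
account = MyPlexAccount('<username>', '<password>')
plex = _find_server(account, servername='MyServer')
print(plex.friendlyName)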
#vtb
def upload(self, remote_path, local_path, progress=None):
if os.path.isdir(local_path):
self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.upload_file(local_path=local_path, remote_path=remote_path)
|
Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
|
### Input:
Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
### Response:
#vtb
def upload(self, remote_path, local_path, progress=None):
if os.path.isdir(local_path):
self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.upload_file(local_path=local_path, remote_path=remote_path)
|
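A minimal usage sketch for this client method, assuming the webdavclient3 package (`webdav3.client.Client`); hostname and credentials are placeholders:

from webdav3.client import Client

options = {
    'webdav_hostname': 'https://webdav.example.com',
    'webdav_login': '<login>',
    'webdav_password': '<password>',
}
client = Client(options)
# A single file is uploaded directly; a local directory is uploaded recursively.
client.upload(remote_path='backups/data.csv', local_path='/tmp/data.csv')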
#vtb
def iter_contributor_statistics(self, number=-1, etag=None):
url = self._build_url('stats', 'contributors', base_url=self._api)
return self._iter(int(number), url, ContributorStats, etag=etag)
|
Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7
|
### Input:
Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7
### Response:
#vtb
def iter_contributor_statistics(self, number=-1, etag=None):
url = self._build_url('stats', 'contributors', base_url=self._api)
return self._iter(int(number), url, ContributorStats, etag=etag)
|
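A usage sketch of the 202 behaviour described in the note above, assuming the github3.py 0.x API (`github3.repository`):

import time
import github3

repo = github3.repository('sigmavirus24', 'github3.py')
stats = repo.iter_contributor_statistics()
contributors = list(stats)
if not contributors and stats.last_status == 202:
    # GitHub is still computing the statistics; wait before re-requesting.
    time.sleep(2)
    contributors = list(repo.iter_contributor_statistics())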
#vtb
def _getH2singleTrait(self, K, verbose=None):
verbose = dlimix.getVerbose(verbose)
varg = sp.zeros(self.P)
varn = sp.zeros(self.P)
fixed = sp.zeros((1,self.P))
for p in range(self.P):
y = self.Y[:,p:p+1]
I = sp.isnan(y[:,0])
if I.sum()>0:
y = y[~I,:]
_K = K[~I,:][:,~I]
else:
_K = copy.copy(K)
lmm = dlimix.CLMM()
lmm.setK(_K)
lmm.setSNPs(sp.ones((y.shape[0],1)))
lmm.setPheno(y)
lmm.setCovs(sp.zeros((y.shape[0],1)))
lmm.setVarcompApprox0(-20, 20, 1000)
lmm.process()
delta = sp.exp(lmm.getLdelta0()[0,0])
Vtot = sp.exp(lmm.getLSigma()[0,0])
varg[p] = Vtot
varn[p] = delta*Vtot
fixed[:,p] = lmm.getBetaSNP()
if verbose: print(p)
sth = {}
sth['varg'] = varg
sth['varn'] = varn
sth['fixed'] = fixed
return sth
|
Internal function for parameter initialization.
Estimates variance components and fixed effects using a linear mixed model with an intercept and two random effects (one of which is noise).
Args:
K: covariance matrix of the non-noise random effect term
|
### Input:
Internal function for parameter initialization.
Estimates variance components and fixed effects using a linear mixed model with an intercept and two random effects (one of which is noise).
Args:
K: covariance matrix of the non-noise random effect term
### Response:
#vtb
def _getH2singleTrait(self, K, verbose=None):
verbose = dlimix.getVerbose(verbose)
varg = sp.zeros(self.P)
varn = sp.zeros(self.P)
fixed = sp.zeros((1,self.P))
for p in range(self.P):
y = self.Y[:,p:p+1]
I = sp.isnan(y[:,0])
if I.sum()>0:
y = y[~I,:]
_K = K[~I,:][:,~I]
else:
_K = copy.copy(K)
lmm = dlimix.CLMM()
lmm.setK(_K)
lmm.setSNPs(sp.ones((y.shape[0],1)))
lmm.setPheno(y)
lmm.setCovs(sp.zeros((y.shape[0],1)))
lmm.setVarcompApprox0(-20, 20, 1000)
lmm.process()
delta = sp.exp(lmm.getLdelta0()[0,0])
Vtot = sp.exp(lmm.getLSigma()[0,0])
varg[p] = Vtot
varn[p] = delta*Vtot
fixed[:,p] = lmm.getBetaSNP()
if verbose: print(p)
sth = {}
sth['varg'] = varg
sth['varn'] = varn
sth['fixed'] = fixed
return sth
|
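The returned variance components map directly onto a per-trait narrow-sense heritability estimate; a minimal sketch of that arithmetic, assuming the `'varg'`/`'varn'` result keys used in the code above:

import numpy as np

def heritability(sth):
    # h2 = genetic variance / (genetic variance + noise variance), per trait.
    varg = np.asarray(sth['varg'])
    varn = np.asarray(sth['varn'])
    return varg / (varg + varn)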
#vtb
def transform(self, blocks, y=None):
preds = (self.kmeans.predict(make_weninger_features(blocks)) > 0).astype(int)
return np.reshape(preds, (-1, 1))
|
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
|
### Input:
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
### Response:
#vtb
def transform(self, blocks, y=None):
preds = (self.kmeans.predict(make_weninger_features(blocks)) > 0).astype(int)
return np.reshape(preds, (-1, 1))
|
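A brief usage sketch, assuming `model` is a fitted instance of the class defining `transform` above and `blocks` comes from `Blockifier.blockify` (both names are placeholders):

# preds has shape (n_blocks, 1) with values in {0, 1}.
preds = model.transform(blocks)
content_blocks = [b for b, p in zip(blocks, preds[:, 0]) if p == 1]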
#vtb
def job_exists(name=None):
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if server.job_exists(name):
return True
else:
return False
|
Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job is check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname
|
### Input:
Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job is check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname
### Response:
#vtb
def job_exists(name=None):
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if server.job_exists(name):
return True
else:
return False
|
#vtb
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args[] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
|
Loads bytes to S3
This is provided as a convenience to drop bytes in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
|
### Input:
Loads bytes to S3
This is provided as a convenience to drop bytes in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
### Response:
#vtb
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args[] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
|
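A minimal usage sketch, assuming Airflow 1.x's `S3Hook`; the connection id, bucket, and key are placeholders:

from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
# Either pass bucket_name explicitly, or embed it in an s3:// URL as the key.
hook.load_bytes(b'{"status": "ok"}',
                key='reports/status.json',
                bucket_name='my-bucket',
                replace=True,
                encrypt=True)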
#vtb
async def connect(url, *, apikey=None, insecure=False):
url = api_url(url)
url = urlparse(url)
if url.username is not None:
raise ConnectError(
"Cannot provide user-name explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if url.password is not None:
raise ConnectError(
"Cannot provide password explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if apikey is None:
credentials = None
else:
credentials = Credentials.parse(apikey)
description = await fetch_api_description(url, insecure)
return Profile(
name=url.netloc, url=url.geturl(), credentials=credentials,
description=description)
|
Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name
|
### Input:
Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name
### Response:
#vtb
async def connect(url, *, apikey=None, insecure=False):
url = api_url(url)
url = urlparse(url)
if url.username is not None:
raise ConnectError(
"Cannot provide user-name explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if url.password is not None:
raise ConnectError(
"Cannot provide password explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if apikey is None:
credentials = None
else:
credentials = Credentials.parse(apikey)
description = await fetch_api_description(url, insecure)
return Profile(
name=url.netloc, url=url.geturl(), credentials=credentials,
description=description)
|
#vtb
def _sensoryComputeInferenceMode(self, anchorInput):
if len(anchorInput) == 0:
return
overlaps = self.connections.computeActivity(anchorInput,
self.connectedPermanence)
activeSegments = np.where(overlaps >= self.activationThreshold)[0]
sensorySupportedCells = np.unique(
self.connections.mapSegmentsToCells(activeSegments))
self.bumpPhases = self.cellPhases[:,sensorySupportedCells]
self._computeActiveCells()
self.activeSegments = activeSegments
self.sensoryAssociatedCells = sensorySupportedCells
|
Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer.
|
### Input:
Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer.
### Response:
#vtb
def _sensoryComputeInferenceMode(self, anchorInput):
if len(anchorInput) == 0:
return
overlaps = self.connections.computeActivity(anchorInput,
self.connectedPermanence)
activeSegments = np.where(overlaps >= self.activationThreshold)[0]
sensorySupportedCells = np.unique(
self.connections.mapSegmentsToCells(activeSegments))
self.bumpPhases = self.cellPhases[:,sensorySupportedCells]
self._computeActiveCells()
self.activeSegments = activeSegments
self.sensoryAssociatedCells = sensorySupportedCells
|
#vtb
def __expand_cluster(self, index_point):
cluster = None
self.__visited[index_point] = True
neighbors = self.__neighbor_searcher(index_point)
if len(neighbors) >= self.__neighbors:
cluster = [index_point]
self.__belong[index_point] = True
for i in neighbors:
if self.__visited[i] is False:
self.__visited[i] = True
next_neighbors = self.__neighbor_searcher(i)
if len(next_neighbors) >= self.__neighbors:
neighbors += [k for k in next_neighbors if k not in neighbors and k != index_point]
if self.__belong[i] is False:
cluster.append(i)
self.__belong[i] = True
return cluster
|
!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Indexes of the points that belong to the newly expanded cluster, or None if the specified point is not a core point and no cluster was expanded.
|
### Input:
!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Indexes of the points that belong to the newly expanded cluster, or None if the specified point is not a core point and no cluster was expanded.
### Response:
#vtb
def __expand_cluster(self, index_point):
cluster = None
self.__visited[index_point] = True
neighbors = self.__neighbor_searcher(index_point)
if len(neighbors) >= self.__neighbors:
cluster = [index_point]
self.__belong[index_point] = True
for i in neighbors:
if self.__visited[i] is False:
self.__visited[i] = True
next_neighbors = self.__neighbor_searcher(i)
if len(next_neighbors) >= self.__neighbors:
neighbors += [k for k in next_neighbors if k not in neighbors and k != index_point]
if self.__belong[i] is False:
cluster.append(i)
self.__belong[i] = True
return cluster
|
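For orientation, a self-contained sketch of the same expand-cluster idea in plain DBSCAN form (illustrative only, not pyclustering's API); it relies on the same detail as the code above, namely that iterating a Python list also visits elements appended during the loop:

import numpy as np

def dbscan(points, eps, min_pts):
    n = len(points)
    visited = [False] * n
    assigned = [False] * n
    clusters, noise = [], []

    def region(i):
        # Indexes of all points within eps of point i (includes i itself).
        return [j for j in range(n) if np.linalg.norm(points[i] - points[j]) <= eps]

    for i in range(n):
        if visited[i]:
            continue
        visited[i] = True
        neighbors = region(i)
        if len(neighbors) < min_pts:
            noise.append(i)
            continue
        cluster, assigned[i] = [i], True
        for j in neighbors:                    # list may grow while iterating
            if not visited[j]:
                visited[j] = True
                more = region(j)
                if len(more) >= min_pts:
                    neighbors += [k for k in more if k not in neighbors]
            if not assigned[j]:
                cluster.append(j)
                assigned[j] = True
        clusters.append(cluster)
    # Border points reached later may have been provisionally marked as noise.
    noise = [i for i in noise if not assigned[i]]
    return clusters, noise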
#vtb
def create_table(self, name, schema):
columns = [" ".join(column) for column in schema]
self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
.format(name=name, columns=",".join(columns)))
|
Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns.
|
### Input:
Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns.
### Response:
#vtb
def create_table(self, name, schema):
columns = [" ".join(column) for column in schema]
self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
.format(name=name, columns=",".join(columns)))
|
#vtb
def update_association(self, association):
bad_goids = set()
for goids in association.values():
parents = set()
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except KeyError:
bad_goids.add(goid.strip())
goids.update(parents)
if bad_goids:
sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
N=len(bad_goids), GOs=" ".join(bad_goids)))
|
Add the GO parents of a gene's associated GO IDs to the gene's association.
|
### Input:
Add the GO parents of a gene's associated GO IDs to the gene's association.
### Response:
#vtb
def update_association(self, association):
bad_goids = set()
for goids in association.values():
parents = set()
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except KeyError:
bad_goids.add(goid.strip())
goids.update(parents)
if bad_goids:
sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
N=len(bad_goids), GOs=" ".join(bad_goids)))
|
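A minimal usage sketch, assuming this method lives on goatools' `GODag` (as in older goatools releases) and that `go-basic.obo` is available locally:

from goatools.obo_parser import GODag

godag = GODag('go-basic.obo')
# gene -> set of directly annotated GO IDs
association = {'geneA': {'GO:0006915'}, 'geneB': {'GO:0008150'}}
godag.update_association(association)
# Each set now also contains all ancestor GO IDs of the original annotations.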
#vtb
def parse_query(self, query_string):
if query_string == '*':
return xapian.Query('')  # match everything
elif query_string == '':
return xapian.Query()  # match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags)
|
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
|
### Input:
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
### Response:
#vtb
def parse_query(self, query_string):
if query_string == '*':
return xapian.Query('')  # match everything
elif query_string == '':
return xapian.Query()  # match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags)
|
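A usage sketch of the two sentinel queries handled first, assuming `backend` is an instance of the xapian-haystack backend that defines this method:

match_all = backend.parse_query('*')    # xapian.Query('')  -> matches every document
match_none = backend.parse_query('')    # xapian.Query()    -> matches nothing
query = backend.parse_query('title:django AND state:published')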