code (string, 64 to 7.01k chars) | docstring (string, 2 to 15.8k chars) | text (string, 144 to 19.2k chars)
---|---|---
#vtb
def attention_lm_moe_memory_efficient():
hparams = attention_lm_moe_large()
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams
|
Memory-efficient version.
|
### Input:
Memory-efficient version.
### Response:
#vtb
def attention_lm_moe_memory_efficient():
hparams = attention_lm_moe_large()
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams
|
#vtb
def figure_rst(figure_list, sources_dir):
    figure_paths = [os.path.relpath(figure_path, sources_dir)
                    .replace(os.sep, '/').lstrip('/')  # '/' literals elided in extraction; restored
for figure_path in figure_list]
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % figure_name
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name
return images_rst
|
Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
|
### Input:
Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
### Response:
#vtb
def figure_rst(figure_list, sources_dir):
    figure_paths = [os.path.relpath(figure_path, sources_dir)
                    .replace(os.sep, '/').lstrip('/')  # '/' literals elided in extraction; restored
for figure_path in figure_list]
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % figure_name
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name
return images_rst
|
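A quick sketch of what figure_rst produces, using simplified stand-ins for the module-level templates (the real sphinx-gallery SINGLE_IMAGE, HLIST_HEADER and HLIST_IMAGE_TEMPLATE strings carry more options, so treat these as assumptions):

import os

# Hypothetical, simplified templates; only the %s slot matches the code above.
SINGLE_IMAGE = ".. image:: /%s\n    :align: center\n"
HLIST_HEADER = ".. rst-class:: sphx-glr-horizontal\n\n"
HLIST_IMAGE_TEMPLATE = "    * .. image:: /%s\n"

figure = os.path.relpath("/docs/auto_examples/images/plot_1.png", "/docs").replace(os.sep, "/")
print(SINGLE_IMAGE % figure)                          # one figure -> a single image directive
print(HLIST_HEADER + HLIST_IMAGE_TEMPLATE % figure)   # several figures -> a horizontal list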
#vtb
def _stack_bands(one, other):
assert set(one.band_names).intersection(set(other.band_names)) == set()
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
return _Raster(image=new_image, band_names=new_bands)
|
Merges two rasters with non-overlapping bands by stacking the bands.
|
### Input:
Merges two rasters with non-overlapping bands by stacking the bands.
### Response:
#vtb
def _stack_bands(one, other):
assert set(one.band_names).intersection(set(other.band_names)) == set()
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
return _Raster(image=new_image, band_names=new_bands)
|
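The masked-array mechanics above can be checked in isolation; a minimal numpy sketch (no telluric _Raster involved) showing how the combined per-pixel mask is broadcast over every stacked band:

import numpy as np

# Two single-band 2x2 rasters; True in a mask marks a nodata pixel.
one = np.ma.masked_array(np.ones((1, 2, 2)), mask=[[[False, True], [False, False]]])
other = np.ma.masked_array(np.zeros((1, 2, 2)), mask=[[[False, False], [True, False]]])

new_mask = np.ma.getmaskarray(one)[0] | np.ma.getmaskarray(other)[0]  # union of masks
stacked = np.ma.masked_array(
    np.concatenate([one.data, other.data]),
    mask=[new_mask] * (one.shape[0] + other.shape[0]),
)
print(stacked.shape)            # (2, 2, 2): bands stacked along axis 0
print(int(stacked.mask.sum()))  # 4: both bands share the two masked pixels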
#vtb
def handle_abs(self):
x_raw = self.microbit.accelerometer.get_x()
y_raw = self.microbit.accelerometer.get_y()
        # Event type elided in extraction; EV_ABS (0x03) with ABS_X/ABS_Y codes assumed.
        x_abs = (0x03, 0x00, x_raw)
        y_abs = (0x03, 0x01, y_raw)
return x_abs, y_abs
|
Gets the state as the raw absolute numbers.
|
### Input:
Gets the state as the raw absolute numbers.
### Response:
#vtb
def handle_abs(self):
x_raw = self.microbit.accelerometer.get_x()
y_raw = self.microbit.accelerometer.get_y()
        # Event type elided in extraction; EV_ABS (0x03) with ABS_X/ABS_Y codes assumed.
        x_abs = (0x03, 0x00, x_raw)
        y_abs = (0x03, 0x01, y_raw)
return x_abs, y_abs
|
#vtb
def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
if not ignoreFlag:
flag = topdown
else:
flag = True
proc = top
level = 0
if flag:
yield topname, proc, level
if len(proc.subprocess) > 0:
level += 1
for name, subproc in proc.subprocess.items():
for name2, subproc2, level2 in walk_processes(subproc,
topname=name,
topdown=subproc.topdown,
ignoreFlag=ignoreFlag):
yield name2, subproc2, level+level2
if not flag:
yield topname, proc, level
|
Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether to generate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
|
### Input:
Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether to generate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
### Response:
#vtb
def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
if not ignoreFlag:
flag = topdown
else:
flag = True
proc = top
level = 0
if flag:
yield topname, proc, level
if len(proc.subprocess) > 0:
level += 1
for name, subproc in proc.subprocess.items():
for name2, subproc2, level2 in walk_processes(subproc,
topname=name,
topdown=subproc.topdown,
ignoreFlag=ignoreFlag):
yield name2, subproc2, level+level2
if not flag:
yield topname, proc, level
|
#vtb
def get_audit_log(self, begin_time=None, end_time=None):
query_parms = self._time_query_parms(begin_time, end_time)
        uri = self.uri + '/operations/get-audit-log' + query_parms  # operation path elided in extraction; assumed
result = self.manager.session.post(uri)
return result
|
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
### Input:
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
#vtb
def get_audit_log(self, begin_time=None, end_time=None):
query_parms = self._time_query_parms(begin_time, end_time)
        uri = self.uri + '/operations/get-audit-log' + query_parms  # operation path elided in extraction; assumed
result = self.manager.session.post(uri)
return result
|
#vtb
def locks(self):
return self.execute(
sql.LOCKS.format(
pid_column=self.pid_column,
query_column=self.query_column
)
)
|
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
|
### Input:
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
### Response:
#vtb
def locks(self):
return self.execute(
sql.LOCKS.format(
pid_column=self.pid_column,
query_column=self.query_column
)
)
|
#vtb
def read_csr(csr):
    # Dict keys were elided in extraction; salt's usual x509 field names are assumed below.
    csr = _get_request_obj(csr)
    ret = {
        'Version': csr.get_version() + 1,
        'Subject': _parse_subject(csr.get_subject()),
        'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
        'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
        'X509v3 Extensions': _get_csr_extensions(csr),
    }
return ret
|
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
|
### Input:
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
### Response:
#vtb
def read_csr(csr):
    # Dict keys were elided in extraction; salt's usual x509 field names are assumed below.
    csr = _get_request_obj(csr)
    ret = {
        'Version': csr.get_version() + 1,
        'Subject': _parse_subject(csr.get_subject()),
        'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
        'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
        'X509v3 Extensions': _get_csr_extensions(csr),
    }
return ret
|
#vtb
def authenticate_credentials(self, request, access_token):
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related()
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed()
user = token.user
if not user.is_active:
        msg = 'User inactive or deleted: %s' % user.username  # message text elided in extraction; assumed
raise exceptions.AuthenticationFailed(msg)
return (user, token)
|
Authenticate the request, given the access token.
|
### Input:
Authenticate the request, given the access token.
### Response:
#vtb
def authenticate_credentials(self, request, access_token):
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related()
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed()
user = token.user
if not user.is_active:
        msg = 'User inactive or deleted: %s' % user.username  # message text elided in extraction; assumed
raise exceptions.AuthenticationFailed(msg)
return (user, token)
|
#vtb
def encoded_datastream(self):
size = 0
if self.verify:
md5 = hashlib.md5()
leftover = None
while self.within_file:
content = self.get_next_section()
if content == BINARY_CONTENT_END:
if self.verify:
                    # log format string elided in extraction; a plausible message assumed
                    logger.info('decoded %d bytes (%s) MD5 %s',
                        size, humanize_file_size(size), md5.hexdigest())
self.within_file = False
elif self.within_file:
if leftover is not None:
                    content = b''.join([leftover, content])
leftover = None
try:
decoded_content = binascii.a2b_base64(content)
except binascii.Error:
                    lines = content.split(b'\n')
                    decoded_content = binascii.a2b_base64(b''.join(lines[:-1]))
leftover = lines[-1]
if decoded_content is not None:
if self.verify:
md5.update(decoded_content)
size += len(decoded_content)
yield decoded_content
|
Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
|
### Input:
Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
### Response:
#vtb
def encoded_datastream(self):
size = 0
if self.verify:
md5 = hashlib.md5()
leftover = None
while self.within_file:
content = self.get_next_section()
if content == BINARY_CONTENT_END:
if self.verify:
                    # log format string elided in extraction; a plausible message assumed
                    logger.info('decoded %d bytes (%s) MD5 %s',
                        size, humanize_file_size(size), md5.hexdigest())
self.within_file = False
elif self.within_file:
if leftover is not None:
                    content = b''.join([leftover, content])
leftover = None
try:
decoded_content = binascii.a2b_base64(content)
except binascii.Error:
                    lines = content.split(b'\n')
                    decoded_content = binascii.a2b_base64(b''.join(lines[:-1]))
leftover = lines[-1]
if decoded_content is not None:
if self.verify:
md5.update(decoded_content)
size += len(decoded_content)
yield decoded_content
|
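The leftover-handling trick is the heart of this generator: binascii.a2b_base64 rejects input whose data-character count is not a multiple of four, so a chunk that ends mid-line keeps its tail for the next round. A self-contained sketch of the same pattern, independent of the eulfedora classes above:

import base64
import binascii

payload = b"some binary payload that spans several chunks" * 10
encoded = base64.encodebytes(payload)  # newline-separated base64 lines
chunks = [encoded[i:i + 37] for i in range(0, len(encoded), 37)]  # deliberately awkward size

decoded, leftover = b"", None
for content in chunks:
    if leftover is not None:
        content = b"".join([leftover, content])
        leftover = None
    try:
        decoded += binascii.a2b_base64(content)
    except binascii.Error:
        # chunk ends mid-line: decode the complete lines, carry the tail over
        lines = content.split(b"\n")
        decoded += binascii.a2b_base64(b"".join(lines[:-1]))
        leftover = lines[-1]
assert decoded == payload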
#vtb
def set_transfer_spec(self):
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret
|
Run the function to set the transfer spec; on error, set the associated exception.
|
### Input:
Run the function to set the transfer spec; on error, set the associated exception.
### Response:
#vtb
def set_transfer_spec(self):
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret
|
#vtb
def _determine_timeout(default_timeout, specified_timeout, retry):
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout
|
Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
|
### Input:
Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
### Response:
#vtb
def _determine_timeout(default_timeout, specified_timeout, retry):
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout
|
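A short usage sketch of the function above, under the assumption that it is the private helper from google-api-core (so DEFAULT and the timeout classes come from that package):

from google.api_core import timeout
from google.api_core.gapic_v1.method import DEFAULT

default = timeout.ExponentialTimeout(deadline=600.0)

# Nothing specified at call time: fall through to the method default.
print(_determine_timeout(default, DEFAULT, retry=None))
# A bare number at call time is wrapped into a ConstantTimeout.
print(_determine_timeout(default, 30.0, retry=None))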
#vtb
def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
    exit_code = SCons(project_dir).time({
        # dict keys elided in extraction; apio's option names assumed
        'board': board,
        'fpga': fpga,
        'size': size,
        'type': type,
        'pack': pack,
        'verbose': {
            'all': verbose,
            'yosys': verbose_yosys,
            'arachne': verbose_arachne
        }
    })
ctx.exit(exit_code)
|
Bitstream timing analysis.
|
### Input:
Bitstream timing analysis.
### Response:
#vtb
def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
    exit_code = SCons(project_dir).time({
        # dict keys elided in extraction; apio's option names assumed
        'board': board,
        'fpga': fpga,
        'size': size,
        'type': type,
        'pack': pack,
        'verbose': {
            'all': verbose,
            'yosys': verbose_yosys,
            'arachne': verbose_arachne
        }
    })
ctx.exit(exit_code)
|
#vtb
def set_level(self, level):
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level)
|
Set the logging level of this logger.
:param level: must be an int or a str.
|
### Input:
Set the logging level of this logger.
:param level: must be an int or a str.
### Response:
#vtb
def set_level(self, level):
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level)
|
#vtb
def _is_path(instance, attribute, s, exists=True):
"Validator for path-yness"
if not s:
return
if exists:
if os.path.exists(s):
return
else:
raise OSError("path does not exist")
else:
raise TypeError("Not a path?")
|
Validator for path-yness
|
### Input:
Validator for path-yness
### Response:
#vtb
def _is_path(instance, attribute, s, exists=True):
"Validator for path-yness"
if not s:
return
if exists:
if os.path.exists(s):
return
else:
raise OSError("path does not exist")
else:
raise TypeError("Not a path?")
|
#vtb
def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords
|
Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists
|
### Input:
Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists
### Response:
#vtb
def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords
|
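The containment filter walks adjacent pairs of the length-sorted hits and drops a hit when its longer neighbour contains it; reduced to plain (start, end) tuples:

def contains(outer, inner):
    # does interval outer fully contain interval inner?
    return outer[0] <= inner[0] and inner[1] <= outer[1]

hits = [(20, 30), (10, 100), (200, 300)]
hits.sort(key=lambda c: c[1] - c[0])     # shortest first, as in the method above
i = 0
while i < len(hits) - 1:
    if contains(hits[i + 1], hits[i]):   # longer neighbour swallows this hit
        hits.pop(i)
    else:
        i += 1
print(hits)  # [(10, 100), (200, 300)]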
#vtb
def _get_capabilities(self):
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities
|
Get the server's NETCONF capabilities.
:return: List of server capabilities.
|
### Input:
Get the server's NETCONF capabilities.
:return: List of server capabilities.
### Response:
#vtb
def _get_capabilities(self):
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities
|
#vtb
def set_loop_points(self, start_sample=-1, end_sample=0):
lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample)
|
Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
|
### Input:
Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
### Response:
#vtb
def set_loop_points(self, start_sample=-1, end_sample=0):
lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample)
|
#vtb
def _compute_all_features(self):
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
self.dur = len(self._audio) / float(self.sr)
self._framesync_features = self.compute_features()
self._compute_framesync_times()
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
pad = True
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad)
|
Computes all the features (beatsync, framesync) from the audio.
|
### Input:
Computes all the features (beatsync, framesync) from the audio.
### Response:
#vtb
def _compute_all_features(self):
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
self.dur = len(self._audio) / float(self.sr)
self._framesync_features = self.compute_features()
self._compute_framesync_times()
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
pad = True
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad)
|
#vtb
def aggregate_by_index(self, function, level=0):
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index)
|
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
|
### Input:
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
### Response:
#vtb
def aggregate_by_index(self, function, level=0):
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index)
|
#vtb
def association(self, group_xid):
        # Keys elided in extraction; ThreatConnect batch field names assumed.
        association = {'groupXid': group_xid}
        self._indicator_data.setdefault('associatedGroups', []).append(association)
|
Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
|
### Input:
Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
### Response:
#vtb
def association(self, group_xid):
        # Keys elided in extraction; ThreatConnect batch field names assumed.
        association = {'groupXid': group_xid}
        self._indicator_data.setdefault('associatedGroups', []).append(association)
|
#vtb
def _get_timezone(self, root):
        # The XPath expression was elided in extraction and is not recoverable from context.
        tz_str = root.xpath()[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60)
|
Find timezone information on the bottom of the page.
|
### Input:
Find timezone information on the bottom of the page.
### Response:
#vtb
def _get_timezone(self, root):
        # The XPath expression was elided in extraction and is not recoverable from context.
        tz_str = root.xpath()[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60)
|
#vtb
def _erase_card(self, number):
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(
6,
35,
7 + 6 * (number // max_cards_horiz),
35 * (number % max_cards_horiz),
)
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number]
|
Destroy cards with this or higher number.
|
### Input:
Destroy cards with this or higher number.
### Response:
#vtb
def _erase_card(self, number):
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(
6,
35,
7 + 6 * (number // max_cards_horiz),
35 * (number % max_cards_horiz),
)
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number]
|
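The placement arithmetic is easy to sanity-check without a terminal: cards are 35 columns wide and 6 rows tall, laid out left to right starting at row 7, so card n lands at row 7 + 6*(n // per_row) and column 35*(n % per_row). A pure-math sketch (curses.COLS replaced by a hypothetical width):

COLS = 120                # hypothetical terminal width
per_row = COLS // 35      # 3 cards fit per row, mirroring max_cards_horiz above
for number in range(5):
    y = 7 + 6 * (number // per_row)
    x = 35 * (number % per_row)
    print(number, (y, x))  # (7,0) (7,35) (7,70) (13,0) (13,35)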
#vtb
def inspect_task(self, task):
        url = self._url('/tasks/{0}', task)  # path template elided in extraction; docker's tasks endpoint assumed
return self._result(self._get(url), True)
|
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
### Input:
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
#vtb
def inspect_task(self, task):
        url = self._url('/tasks/{0}', task)  # path template elided in extraction; docker's tasks endpoint assumed
return self._result(self._get(url), True)
|
#vtb
def config_name_from_full_name(full_name):
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
            'Expected "projects/{p}/configs/{c}"',
)
return result
|
Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
|
### Input:
Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
### Response:
#vtb
def config_name_from_full_name(full_name):
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
            'Expected "projects/{p}/configs/{c}"',
)
return result
|
#vtb
def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
if rest:
clapee = rest.strip()
karma.Karma.store.change(clapee, 1)
return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
return "/me claps %s, %s %s." % (clapv, adv, adj)
|
Clap for something
|
### Input:
Clap for something
### Response:
#vtb
def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
if rest:
clapee = rest.strip()
karma.Karma.store.change(clapee, 1)
return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
return "/me claps %s, %s %s." % (clapv, adv, adj)
|
#vtb
def exc_thrown_by_descriptor():
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False
|
Return True if the last exception was thrown by a
Descriptor instance.
|
### Input:
Return True if the last exception was thrown by a
Descriptor instance.
### Response:
#vtb
def exc_thrown_by_descriptor():
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False
|
#vtb
def AnnotateBED(bed, GTF, genome_file, bedcols=None, promoter=[1000,200]):
if type(bed) == type("string"):
bed=pd.read_table(bed,header=None)
bed.columns=bedcols.split(",")
print("Reading GTF file.")
sys.stdout.flush()
GTF=readGTF(GTF)
GTF["gene_name"]=retrieve_GTF_field("gene_name", GTF)
GTF["gene_id"]=retrieve_GTF_field("gene_id", GTF)
GTF["gene_name"]=GTF["gene_name"]+"/"+GTF["gene_id"]
GTF=GTF.drop(["gene_id"],axis=1)
print("Generating promoters annotation.")
sys.stdout.flush()
promoters=GTF[GTF["feature"]=="transcript"]
promoters_plus=promoters[promoters["strand"]=="+"]
promoters_minus=promoters[promoters["strand"]=="-"]
upstream=promoter[0]
downstream=promoter[1]
promoters_plus.loc[:,"promoter_start"]=promoters_plus.loc[:,"start"].astype(int)-upstream
promoters_plus.loc[:,"promoter_end"]=promoters_plus.loc[:,"start"].astype(int)+downstream
promoters_minus.loc[:,"promoter_start"]=promoters_minus["end"].astype(int)-downstream
promoters_minus.loc[:,"promoter_end"]=promoters_minus["end"].astype(int)+upstream
promoters=pd.concat([promoters_plus,promoters_minus])
promoters=promoters[["seqname","feature","promoter_start","promoter_end","gene_name"]]
promoters.columns=["seqname","feature","start","end","gene_name"]
promoters.loc[:,"feature"]="promoter"
promoters.drop_duplicates(inplace=True)
promoters.reset_index(inplace=True, drop=True)
chr_sizes=pd.read_table(genome_file,header=None)
chr_sizes.columns=["seqname","size"]
chr_sizes.loc[:,"seqname"]=chr_sizes["seqname"].astype(str)
promoters.loc[:,"seqname"]=promoters["seqname"].astype(str)
promoters=pd.merge(promoters,chr_sizes,how="left",on=["seqname"])
def CorrectStart(df):
s=df["start"]
if s < 0:
s=0
return s
def CorrectEnd(df):
s=df["end"]
e=df["size"]
if s > e:
s=e
return s
promoters.loc[:,"start"]=promoters.apply(CorrectStart,axis=1)
promoters.loc[:,"end"]=promoters.apply(CorrectEnd,axis=1)
promoters.drop(["size"],axis=1, inplace=True)
GTFs=GTF[["seqname","feature","start","end","gene_name"]]
GTFs=GTFs[ GTFs["feature"]!= "gene"]
GTFs.drop_duplicates(inplace=True)
GTFs.reset_index(inplace=True, drop=True)
GTFs=pd.concat([GTFs,promoters])
def NewName(df):
name=df["gene_name"]
feature=df["feature"]
if feature == "transcript":
res=name
else:
res=name+":"+feature
return res
GTFs.loc[:,"gene_name"]=GTFs.apply(NewName, axis=1)
GTFs=GTFs[["seqname","start","end","gene_name"]]
print( "Intersecting annotation tables and bed." )
sys.stdout.flush()
refGTF=dfTObedtool(GTFs)
pos=dfTObedtool(bed)
colsGTF=GTFs.columns.tolist()
newCols=bed.columns.tolist()
for f in colsGTF:
newCols.append(f+"_")
newCols_=[ s for s in newCols if s not in ["seqname_","start_", "end_"]]
pos=pos.intersect(refGTF, loj=True)
pos=pd.read_table(pos.fn , names=newCols)
pos=pos[newCols_]
print("Merging features.")
sys.stdout.flush()
def GetFeature(x):
if ":" in x:
res=x.split(":")[1]
else:
res=np.nan
return res
def GetName(x):
if ":" in x:
res=x.split(":")[0]
elif type(x) == type("string"):
if x != ".":
res=x
else:
res=np.nan
else:
res=np.nan
return res
pos["gene_feature_"]=pos["gene_name_"].apply( lambda x: GetFeature(x) )
pos["gene_name_"]=pos["gene_name_"].apply( lambda x: GetName(x) )
refcol=pos.columns.tolist()
refcol=[ s for s in refcol if s != "gene_feature_" ]
def CombineAnn(df):
def JOIN(x):
            return ", ".join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( gene_feature_ = JOIN("gene_feature_") ) )
pos_=pos.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
def MergeNameFeatures(df):
name=df["gene_name_"]
feature=df["gene_feature_"]
if (type(name) == type("string")) & (name != ".") :
if type(feature) == type("string"):
if len(feature) > 0:
res=name+": "+feature
else:
res=name
else:
res=name
else:
res=np.nan
return res
pos_["annotated_gene_features"]=pos_.apply(MergeNameFeatures,axis=1)
pos_=pos_.drop(["gene_name_","gene_feature_"],axis=1)
def CombineAnn(df):
def JOIN(x):
            return ", ".join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( annotated_gene_features = JOIN("annotated_gene_features") ) )
refcol=[ s for s in refcol if s != "gene_name_" ]
pos_=pos_.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
return pos_
|
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab separated values of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
:returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column.
|
### Input:
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab separated values of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
:returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column.
### Response:
#vtb
def AnnotateBED(bed, GTF, genome_file, bedcols=None, promoter=[1000,200]):
if type(bed) == type("string"):
bed=pd.read_table(bed,header=None)
bed.columns=bedcols.split(",")
print("Reading GTF file.")
sys.stdout.flush()
GTF=readGTF(GTF)
GTF["gene_name"]=retrieve_GTF_field("gene_name", GTF)
GTF["gene_id"]=retrieve_GTF_field("gene_id", GTF)
GTF["gene_name"]=GTF["gene_name"]+"/"+GTF["gene_id"]
GTF=GTF.drop(["gene_id"],axis=1)
print("Generating promoters annotation.")
sys.stdout.flush()
promoters=GTF[GTF["feature"]=="transcript"]
promoters_plus=promoters[promoters["strand"]=="+"]
promoters_minus=promoters[promoters["strand"]=="-"]
upstream=promoter[0]
downstream=promoter[1]
promoters_plus.loc[:,"promoter_start"]=promoters_plus.loc[:,"start"].astype(int)-upstream
promoters_plus.loc[:,"promoter_end"]=promoters_plus.loc[:,"start"].astype(int)+downstream
promoters_minus.loc[:,"promoter_start"]=promoters_minus["end"].astype(int)-downstream
promoters_minus.loc[:,"promoter_end"]=promoters_minus["end"].astype(int)+upstream
promoters=pd.concat([promoters_plus,promoters_minus])
promoters=promoters[["seqname","feature","promoter_start","promoter_end","gene_name"]]
promoters.columns=["seqname","feature","start","end","gene_name"]
promoters.loc[:,"feature"]="promoter"
promoters.drop_duplicates(inplace=True)
promoters.reset_index(inplace=True, drop=True)
chr_sizes=pd.read_table(genome_file,header=None)
chr_sizes.columns=["seqname","size"]
chr_sizes.loc[:,"seqname"]=chr_sizes["seqname"].astype(str)
promoters.loc[:,"seqname"]=promoters["seqname"].astype(str)
promoters=pd.merge(promoters,chr_sizes,how="left",on=["seqname"])
def CorrectStart(df):
s=df["start"]
if s < 0:
s=0
return s
def CorrectEnd(df):
s=df["end"]
e=df["size"]
if s > e:
s=e
return s
promoters.loc[:,"start"]=promoters.apply(CorrectStart,axis=1)
promoters.loc[:,"end"]=promoters.apply(CorrectEnd,axis=1)
promoters.drop(["size"],axis=1, inplace=True)
GTFs=GTF[["seqname","feature","start","end","gene_name"]]
GTFs=GTFs[ GTFs["feature"]!= "gene"]
GTFs.drop_duplicates(inplace=True)
GTFs.reset_index(inplace=True, drop=True)
GTFs=pd.concat([GTFs,promoters])
def NewName(df):
name=df["gene_name"]
feature=df["feature"]
if feature == "transcript":
res=name
else:
res=name+":"+feature
return res
GTFs.loc[:,"gene_name"]=GTFs.apply(NewName, axis=1)
GTFs=GTFs[["seqname","start","end","gene_name"]]
print( "Intersecting annotation tables and bed." )
sys.stdout.flush()
refGTF=dfTObedtool(GTFs)
pos=dfTObedtool(bed)
colsGTF=GTFs.columns.tolist()
newCols=bed.columns.tolist()
for f in colsGTF:
newCols.append(f+"_")
newCols_=[ s for s in newCols if s not in ["seqname_","start_", "end_"]]
pos=pos.intersect(refGTF, loj=True)
pos=pd.read_table(pos.fn , names=newCols)
pos=pos[newCols_]
print("Merging features.")
sys.stdout.flush()
def GetFeature(x):
if ":" in x:
res=x.split(":")[1]
else:
res=np.nan
return res
def GetName(x):
if ":" in x:
res=x.split(":")[0]
elif type(x) == type("string"):
if x != ".":
res=x
else:
res=np.nan
else:
res=np.nan
return res
pos["gene_feature_"]=pos["gene_name_"].apply( lambda x: GetFeature(x) )
pos["gene_name_"]=pos["gene_name_"].apply( lambda x: GetName(x) )
refcol=pos.columns.tolist()
refcol=[ s for s in refcol if s != "gene_feature_" ]
def CombineAnn(df):
def JOIN(x):
            return ", ".join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( gene_feature_ = JOIN("gene_feature_") ) )
pos_=pos.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
def MergeNameFeatures(df):
name=df["gene_name_"]
feature=df["gene_feature_"]
if (type(name) == type("string")) & (name != ".") :
if type(feature) == type("string"):
if len(feature) > 0:
res=name+": "+feature
else:
res=name
else:
res=name
else:
res=np.nan
return res
pos_["annotated_gene_features"]=pos_.apply(MergeNameFeatures,axis=1)
pos_=pos_.drop(["gene_name_","gene_feature_"],axis=1)
def CombineAnn(df):
def JOIN(x):
            return ", ".join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( annotated_gene_features = JOIN("annotated_gene_features") ) )
refcol=[ s for s in refcol if s != "gene_name_" ]
pos_=pos_.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
return pos_
|
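The promoter-window arithmetic deserves a standalone look: on the + strand the TSS is the transcript start, so the window is [start - upstream, start + downstream]; on the - strand the TSS is the end coordinate, so the window flips to [end - downstream, end + upstream]. A tiny pandas sketch:

import pandas as pd

upstream, downstream = 1000, 200
tx = pd.DataFrame({"start": [5000, 5000], "end": [9000, 9000], "strand": ["+", "-"]})
plus = tx[tx["strand"] == "+"].copy()
minus = tx[tx["strand"] == "-"].copy()
plus["promoter_start"] = plus["start"] - upstream      # 4000
plus["promoter_end"] = plus["start"] + downstream      # 5200
minus["promoter_start"] = minus["end"] - downstream    # 8800
minus["promoter_end"] = minus["end"] + upstream        # 10000
print(pd.concat([plus, minus])[["strand", "promoter_start", "promoter_end"]])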
#vtb
def process_file(self, path, dryrun):
if dryrun:
return path
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
return ret if len(ret) > 0 else None
|
Print the file's path.
|
### Input:
Print the file's path.
### Response:
#vtb
def process_file(self, path, dryrun):
if dryrun:
return path
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
return ret if len(ret) > 0 else None
|
#vtb
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
    # Lost lines restored following IPython's version of this helper (an assumption).
    if isinstance(connection_info, basestring):
        # it is a path: unpack the JSON connection file
        with open(connection_info) as f:
            connection_info = json.loads(f.read())
    cf = connection_info
    lports = tunnel.select_random_ports(4)
    rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
    remote_ip = cf['ip']
    if tunnel.try_passwordless_ssh(sshserver, sshkey):
        password = False
    else:
        password = getpass("SSH Password for %s: " % sshserver)
    for lp, rp in zip(lports, rports):
        tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
    return tuple(lports)
|
tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
|
### Input:
tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
### Response:
#vtb
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
    # Lost lines restored following IPython's version of this helper (an assumption).
    if isinstance(connection_info, basestring):
        # it is a path: unpack the JSON connection file
        with open(connection_info) as f:
            connection_info = json.loads(f.read())
    cf = connection_info
    lports = tunnel.select_random_ports(4)
    rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
    remote_ip = cf['ip']
    if tunnel.try_passwordless_ssh(sshserver, sshkey):
        password = False
    else:
        password = getpass("SSH Password for %s: " % sshserver)
    for lp, rp in zip(lports, rports):
        tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
    return tuple(lports)
|
#vtb
def get_domain_config(self, domain):
domain_root = self.identify_domain_root(domain)
        host = ''
        if len(domain_root) != len(domain):
            host = domain.replace('.' + domain_root, '')
domain_connect_api = self._identify_domain_connect_api(domain_root)
ret = self._get_domain_config_for_root(domain_root, domain_connect_api)
return DomainConnectConfig(domain, domain_root, host, ret)
|
Discovers the domain name and resolves the DNS provider configuration
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
|
### Input:
Discovers the domain name and resolves the DNS provider configuration
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
### Response:
#vtb
def get_domain_config(self, domain):
domain_root = self.identify_domain_root(domain)
        host = ''
        if len(domain_root) != len(domain):
            host = domain.replace('.' + domain_root, '')
domain_connect_api = self._identify_domain_connect_api(domain_root)
ret = self._get_domain_config_for_root(domain_root, domain_connect_api)
return DomainConnectConfig(domain, domain_root, host, ret)
|
#vtb
def my_protocol_parser(out, buf):
while True:
tp = yield from buf.read(5)
if tp in (MSG_PING, MSG_PONG):
            yield from buf.skipuntil(b'\r\n')
out.feed_data(Message(tp, None))
elif tp == MSG_STOP:
out.feed_data(Message(tp, None))
elif tp == MSG_TEXT:
            text = yield from buf.readuntil(b'\r\n')
out.feed_data(Message(tp, text.strip().decode()))
else:
raise ValueError()
|
Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
|
### Input:
Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
### Response:
#vtb
def my_protocol_parser(out, buf):
while True:
tp = yield from buf.read(5)
if tp in (MSG_PING, MSG_PONG):
            yield from buf.skipuntil(b'\r\n')
out.feed_data(Message(tp, None))
elif tp == MSG_STOP:
out.feed_data(Message(tp, None))
elif tp == MSG_TEXT:
            text = yield from buf.readuntil(b'\r\n')
out.feed_data(Message(tp, text.strip().decode()))
else:
raise ValueError()
|
#vtb
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
if convert and isinstance(data, six.string_types):
try:
data = json.dumps(data)
except TypeError as ex:
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
_update_hasher(hasher, data, types=types)
text = _digest_hasher(hasher, hashlen, base)
return text
|
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=False):
if True, try to convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hash_data with hasher='sha1' will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
|
### Input:
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=False):
if True, try to convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hash_data with hasher='sha1' will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
### Response:
#vtb
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
if convert and isinstance(data, six.string_types):
try:
data = json.dumps(data)
except TypeError as ex:
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
_update_hasher(hasher, data, types=types)
text = _digest_hasher(hasher, hashlen, base)
return text
|
#vtb
def haiz(obj, chart):
objGender = obj.gender()
objFaction = obj.faction()
if obj.id == const.MERCURY:
sun = chart.getObject(const.SUN)
orientalityM = orientality(obj, sun)
if orientalityM == ORIENTAL:
objGender = const.MASCULINE
objFaction = const.DIURNAL
else:
objGender = const.FEMININE
objFaction = const.NOCTURNAL
signGender = props.sign.gender[obj.sign]
genderConformity = (objGender == signGender)
factionConformity = False
diurnalChart = chart.isDiurnal()
if obj.id == const.SUN and not diurnalChart:
factionConformity = False
else:
objHouse = chart.houses.getObjectHouse(obj)
if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or
objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction):
factionConformity = True
if (genderConformity and factionConformity):
return HAIZ
elif (not genderConformity and not factionConformity):
return CHAIZ
else:
return None
|
Returns if an object is in Haiz.
|
### Input:
Returns if an object is in Haiz.
### Response:
#vtb
def haiz(obj, chart):
objGender = obj.gender()
objFaction = obj.faction()
if obj.id == const.MERCURY:
sun = chart.getObject(const.SUN)
orientalityM = orientality(obj, sun)
if orientalityM == ORIENTAL:
objGender = const.MASCULINE
objFaction = const.DIURNAL
else:
objGender = const.FEMININE
objFaction = const.NOCTURNAL
signGender = props.sign.gender[obj.sign]
genderConformity = (objGender == signGender)
factionConformity = False
diurnalChart = chart.isDiurnal()
if obj.id == const.SUN and not diurnalChart:
factionConformity = False
else:
objHouse = chart.houses.getObjectHouse(obj)
if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or
objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction):
factionConformity = True
if (genderConformity and factionConformity):
return HAIZ
elif (not genderConformity and not factionConformity):
return CHAIZ
else:
return None
|
#vtb
def scrape_wikinews(conn, project, articleset, query):
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
|
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param query: The search query
|
### Input:
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param query: The search query
### Response:
#vtb
def scrape_wikinews(conn, project, articleset, query):
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
|
#vtb
def _send_textmetrics(metrics):
    data = [' '.join(map(six.text_type, metric)) for metric in metrics] + ['']
    return '\n'.join(data)
|
Format metrics for the carbon plaintext protocol
|
### Input:
Format metrics for the carbon plaintext protocol
### Response:
#vtb
def _send_textmetrics(metrics):
    data = [' '.join(map(six.text_type, metric)) for metric in metrics] + ['']
    return '\n'.join(data)
|
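Each metric is serialized as the space-joined carbon plaintext line "path value timestamp", and the trailing empty element yields the final newline the receiver expects:

import six

metrics = [("servers.web1.load", 0.42, 1507000000),
           ("servers.web1.mem", 1024, 1507000000)]
data = [" ".join(map(six.text_type, metric)) for metric in metrics] + [""]
print(repr("\n".join(data)))
# 'servers.web1.load 0.42 1507000000\nservers.web1.mem 1024 1507000000\n'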
#vtb
def get_citation_by_reference(self, type: str, reference: str) -> Optional[Citation]:
citation_hash = hash_citation(type=type, reference=reference)
return self.get_citation_by_hash(citation_hash)
|
Get a citation object by its type and reference.
|
### Input:
Get a citation object by its type and reference.
### Response:
#vtb
def get_citation_by_reference(self, type: str, reference: str) -> Optional[Citation]:
citation_hash = hash_citation(type=type, reference=reference)
return self.get_citation_by_hash(citation_hash)
|
#vtb
def entropy(args):
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score))))
|
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
|
### Input:
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
### Response:
#vtb
def entropy(args):
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score))))
|
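entropy_score is defined elsewhere in the package; as a stand-in (an assumption, not the project's exact formula), the Shannon entropy of the k-mer's base composition separates the low- and high-complexity cases:

import math
from collections import Counter

def entropy_score_sketch(kmer):
    # Shannon entropy (bits) of the base composition; hypothetical stand-in
    # for the package's entropy_score.
    n = float(len(kmer))
    return -sum((c / n) * math.log(c / n, 2) for c in Counter(kmer).values())

print(entropy_score_sketch("AAAAAAAAAAAGAAGAAAGAAA"))  # low, ~0.6 bits
print(entropy_score_sketch("ACGTACGTACGTACGTACGTAC"))  # high, ~2.0 bits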
#vtb
def load_tabs(self):
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
                self._table_dict[table._meta.name] = {'table': table,
                                                      'tab': tab}
|
Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
|
### Input:
Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
### Response:
#vtb
def load_tabs(self):
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
                self._table_dict[table._meta.name] = {'table': table,
                                                      'tab': tab}
|
#vtb
def abort(*args, **kwargs):
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value
|
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
|
### Input:
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
### Response:
#vtb
def abort(*args, **kwargs):
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value
|
#vtb
def sigma_cached(self, psd):
    if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
    if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
if pycbc.waveform.waveform_norm_exists(self.approximant):
            if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
            if not hasattr(self, 'sigma_scale'):
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
            if not hasattr(self, 'sslice'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
            if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
return self._sigmasq[key]
|
Cache the sigma calculation for use in tandem with the FilterBank class
|
### Input:
Cache the sigma calculation for use in tandem with the FilterBank class
### Response:
#vtb
def sigma_cached(self, psd):
    if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
    if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
if pycbc.waveform.waveform_norm_exists(self.approximant):
            if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
            if not hasattr(self, 'sigma_scale'):
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
            if not hasattr(self, 'sslice'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
            if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
return self._sigmasq[key]
|
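The caching idea above, reduced to its core: results are memoized per PSD keyed on id(psd). A minimal sketch assuming a plain dict in place of pycbc's LimitedSizeDict (note id() keys are only valid while the keyed object is alive):

class CachedTemplate:
    def __init__(self):
        self._sigmasq = {}  # the real code bounds this cache's size

    def sigma_cached(self, psd, compute):
        key = id(psd)  # identity, not equality: a new PSD object recomputes
        if key not in self._sigmasq:
            self._sigmasq[key] = compute(psd)
        return self._sigmasq[key]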
#vtb
def depends (self, d):
    self.dependencies_ = sorted(unique(self.dependencies_ + d))  # list.sort() returns None; keep the sorted result
|
Adds additional instances of 'VirtualTarget' that this
one depends on.
|
### Input:
Adds additional instances of 'VirtualTarget' that this
one depends on.
### Response:
#vtb
def depends (self, d):
    self.dependencies_ = sorted(unique(self.dependencies_ + d))  # list.sort() returns None; keep the sorted result
|
#vtb
def add_module(self, module, cython=False):
    name_module = module.__name__.split('.')[-1]
    short = ('|%s|'
             % name_module)
    long = (':mod:`~%s`'
            % module.__name__)
self._short2long[short] = long
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
            short = ('|%s|'
                     % name_member)
            medium = ('|%s.%s|'
                      % (name_module,
                         name_member))
            long = (':%s:`~%s.%s`'
                    % (role,
                       module.__name__,
                       name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
                        short = ('|%s.%s|'
                                 % (name_member,
                                    name_submember))
                        medium = ('|%s.%s.%s|'
                                  % (name_module,
                                     name_member,
                                     name_submember))
                        long = (':%s:`~%s.%s.%s`'
                                % (role,
                                   module.__name__,
                                   name_member,
                                   name_submember))
self.add_substitution(short, medium, long, module)
|
Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|
### Input:
Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
### Response:
#vtb
def add_module(self, module, cython=False):
    name_module = module.__name__.split('.')[-1]
    short = ('|%s|'
             % name_module)
    long = (':mod:`~%s`'
            % module.__name__)
self._short2long[short] = long
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
            short = ('|%s|'
                     % name_member)
            medium = ('|%s.%s|'
                      % (name_module,
                         name_member))
            long = (':%s:`~%s.%s`'
                    % (role,
                       module.__name__,
                       name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
                        short = ('|%s.%s|'
                                 % (name_member,
                                    name_submember))
                        medium = ('|%s.%s.%s|'
                                  % (name_module,
                                     name_member,
                                     name_submember))
                        long = (':%s:`~%s.%s.%s`'
                                % (role,
                                   module.__name__,
                                   name_member,
                                   name_submember))
self.add_substitution(short, medium, long, module)
|
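The three substitution levels the doctest demonstrates can be reproduced in isolation; a hypothetical sketch (helper name chosen for illustration, not hydpy's API):

def substitutions(module_name, member_name, role):
    """Build the short/medium/long strings shown in the doctest above."""
    short = '|%s|' % member_name                                     # |clip|
    medium = '|%s.%s|' % (module_name.split('.')[-1], member_name)   # |numpy.clip|
    long = ':%s:`~%s.%s`' % (role, module_name, member_name)         # :func:`~numpy.clip`
    return short, medium, long

print(substitutions('numpy', 'clip', 'func'))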
#vtb
def _hashable_bytes(data):
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode()
else:
raise TypeError(data)
|
Coerce strings to hashable bytes.
|
### Input:
Coerce strings to hashable bytes.
### Response:
#vtb
def _hashable_bytes(data):
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode()
else:
raise TypeError(data)
|
#vtb
def _set_least_batch_id(self, txn_signature):
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least
|
Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
|
### Input:
Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
### Response:
#vtb
def _set_least_batch_id(self, txn_signature):
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least
|
#vtb
def with_reconnect(func):
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector
|
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
|
### Input:
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
### Response:
#vtb
def with_reconnect(func):
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector
|
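The same retry shape, generalized to any transient exception; a sketch with illustrative defaults (20 tries, 250 ms), not pymongo's API:

import functools
import time

def retry(exc_type, attempts=20, delay=0.25):
    """Retry a callable on a transient error, re-raising after the last try."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(attempts):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == attempts - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator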
#vtb
def stats(self):
    status, _, body = self._request('GET', self.stats_path(),
                                    {'Accept': 'application/json'})
if status == 200:
return json.loads(bytes_to_str(body))
else:
return None
|
Gets performance statistics and server information
|
### Input:
Gets performance statistics and server information
### Response:
#vtb
def stats(self):
    status, _, body = self._request('GET', self.stats_path(),
                                    {'Accept': 'application/json'})
if status == 200:
return json.loads(bytes_to_str(body))
else:
return None
|
#vtb
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
r_type = None
    if cysparse_type in ['COMPLEX64_t']:
        r_type = 'FLOAT32_t'
    elif cysparse_type in ['COMPLEX128_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['COMPLEX256_t']:
        r_type = 'FLOAT128_t'
else:
raise TypeError("Not a recognized complex type")
return r_type
|
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse_type: the complex type to convert.
|
### Input:
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse_type: the complex type to convert.
### Response:
#vtb
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
r_type = None
    if cysparse_type in ['COMPLEX64_t']:
        r_type = 'FLOAT32_t'
    elif cysparse_type in ['COMPLEX128_t']:
        r_type = 'FLOAT64_t'
    elif cysparse_type in ['COMPLEX256_t']:
        r_type = 'FLOAT128_t'
else:
raise TypeError("Not a recognized complex type")
return r_type
|
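The if/elif chain is a fixed mapping, so a table-driven equivalent reads more directly; a sketch assuming the type names implied by the docstring's COMPLEX128_t -> FLOAT64_t example:

COMPLEX_TO_REAL = {
    'COMPLEX64_t': 'FLOAT32_t',
    'COMPLEX128_t': 'FLOAT64_t',
    'COMPLEX256_t': 'FLOAT128_t',
}

def real_type_for(complex_type):
    try:
        return COMPLEX_TO_REAL[complex_type]
    except KeyError:
        raise TypeError("Not a recognized complex type")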
#vtb
def get_mysql_credentials(cfg_file):
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print()
sys.exit(1)
except IOError:
print(, cfg_file)
sys.exit(1)
value = parser.get(, )
try:
sys.exit(1)
|
Get the credentials and database name from options in config file.
|
### Input:
Get the credentials and database name from options in config file.
### Response:
#vtb
def get_mysql_credentials(cfg_file):
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print()
sys.exit(1)
except IOError:
print(, cfg_file)
sys.exit(1)
value = parser.get(, )
try:
sys.exit(1)
|
#vtb
def show_grid(images, rows=None, cols=None):
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid)
|
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
|
### Input:
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
### Response:
#vtb
def show_grid(images, rows=None, cols=None):
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid)
|
#vtb
def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid
|
Creates a grid of axes bounded within a given rectangle.
|
### Input:
Creates a grid of axes bounded within a given rectangle.
### Response:
#vtb
def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid
|
#vtb
def from_kwargs(cls, **kwargs):
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names)
|
Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
|
### Input:
Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
### Response:
#vtb
def from_kwargs(cls, **kwargs):
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names)
|
#vtb
def web(connection, host, port):
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port)
|
Run a combined web interface.
|
### Input:
Run a combined web interface.
### Response:
#vtb
def web(connection, host, port):
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port)
|
#vtb
def reflectance_from_tbs(self, sun_zenith, tb_near_ir, tb_thermal, **kwargs):
    if hasattr(tb_near_ir, 'compute') or hasattr(tb_thermal, 'compute'):
compute = False
else:
compute = True
    if hasattr(tb_near_ir, 'mask') or hasattr(tb_thermal, 'mask'):
is_masked = True
else:
is_masked = False
if np.isscalar(tb_near_ir):
tb_nir = np.array([tb_near_ir, ])
else:
tb_nir = np.asanyarray(tb_near_ir)
if np.isscalar(tb_thermal):
tb_therm = np.array([tb_thermal, ])
else:
tb_therm = np.asanyarray(tb_thermal)
if tb_therm.shape != tb_nir.shape:
        errmsg = 'Dimension mismatch between tb_thermal and tb_near_ir: {} vs {}'.format(
            str(tb_therm.shape), str(tb_nir.shape))
raise ValueError(errmsg)
    tb_ir_co2 = kwargs.get('tb_ir_co2')
    lut = kwargs.get('lut', self.lut)
if tb_ir_co2 is None:
co2corr = False
tbco2 = None
else:
co2corr = True
if np.isscalar(tb_ir_co2):
tbco2 = np.array([tb_ir_co2, ])
else:
tbco2 = np.asanyarray(tb_ir_co2)
if not self.rsr:
raise NotImplementedError("Reflectance calculations without "
"rsr not yet supported!")
    self._rad3x_t11 = self.tb2radiance(tb_therm, lut=lut)['radiance']
thermal_emiss_one = self._rad3x_t11 * self.rsr_integral
    l_nir = self.tb2radiance(tb_nir, lut=lut)['radiance'] * self.rsr_integral
if thermal_emiss_one.ravel().shape[0] < 10:
        LOG.info('thermal_emiss_one = %s', str(thermal_emiss_one))
if l_nir.ravel().shape[0] < 10:
        LOG.info('l_nir = %s', str(l_nir))
sunzmask = (sun_zenith < 0.0) | (sun_zenith > 88.0)
sunz = where(sunzmask, 88.0, sun_zenith)
mu0 = np.cos(np.deg2rad(sunz))
self._rad3x = l_nir
self._solar_radiance = self.solar_flux * mu0 / np.pi
if co2corr:
self.derive_rad39_corr(tb_therm, tbco2)
LOG.info("CO2 correction applied...")
else:
self._rad3x_correction = 1.0
nomin = l_nir - thermal_emiss_one * self._rad3x_correction
denom = self._solar_radiance - thermal_emiss_one * self._rad3x_correction
data = nomin / denom
mask = (self._solar_radiance - thermal_emiss_one *
self._rad3x_correction) < EPSILON
logical_or(sunzmask, mask, out=mask)
logical_or(mask, np.isnan(tb_nir), out=mask)
self._r3x = where(mask, np.nan, data)
    if hasattr(self._r3x, 'compute') and compute:
res = self._r3x.compute()
else:
res = self._r3x
if is_masked:
res = np.ma.masked_array(res, mask=np.isnan(res))
return res
|
The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
|
### Input:
The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
### Response:
#vtb
def reflectance_from_tbs(self, sun_zenith, tb_near_ir, tb_thermal, **kwargs):
    if hasattr(tb_near_ir, 'compute') or hasattr(tb_thermal, 'compute'):
compute = False
else:
compute = True
    if hasattr(tb_near_ir, 'mask') or hasattr(tb_thermal, 'mask'):
is_masked = True
else:
is_masked = False
if np.isscalar(tb_near_ir):
tb_nir = np.array([tb_near_ir, ])
else:
tb_nir = np.asanyarray(tb_near_ir)
if np.isscalar(tb_thermal):
tb_therm = np.array([tb_thermal, ])
else:
tb_therm = np.asanyarray(tb_thermal)
if tb_therm.shape != tb_nir.shape:
        errmsg = 'Dimension mismatch between tb_thermal and tb_near_ir: {} vs {}'.format(
            str(tb_therm.shape), str(tb_nir.shape))
raise ValueError(errmsg)
    tb_ir_co2 = kwargs.get('tb_ir_co2')
    lut = kwargs.get('lut', self.lut)
if tb_ir_co2 is None:
co2corr = False
tbco2 = None
else:
co2corr = True
if np.isscalar(tb_ir_co2):
tbco2 = np.array([tb_ir_co2, ])
else:
tbco2 = np.asanyarray(tb_ir_co2)
if not self.rsr:
raise NotImplementedError("Reflectance calculations without "
"rsr not yet supported!")
    self._rad3x_t11 = self.tb2radiance(tb_therm, lut=lut)['radiance']
thermal_emiss_one = self._rad3x_t11 * self.rsr_integral
    l_nir = self.tb2radiance(tb_nir, lut=lut)['radiance'] * self.rsr_integral
if thermal_emiss_one.ravel().shape[0] < 10:
        LOG.info('thermal_emiss_one = %s', str(thermal_emiss_one))
if l_nir.ravel().shape[0] < 10:
        LOG.info('l_nir = %s', str(l_nir))
sunzmask = (sun_zenith < 0.0) | (sun_zenith > 88.0)
sunz = where(sunzmask, 88.0, sun_zenith)
mu0 = np.cos(np.deg2rad(sunz))
self._rad3x = l_nir
self._solar_radiance = self.solar_flux * mu0 / np.pi
if co2corr:
self.derive_rad39_corr(tb_therm, tbco2)
LOG.info("CO2 correction applied...")
else:
self._rad3x_correction = 1.0
nomin = l_nir - thermal_emiss_one * self._rad3x_correction
denom = self._solar_radiance - thermal_emiss_one * self._rad3x_correction
data = nomin / denom
mask = (self._solar_radiance - thermal_emiss_one *
self._rad3x_correction) < EPSILON
logical_or(sunzmask, mask, out=mask)
logical_or(mask, np.isnan(tb_nir), out=mask)
self._r3x = where(mask, np.nan, data)
    if hasattr(self._r3x, 'compute') and compute:
res = self._r3x.compute()
else:
res = self._r3x
if is_masked:
res = np.ma.masked_array(res, mask=np.isnan(res))
return res
|
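Numerically, the method solves the standard 3.x um reflectance relation r = (L_nir - E_thermal) / (S_solar - E_thermal); a sketch with made-up radiances (symbols illustrative, not pyspectral's API):

import numpy as np

l_nir = np.array([0.50])           # observed 3.x um radiance (made-up)
thermal_emiss = np.array([0.30])   # thermal-only contribution at 3.x um
solar_radiance = np.array([1.20])  # solar_flux * cos(sun_zenith) / pi

refl = (l_nir - thermal_emiss) / (solar_radiance - thermal_emiss)
print(refl)  # ~0.22: dimensionless, expected to fall in [0, 1]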
#vtb
def password_change(self, wallet, password):
    wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
    resp = self.call('password_change', payload)
    return resp['changed'] == '1'
|
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
|
### Input:
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
### Response:
#vtb
def password_change(self, wallet, password):
    wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
    resp = self.call('password_change', payload)
    return resp['changed'] == '1'
|
#vtb
def generate_classified_legend(
analysis,
exposure,
hazard,
use_rounding,
debug_mode):
analysis_row = next(analysis.getFeatures())
thresholds = hazard.keywords.get()
if thresholds:
hazard_unit = hazard.keywords.get()
hazard_unit = definition(hazard_unit)[]
else:
hazard_unit = None
exposure = exposure.keywords[]
exposure_definitions = definition(exposure)
exposure_units = exposure_definitions[]
exposure_unit = exposure_units[0]
coefficient = 1
if len(exposure_units) > 1:
delta = coefficient_between_units(
exposure_units[1], exposure_units[0])
all_values_are_greater = True
for i, hazard_class in enumerate(hazard_classification[]):
field_name = hazard_count_field[] % hazard_class[]
try:
value = analysis_row[field_name]
except KeyError:
value = 0
if 0 < value < delta:
all_values_are_greater = False
if all_values_are_greater:
exposure_unit = exposure_units[1]
coefficient = delta
classes = OrderedDict()
for i, hazard_class in enumerate(hazard_classification[]):
field_name = hazard_count_field[] % hazard_class[]
try:
value = analysis_row[field_name]
except KeyError:
value = 0
value = format_number(
value,
use_rounding,
exposure_definitions[],
coefficient)
minimum = None
maximum = None
if thresholds:
if i == 0:
minimum = thresholds[hazard_class[]][0]
elif i == len(hazard_classification[]) - 1:
maximum = thresholds[hazard_class[]][1]
else:
minimum = thresholds[hazard_class[]][0]
maximum = thresholds[hazard_class[]][1]
label = _format_label(
hazard_class=hazard_class[],
value=value,
exposure_unit=exposure_unit[],
minimum=minimum,
maximum=maximum,
hazard_unit=hazard_unit)
classes[hazard_class[]] = (hazard_class[], label)
if exposure_definitions[] or debug_mode:
classes[not_exposed_class[]] = _add_not_exposed(
analysis_row,
use_rounding,
exposure_definitions[],
exposure_unit[],
coefficient)
return classes
|
Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean indicating whether to round numbers in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed class.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
|
### Input:
Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean indicating whether to round numbers in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed class.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
### Response:
#vtb
def generate_classified_legend(
analysis,
exposure,
hazard,
use_rounding,
debug_mode):
analysis_row = next(analysis.getFeatures())
thresholds = hazard.keywords.get()
if thresholds:
hazard_unit = hazard.keywords.get()
hazard_unit = definition(hazard_unit)[]
else:
hazard_unit = None
exposure = exposure.keywords[]
exposure_definitions = definition(exposure)
exposure_units = exposure_definitions[]
exposure_unit = exposure_units[0]
coefficient = 1
if len(exposure_units) > 1:
delta = coefficient_between_units(
exposure_units[1], exposure_units[0])
all_values_are_greater = True
for i, hazard_class in enumerate(hazard_classification[]):
field_name = hazard_count_field[] % hazard_class[]
try:
value = analysis_row[field_name]
except KeyError:
value = 0
if 0 < value < delta:
all_values_are_greater = False
if all_values_are_greater:
exposure_unit = exposure_units[1]
coefficient = delta
classes = OrderedDict()
for i, hazard_class in enumerate(hazard_classification[]):
field_name = hazard_count_field[] % hazard_class[]
try:
value = analysis_row[field_name]
except KeyError:
value = 0
value = format_number(
value,
use_rounding,
exposure_definitions[],
coefficient)
minimum = None
maximum = None
if thresholds:
if i == 0:
minimum = thresholds[hazard_class[]][0]
elif i == len(hazard_classification[]) - 1:
maximum = thresholds[hazard_class[]][1]
else:
minimum = thresholds[hazard_class[]][0]
maximum = thresholds[hazard_class[]][1]
label = _format_label(
hazard_class=hazard_class[],
value=value,
exposure_unit=exposure_unit[],
minimum=minimum,
maximum=maximum,
hazard_unit=hazard_unit)
classes[hazard_class[]] = (hazard_class[], label)
if exposure_definitions[] or debug_mode:
classes[not_exposed_class[]] = _add_not_exposed(
analysis_row,
use_rounding,
exposure_definitions[],
exposure_unit[],
coefficient)
return classes
|
#vtb
def _evaluate(self,R,z,phi=0.,t=0.):
l,n = bovy_coords.Rz_to_lambdanu(R,z,ac=self._ac,Delta=self._Delta)
return -1./(nu.sqrt(l) + nu.sqrt(n))
|
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
|
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
### Response:
#vtb
def _evaluate(self,R,z,phi=0.,t=0.):
l,n = bovy_coords.Rz_to_lambdanu(R,z,ac=self._ac,Delta=self._Delta)
return -1./(nu.sqrt(l) + nu.sqrt(n))
|
#vtb
def plotActivation(self, position=None, time=None, velocity=None):
self.ax1.clear()
y = self.activations["n"] + self.activations["s"] + self.activations["e"] + \
self.activations["w"]
self.ax1.matshow(y.reshape(self.dimensions))
self.ax2.clear()
self.ax2.matshow(self.activationsI.reshape(self.dimensions))
self.ax3.clear()
self.ax3.matshow(self.activationHistoryI.reshape(self.dimensions))
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
plt.suptitle(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.ax3.set_xlabel("Boosting activity")
plt.tight_layout()
self.fig.canvas.draw()
|
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
|
### Input:
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
### Response:
#vtb
def plotActivation(self, position=None, time=None, velocity=None):
self.ax1.clear()
y = self.activations["n"] + self.activations["s"] + self.activations["e"] + \
self.activations["w"]
self.ax1.matshow(y.reshape(self.dimensions))
self.ax2.clear()
self.ax2.matshow(self.activationsI.reshape(self.dimensions))
self.ax3.clear()
self.ax3.matshow(self.activationHistoryI.reshape(self.dimensions))
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
plt.suptitle(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.ax3.set_xlabel("Boosting activity")
plt.tight_layout()
self.fig.canvas.draw()
|
#vtb
def get_dataset(self, key, info):
    datadict = {
        1000: ['EV_250_Aggr1km_RefSB',
               'EV_500_Aggr1km_RefSB',
               'EV_1KM_RefSB',
               'EV_1KM_Emissive'],
        500: ['EV_250_Aggr500_RefSB',
              'EV_500_RefSB'],
        250: ['EV_250_RefSB']}
    platform_name = self.metadata['INVENTORYMETADATA'][
        'ASSOCIATEDPLATFORMINSTRUMENTSENSOR'][
        'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER'][
        'ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
    info.update({'platform_name': 'EOS-' + platform_name})
    info.update({'sensor': 'modis'})
if self.resolution != key.resolution:
return
datasets = datadict[self.resolution]
for dataset in datasets:
subdata = self.sd.select(dataset)
var_attrs = subdata.attributes()
band_names = var_attrs["band_names"].split(",")
try:
index = band_names.index(key.name)
except ValueError:
continue
uncertainty = self.sd.select(dataset + "_Uncert_Indexes")
        array = xr.DataArray(from_sds(subdata, chunks=CHUNK_SIZE)[index, :, :],
                             dims=['y', 'x']).astype(np.float32)
        valid_range = var_attrs['valid_range']
array = array.where(array >= np.float32(valid_range[0]))
array = array.where(array <= np.float32(valid_range[1]))
array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15)
        if key.calibration == 'brightness_temperature':
            projectable = calibrate_bt(array, var_attrs, index, key.name)
            info.setdefault('units', 'K')
            info.setdefault('standard_name', 'toa_brightness_temperature')
        elif key.calibration == 'reflectance':
            projectable = calibrate_refl(array, var_attrs, index)
            info.setdefault('units', '%')
            info.setdefault('standard_name',
                            'toa_bidirectional_reflectance')
        elif key.calibration == 'radiance':
            projectable = calibrate_radiance(array, var_attrs, index)
            info.setdefault('units', var_attrs.get('radiance_units'))
            info.setdefault('standard_name',
                            'toa_outgoing_radiance_per_unit_wavelength')
        elif key.calibration == 'counts':
            projectable = calibrate_counts(array, var_attrs, index)
            info.setdefault('units', 'counts')
            info.setdefault('standard_name', 'counts')
else:
raise ValueError("Unknown calibration for "
"key: {}".format(key))
projectable.attrs = info
return projectable
|
Read data from file and return the corresponding projectables.
|
### Input:
Read data from file and return the corresponding projectables.
### Response:
#vtb
def get_dataset(self, key, info):
    datadict = {
        1000: ['EV_250_Aggr1km_RefSB',
               'EV_500_Aggr1km_RefSB',
               'EV_1KM_RefSB',
               'EV_1KM_Emissive'],
        500: ['EV_250_Aggr500_RefSB',
              'EV_500_RefSB'],
        250: ['EV_250_RefSB']}
    platform_name = self.metadata['INVENTORYMETADATA'][
        'ASSOCIATEDPLATFORMINSTRUMENTSENSOR'][
        'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER'][
        'ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
    info.update({'platform_name': 'EOS-' + platform_name})
    info.update({'sensor': 'modis'})
if self.resolution != key.resolution:
return
datasets = datadict[self.resolution]
for dataset in datasets:
subdata = self.sd.select(dataset)
var_attrs = subdata.attributes()
band_names = var_attrs["band_names"].split(",")
try:
index = band_names.index(key.name)
except ValueError:
continue
uncertainty = self.sd.select(dataset + "_Uncert_Indexes")
        array = xr.DataArray(from_sds(subdata, chunks=CHUNK_SIZE)[index, :, :],
                             dims=['y', 'x']).astype(np.float32)
        valid_range = var_attrs['valid_range']
array = array.where(array >= np.float32(valid_range[0]))
array = array.where(array <= np.float32(valid_range[1]))
array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15)
        if key.calibration == 'brightness_temperature':
            projectable = calibrate_bt(array, var_attrs, index, key.name)
            info.setdefault('units', 'K')
            info.setdefault('standard_name', 'toa_brightness_temperature')
        elif key.calibration == 'reflectance':
            projectable = calibrate_refl(array, var_attrs, index)
            info.setdefault('units', '%')
            info.setdefault('standard_name',
                            'toa_bidirectional_reflectance')
        elif key.calibration == 'radiance':
            projectable = calibrate_radiance(array, var_attrs, index)
            info.setdefault('units', var_attrs.get('radiance_units'))
            info.setdefault('standard_name',
                            'toa_outgoing_radiance_per_unit_wavelength')
        elif key.calibration == 'counts':
            projectable = calibrate_counts(array, var_attrs, index)
            info.setdefault('units', 'counts')
            info.setdefault('standard_name', 'counts')
else:
raise ValueError("Unknown calibration for "
"key: {}".format(key))
projectable.attrs = info
return projectable
|
#vtb
def _read_by_weight(self, F, att_weights, value):
output = F.batch_dot(att_weights, value)
return output
|
Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
|
### Input:
Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
### Response:
#vtb
def _read_by_weight(self, F, att_weights, value):
output = F.batch_dot(att_weights, value)
return output
|
#vtb
def for_meters(cls, meter_x, meter_y, zoom):
point = Point.from_meters(meter_x=meter_x, meter_y=meter_y)
pixel_x, pixel_y = point.pixels(zoom=zoom)
return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom)
|
Creates a tile from X Y meters in Spherical Mercator EPSG:900913
|
### Input:
Creates a tile from X Y meters in Spherical Mercator EPSG:900913
### Response:
#vtb
def for_meters(cls, meter_x, meter_y, zoom):
point = Point.from_meters(meter_x=meter_x, meter_y=meter_y)
pixel_x, pixel_y = point.pixels(zoom=zoom)
return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom)
|
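The meters -> pixels -> tile chain rests on the Web-Mercator resolution formula, meters_per_pixel = 2 * pi * R / (256 * 2**zoom); a self-contained sketch (standard EPSG:900913 constants; origin and rounding conventions may differ from this library):

import math

ORIGIN_SHIFT = 2 * math.pi * 6378137 / 2.0  # half the projected world width, ~20037508.34 m
TILE_SIZE = 256

def meters_to_tile(meter_x, meter_y, zoom):
    resolution = (2 * ORIGIN_SHIFT / TILE_SIZE) / (2 ** zoom)  # meters per pixel
    pixel_x = (meter_x + ORIGIN_SHIFT) / resolution
    pixel_y = (meter_y + ORIGIN_SHIFT) / resolution
    return int(pixel_x // TILE_SIZE), int(pixel_y // TILE_SIZE)

print(meters_to_tile(0.0, 0.0, 1))  # (1, 1): the origin sits where four tiles meet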
#vtb
def multiple_sequence_alignment(seqs_fp, threads=1):
logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment for file %s' % seqs_fp)
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
    msa_fp = seqs_fp + '.msa'
    params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
              '--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
        logger.info('msa failed for file %s' % seqs_fp)
        logger.debug('stderr: %s' % serr)
return None
return msa_fp
|
Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
|
### Input:
Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
### Response:
#vtb
def multiple_sequence_alignment(seqs_fp, threads=1):
logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment for file %s' % seqs_fp)
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
    msa_fp = seqs_fp + '.msa'
    params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
              '--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
        logger.info('msa failed for file %s' % seqs_fp)
        logger.debug('stderr: %s' % serr)
return None
return msa_fp
|
#vtb
def contains(self, string):
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
|
Summary
Returns:
TYPE: Description
|
### Input:
Summary
Returns:
TYPE: Description
### Response:
#vtb
def contains(self, string):
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
|
#vtb
def flasher(msg, severity=None):
try:
flash(msg, severity)
except RuntimeError:
        if severity == 'error':
logging.error(msg)
else:
logging.info(msg)
|
Flask's flash if available, logging call if not
|
### Input:
Flask's flash if available, logging call if not
### Response:
#vtb
def flasher(msg, severity=None):
try:
flash(msg, severity)
except RuntimeError:
        if severity == 'error':
logging.error(msg)
else:
logging.info(msg)
|
#vtb
def _fetch(self, key):
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
current_section = self.section
while True:
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
        val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
if current_section.parent is current_section:
break
current_section = current_section.parent
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
|
Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
|
### Input:
Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
### Response:
#vtb
def _fetch(self, key):
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
current_section = self.section
while True:
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
        val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
if current_section.parent is current_section:
break
current_section = current_section.parent
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
|
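The lookup walks the section tree upward, checking each section and its DEFAULT block before moving to the parent; a stripped-down sketch of the same walk over plain dicts (illustrative structure, not configobj's Section class):

def walk_up(section, key):
    """Find key in this section, its DEFAULT block, or any ancestor."""
    current = section
    while True:
        if key in current.get('scalars', {}):
            return current['scalars'][key], current
        if key in current.get('DEFAULT', {}):
            return current['DEFAULT'][key], current
        parent = current.get('parent', current)
        if parent is current:  # the root points at itself, as in configobj
            raise KeyError(key)
        current = parent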
#vtb
def url(self):
self.render()
    return self._apiurl + '&'.join(self._parts()).replace(' ', '+')  # assumed: '&' between params, spaces URL-encoded as '+'
|
Returns the rendered URL of the chart
|
### Input:
Returns the rendered URL of the chart
### Response:
#vtb
def url(self):
self.render()
    return self._apiurl + '&'.join(self._parts()).replace(' ', '+')  # assumed: '&' between params, spaces URL-encoded as '+'
|
#vtb
def set_install_id(filename, install_id):
if get_install_id(filename) is None:
        raise InstallNameError('{0} has no install id'.format(filename))
    back_tick(['install_name_tool', '-id', install_id, filename])
|
Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
|
### Input:
Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
### Response:
#vtb
def set_install_id(filename, install_id):
if get_install_id(filename) is None:
        raise InstallNameError('{0} has no install id'.format(filename))
    back_tick(['install_name_tool', '-id', install_id, filename])
|
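For context, the id that install_name_tool -id overwrites can be read back with otool -D; a hedged sketch using subprocess directly rather than this module's back_tick helper:

import subprocess

def read_install_id(filename):
    """Return the install id reported by `otool -D`, or None if absent."""
    out = subprocess.check_output(['otool', '-D', filename]).decode()
    lines = out.strip().splitlines()  # first line is "<filename>:", the id follows
    return lines[1] if len(lines) > 1 else None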
#vtb
def train(self):
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
                                      desc='train', total=self.max_iter,
ncols=80):
self.epoch = self.iter_train.epoch
self.iteration = iteration
if self.interval_validate and \
self.iteration % self.interval_validate == 0:
self.validate()
batch = map(datasets.transform_lsvrc2012_vgg16, batch)
in_vars = utils.batch_to_vars(batch, device=self.device)
self.model.zerograds()
loss = self.model(*in_vars)
if loss is not None:
loss.backward()
self.optimizer.update()
lbl_true = zip(*batch)[1]
lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
acc = utils.label_accuracy_score(
lbl_true, lbl_pred, self.model.n_class)
        self._write_log(**{
            'epoch': self.epoch,
            'iteration': self.iteration,
            'elapsed_time': time.time() - self.stamp_start,
            'loss': float(loss.data),
            'acc': acc[0],
            'acc_cls': acc[1],
            'mean_iu': acc[2],
            'fwavacc': acc[3],
        })
if iteration >= self.max_iter:
self._save_model()
break
|
Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
|
### Input:
Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
### Response:
#vtb
def train(self):
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
                                      desc='train', total=self.max_iter,
ncols=80):
self.epoch = self.iter_train.epoch
self.iteration = iteration
if self.interval_validate and \
self.iteration % self.interval_validate == 0:
self.validate()
batch = map(datasets.transform_lsvrc2012_vgg16, batch)
in_vars = utils.batch_to_vars(batch, device=self.device)
self.model.zerograds()
loss = self.model(*in_vars)
if loss is not None:
loss.backward()
self.optimizer.update()
lbl_true = zip(*batch)[1]
lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
acc = utils.label_accuracy_score(
lbl_true, lbl_pred, self.model.n_class)
        self._write_log(**{
            'epoch': self.epoch,
            'iteration': self.iteration,
            'elapsed_time': time.time() - self.stamp_start,
            'loss': float(loss.data),
            'acc': acc[0],
            'acc_cls': acc[1],
            'mean_iu': acc[2],
            'fwavacc': acc[3],
        })
if iteration >= self.max_iter:
self._save_model()
break
|
#vtb
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
internal_matcher = re.compile("__agg_\d+__")
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
if match:
            vname = match.group('name')
            vrequired = True if match.group('required') == '_' else False
            vtype = 'literal'
            vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
            vdefault = get_defaults(rq, vname, query_metadata)
            vlang = None
            vdatatype = None
            vformat = None
            mtype = match.group('type')
            muserdefined = match.group('userdefined')
            if mtype in ['iri', 'number', 'literal']:
                vtype = mtype
            elif mtype in ['date', 'datetime']:  # assumed: date-ish types map to a formatted string
                vtype = 'string'
                vformat = 'date-time'
            elif mtype:
                vtype = 'literal'
                if mtype in static.XSD_DATATYPES:
                    vdatatype = 'xsd:{}'.format(mtype)
                elif len(mtype) == 2:
                    vlang = mtype
                elif muserdefined:
                    vdatatype = '{}:{}'.format(mtype, muserdefined)
            parameters[vname] = {
                'original': '?{}'.format(v),
                'required': vrequired,
                'name': vname,
                'type': vtype
            }
            if vcodes is not None:
                parameters[vname]['enum'] = sorted(vcodes)
            if vlang is not None:
                parameters[vname]['lang'] = vlang
            if vdatatype is not None:
                parameters[vname]['datatype'] = vdatatype
            if vformat is not None:
                parameters[vname]['format'] = vformat
            if vdefault is not None:
                parameters[vname]['default'] = vdefault
    glogger.info('Parameters: {}'.format(parameters))
return parameters
|
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as an IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
|
### Input:
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as an IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
### Response:
#vtb
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
internal_matcher = re.compile("__agg_\d+__")
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
if match:
            vname = match.group('name')
            vrequired = True if match.group('required') == '_' else False
            vtype = 'literal'
            vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
            vdefault = get_defaults(rq, vname, query_metadata)
            vlang = None
            vdatatype = None
            vformat = None
            mtype = match.group('type')
            muserdefined = match.group('userdefined')
            if mtype in ['iri', 'number', 'literal']:
                vtype = mtype
            elif mtype in ['date', 'datetime']:  # assumed: date-ish types map to a formatted string
                vtype = 'string'
                vformat = 'date-time'
            elif mtype:
                vtype = 'literal'
                if mtype in static.XSD_DATATYPES:
                    vdatatype = 'xsd:{}'.format(mtype)
                elif len(mtype) == 2:
                    vlang = mtype
                elif muserdefined:
                    vdatatype = '{}:{}'.format(mtype, muserdefined)
            parameters[vname] = {
                'original': '?{}'.format(v),
                'required': vrequired,
                'name': vname,
                'type': vtype
            }
            if vcodes is not None:
                parameters[vname]['enum'] = sorted(vcodes)
            if vlang is not None:
                parameters[vname]['lang'] = vlang
            if vdatatype is not None:
                parameters[vname]['datatype'] = vdatatype
            if vformat is not None:
                parameters[vname]['format'] = vformat
            if vdefault is not None:
                parameters[vname]['default'] = vdefault
    glogger.info('Parameters: {}'.format(parameters))
return parameters
|
#vtb
def create_blueprint(endpoints):
blueprint = Blueprint(
,
__name__,
url_prefix=,
template_folder=,
static_folder=,
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
return render_template(
            current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
@blueprint.context_processor
def inject_export_formats():
return dict(
export_formats=(
                current_app.extensions['invenio-records-ui'].export_formats)
)
for endpoint, options in (endpoints or {}).items():
blueprint.add_url_rule(**create_url_rule(endpoint, **options))
return blueprint
|
Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
|
### Input:
Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
### Response:
#vtb
def create_blueprint(endpoints):
blueprint = Blueprint(
,
__name__,
url_prefix=,
template_folder=,
static_folder=,
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
return render_template(
            current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
@blueprint.context_processor
def inject_export_formats():
return dict(
export_formats=(
                current_app.extensions['invenio-records-ui'].export_formats)
)
for endpoint, options in (endpoints or {}).items():
blueprint.add_url_rule(**create_url_rule(endpoint, **options))
return blueprint
|
#vtb
def stop(self):
if not self._providers_registered:
self.queue_consumer.unregister_provider(self)
self._unregistered_from_queue_consumer.send(True)
|
Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
|
### Input:
Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
### Response:
#vtb
def stop(self):
if not self._providers_registered:
self.queue_consumer.unregister_provider(self)
self._unregistered_from_queue_consumer.send(True)
|
#vtb
def get_object_or_child_by_type(self, *types):
objects = self.get_objects_or_children_by_type(*types)
return objects[0] if any(objects) else None
|
Get the object if the child has already been read, or read the child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: the first object of the specified types, or None if there is none.
|
### Input:
Get the object if the child has already been read, or read the child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: the first object of the specified types, or None if there is none.
### Response:
#vtb
def get_object_or_child_by_type(self, *types):
objects = self.get_objects_or_children_by_type(*types)
return objects[0] if any(objects) else None
|
#vtb
def answer(part, module=):
marks = json.load(open(os.path.join(data_directory, module), ))
return marks[ + str(part+1)]
|
Returns the answers to the lab classes.
|
### Input:
Returns the answers to the lab classes.
### Response:
#vtb
def answer(part, module=):
marks = json.load(open(os.path.join(data_directory, module), ))
return marks[ + str(part+1)]
|
#vtb
def import_medusa_data(mat_filename, config_file):
df_emd, df_md = _read_mat_mnu0(mat_filename)
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
print()
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md
|
Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
|
### Input:
Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
### Response:
#vtb
def import_medusa_data(mat_filename, config_file):
df_emd, df_md = _read_mat_mnu0(mat_filename)
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
print()
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md
|
#vtb
def escape(identifier, ansi_quotes, should_quote):
if not should_quote(identifier):
return identifier
    quote = '"' if ansi_quotes else '`'
    identifier = identifier.replace(quote, 2 * quote)
    return '{0}{1}{2}'.format(quote, identifier, quote)
|
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
|
### Input:
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
### Response:
#vtb
def escape(identifier, ansi_quotes, should_quote):
if not should_quote(identifier):
return identifier
    quote = '"' if ansi_quotes else '`'
    identifier = identifier.replace(quote, 2 * quote)
    return '{0}{1}{2}'.format(quote, identifier, quote)
|
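A hypothetical usage, quoting everything in the ANSI double-quote style (assumes the reconstructed literals above):

print(escape('my"col', ansi_quotes=True, should_quote=lambda s: True))
# -> "my""col"   (the embedded quote is doubled, then the identifier is wrapped)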
#vtb
def is_valid_ipv4 (ip):
if not _ipv4_re.match(ip):
return False
a, b, c, d = [int(i) for i in ip.split(".")]
return a <= 255 and b <= 255 and c <= 255 and d <= 255
|
Return True if given ip is a valid IPv4 address.
|
### Input:
Return True if given ip is a valid IPv4 address.
### Response:
#vtb
def is_valid_ipv4 (ip):
if not _ipv4_re.match(ip):
return False
a, b, c, d = [int(i) for i in ip.split(".")]
return a <= 255 and b <= 255 and c <= 255 and d <= 255
|
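The validator relies on a module-level _ipv4_re; a self-contained sketch with an assumed shape-only regex, since the arithmetic check already handles the 0-255 range:

import re

_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")

def is_valid_ipv4(ip):
    if not _ipv4_re.match(ip):
        return False
    a, b, c, d = [int(i) for i in ip.split(".")]
    return a <= 255 and b <= 255 and c <= 255 and d <= 255

print(is_valid_ipv4("192.168.0.1"))  # True
print(is_valid_ipv4("256.1.1.1"))    # False (octet out of range)
print(is_valid_ipv4("1.2.3"))        # False (wrong shape)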
#vtb
def get_api_id(self, lambda_name):
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
                                LogicalResourceId='Api')  # logical resource ID assumed
        return response['StackResourceDetail'].get('PhysicalResourceId', None)
    except:
        try:
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response['items']:
                if item['name'] == lambda_name:
                    return item['id']
            logger.exception('Could not get API ID.')  # message wording assumed
return None
except:
return None
|
Given a lambda_name, return the API id.
|
### Input:
Given a lambda_name, return the API id.
### Response:
#vtb
def get_api_id(self, lambda_name):
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
                                LogicalResourceId='Api')  # logical resource ID assumed
        return response['StackResourceDetail'].get('PhysicalResourceId', None)
    except:
        try:
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response['items']:
                if item['name'] == lambda_name:
                    return item['id']
            logger.exception('Could not get API ID.')  # message wording assumed
return None
except:
return None
|
#vtb
def fromdict(dict):
    index = dict['index']  # dict keys inferred from the variable names
    seed = hb_decode(dict['seed'])
    n = dict['n']
    root = hb_decode(dict['root'])
    hmac = hb_decode(dict['hmac'])
    timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self
|
Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
|
### Input:
Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
### Response:
#vtb
def fromdict(dict):
    index = dict['index']  # dict keys inferred from the variable names
    seed = hb_decode(dict['seed'])
    n = dict['n']
    root = hb_decode(dict['root'])
    hmac = hb_decode(dict['hmac'])
    timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self
|
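A hypothetical round-trip sketch for the reconstructed keys; hb_encode/hb_decode are stood in by base64 helpers and the State constructor by a plain holder class.

import base64

hb_encode = lambda b: base64.b64encode(b).decode('ascii')
hb_decode = lambda s: base64.b64decode(s)

class State:
    def __init__(self, index, seed, n, root, hmac, timestamp):
        self.index, self.seed, self.n = index, seed, n
        self.root, self.hmac, self.timestamp = root, hmac, timestamp

def fromdict(dict):
    return State(dict['index'], hb_decode(dict['seed']), dict['n'],
                 hb_decode(dict['root']), hb_decode(dict['hmac']),
                 dict['timestamp'])

record = {'index': 0, 'seed': hb_encode(b'seed'), 'n': 4,
          'root': hb_encode(b'root'), 'hmac': hb_encode(b'mac'),
          'timestamp': 1234567890}
state = fromdict(record)
assert state.seed == b'seed' and state.n == 4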
#vtb
def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=):
return self.raw_query(, , data={
: [{: i} for i in lces],
: [{: i} for i in assets],
: [{: i} for i in queries],
: [{: i} for i in policies],
: [{: i} for i in dashboards],
: [{: i} for i in credentials],
: [{: i} for i in repos],
: [{: i} for i in restrict],
: name,
: description,
: [],
:
})
|
group_add name, restrict, repos
|
### Input:
group_add name, restrict, repos
### Response:
#vtb
def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=):
return self.raw_query(, , data={
: [{: i} for i in lces],
: [{: i} for i in assets],
: [{: i} for i in queries],
: [{: i} for i in policies],
: [{: i} for i in dashboards],
: [{: i} for i in credentials],
: [{: i} for i in repos],
: [{: i} for i in restrict],
: name,
: description,
: [],
:
})
|
#vtb
def verify_connectivity(config):
logger.debug("Verifying Connectivity")
ic = InsightsConnection(config)
try:
branch_info = ic.get_branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
        remote_leaf = branch_info['remote_leaf']  # key inferred from the variable name
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False
|
Verify connectivity to satellite server
|
### Input:
Verify connectivity to satellite server
### Response:
#vtb
def verify_connectivity(config):
logger.debug("Verifying Connectivity")
ic = InsightsConnection(config)
try:
branch_info = ic.get_branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
        remote_leaf = branch_info['remote_leaf']  # key inferred from the variable name
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False
|
#vtb
def parse_response(self, resp):
p, u = self.getparser()
    if hasattr(resp, 'text'):
        text = resp.text
    else:
        encoding = requests.utils.get_encoding_from_headers(resp.headers)
        if encoding is None:
            encoding = 'utf-8'  # fallback encoding assumed
        if sys.version_info[0] == 2:
            text = unicode(resp.content, encoding, errors='replace')  # error policy assumed
        else:
            assert sys.version_info[0] == 3
            text = str(resp.content, encoding, errors='replace')
p.feed(text)
p.close()
return u.close()
|
Parse the xmlrpc response.
|
### Input:
Parse the xmlrpc response.
### Response:
#vtb
def parse_response(self, resp):
p, u = self.getparser()
    if hasattr(resp, 'text'):
        text = resp.text
    else:
        encoding = requests.utils.get_encoding_from_headers(resp.headers)
        if encoding is None:
            encoding = 'utf-8'  # fallback encoding assumed
        if sys.version_info[0] == 2:
            text = unicode(resp.content, encoding, errors='replace')  # error policy assumed
        else:
            assert sys.version_info[0] == 3
            text = str(resp.content, encoding, errors='replace')
p.feed(text)
p.close()
return u.close()
|
#vtb
def validate_path(xj_path):
if not isinstance(xj_path, str):
        raise XJPathError('XJ path must be a string')  # message wording assumed
    for path in split(xj_path, '.'):  # separator and literals below reconstructed from XJPath syntax
        if path == '*':
            continue
        if path.startswith('@'):
            if path == '@first' or path == '@last':
                continue
            try:
                int(path[1:])
            except ValueError:
                raise XJPathError(
                    'Array index must be an integer, @first or @last')  # message wording assumed
|
Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
|
### Input:
Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
### Response:
#vtb
def validate_path(xj_path):
if not isinstance(xj_path, str):
        raise XJPathError('XJ path must be a string')  # message wording assumed
    for path in split(xj_path, '.'):  # separator and literals below reconstructed from XJPath syntax
        if path == '*':
            continue
        if path.startswith('@'):
            if path == '@first' or path == '@last':
                continue
            try:
                int(path[1:])
            except ValueError:
                raise XJPathError(
                    'Array index must be an integer, @first or @last')  # message wording assumed
|
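A self-contained sketch of the reconstructed validation rules: dot-separated keys, '*' wildcards, and '@' array indexes such as @0, @first and @last. The module's own split() is stood in by plain str.split.

class XJPathError(Exception):
    pass

def validate_path(xj_path):
    if not isinstance(xj_path, str):
        raise XJPathError('XJ path must be a string')
    for part in xj_path.split('.'):
        if part == '*':
            continue
        if part.startswith('@'):
            if part in ('@first', '@last'):
                continue
            try:
                int(part[1:])
            except ValueError:
                raise XJPathError('bad array index: %s' % part)

validate_path('users.*.@first')        # passes silently
try:
    validate_path('items.@x')
except XJPathError as e:
    print(e)                           # bad array index: @x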
#vtb
def next(self):
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry))
|
Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
|
### Input:
Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
### Response:
#vtb
def next(self):
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry))
|
#vtb
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position
|
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
|
### Input:
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
### Response:
#vtb
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position
|
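The same stack-based scan, restated as a standalone function over plain arguments instead of the Document's cursor state (a sketch, not prompt_toolkit's API):

def find_enclosing_left(text, cursor, left_ch, right_ch):
    if cursor < len(text) and text[cursor] == left_ch:
        return 0
    stack = 1
    for i in range(cursor - 1, -1, -1):
        c = text[i]
        if c == right_ch:
            stack += 1
        elif c == left_ch:
            stack -= 1
        if stack == 0:
            return i - cursor
    return None  # no enclosing bracket found

print(find_enclosing_left('f(a, (b))', 7, '(', ')'))  # -2: the '(' two chars left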
#vtb
def cancel(batch_fn, cancel_fn, ops):
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages
|
Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
|
### Input:
Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
### Response:
#vtb
def cancel(batch_fn, cancel_fn, ops):
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages
|
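The loop above is plain fixed-size slicing; a quick check of the arithmetic:

max_batch = 256
ops = list(range(1000))
slices = [ops[i:i + max_batch] for i in range(0, len(ops), max_batch)]
print([len(s) for s in slices])  # [256, 256, 256, 232]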
#vtb
def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
require(, )
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn))
env.update(config)
    env.vm_type = (env.vm_type or '').lower()
    assert env.vm_type, 'No VM type specified.'  # assertion messages assumed
    group = group or env.vm_group
    assert group, 'No VM group specified.'
    ret = exists(name=name, group=group)
    if not extra and ret:
        if verbose:
            print('VM %s in group %s already exists.' % (name, group))  # message wording assumed
        return ret
    today = datetime.date.today()
    release = int('%i%02i%02i' % (today.year, today.month, today.day))  # YYYYMMDD format assumed
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
else:
raise NotImplementedError
|
Creates a virtual machine instance.
|
### Input:
Creates a virtual machine instance.
### Response:
#vtb
def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
    require('vm_type', 'vm_group')  # required env keys assumed
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn))
env.update(config)
    env.vm_type = (env.vm_type or '').lower()
    assert env.vm_type, 'No VM type specified.'  # assertion messages assumed
    group = group or env.vm_group
    assert group, 'No VM group specified.'
    ret = exists(name=name, group=group)
    if not extra and ret:
        if verbose:
            print('VM %s in group %s already exists.' % (name, group))  # message wording assumed
        return ret
    today = datetime.date.today()
    release = int('%i%02i%02i' % (today.year, today.month, today.day))  # YYYYMMDD format assumed
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
else:
raise NotImplementedError
|
#vtb
def SG(self):
    # Docstring text had bled into this extracted body; the phase check is
    # reconstructed from the surviving "...g': return self.SGg" fragment.
    if self.phase == 'g':
        return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None
|
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
|
### Input:
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
### Response:
#vtb
def SG(self):
    # Docstring text had bled into this extracted body; the phase check is
    # reconstructed from the surviving "...g': return self.SGg" fragment.
    if self.phase == 'g':
        return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None
|
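The module-level SG() helper the property delegates to presumably divides the density by that of the reference water; a sketch under that assumption (the reference density is a handbook value, not taken from this source):

RHO_WATER_4C = 999.972  # kg/m^3 at 4 degC, handbook value

def SG(rho, rho_ref=RHO_WATER_4C):
    return rho / rho_ref

print(round(SG(742.8), 4))  # ~0.7428, in line with the MTBE example above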
#vtb
def xy(self):
if self._xy != (None, None):
self._x, self._y = self._xy
if self._x is not None and self._y is not None:
x = self._x
if self._x > 1:
x = self._x / 65555
y = self._y
if self._y > 1:
y = self._y / 65555
return (x, y)
return None
|
CIE xy color space coordinates as array [x, y] of real values (0..1).
|
### Input:
CIE xy color space coordinates as array [x, y] of real values (0..1).
### Response:
#vtb
def xy(self):
if self._xy != (None, None):
self._x, self._y = self._xy
if self._x is not None and self._y is not None:
x = self._x
if self._x > 1:
x = self._x / 65555
y = self._y
if self._y > 1:
y = self._y / 65555
return (x, y)
return None
|
#vtb
def _client_run(self):
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True
|
MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
|
### Input:
MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
### Response:
#vtb
def _client_run(self):
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True
|
#vtb
def user_return(self, frame, return_value):
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value)
|
This function is called when a return trap is set here.
|
### Input:
This function is called when a return trap is set here.
### Response:
#vtb
def user_return(self, frame, return_value):
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value)
|
#vtb
def get_data_path(cls):
marvin_path = os.environ.get(cls._key)
if not marvin_path:
raise InvalidConfigException()
is_path_created = check_path(marvin_path, create=True)
if not is_path_created:
raise InvalidConfigException()
return marvin_path
|
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
|
### Input:
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
### Response:
#vtb
def get_data_path(cls):
marvin_path = os.environ.get(cls._key)
if not marvin_path:
raise InvalidConfigException()
is_path_created = check_path(marvin_path, create=True)
if not is_path_created:
raise InvalidConfigException()
return marvin_path
|
#vtb
def _parse_request_reply(self, msg):  # msg parameter restored: the body consumes it
    "waiting for a reply to our request"
    (version, reply, _, typ) = struct.unpack('BBBB', msg)  # format assumed: four unsigned bytes
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method()
|
waiting for a reply to our request
|
### Input:
waiting for a reply to our request
### Response:
#vtb
def _parse_request_reply(self, msg):  # msg parameter restored: the body consumes it
    "waiting for a reply to our request"
    (version, reply, _, typ) = struct.unpack('BBBB', msg)  # format assumed: four unsigned bytes
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method()
|
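For reference, the fixed four-byte header this parser consumes: version, reply code, a reserved byte, and the bound-address type. The unpack format is the one assumed above.

import struct

msg = struct.pack('BBBB', 5, 0, 0, 1)  # version 5, SUCCEEDED, reserved, IPv4
version, reply, _, typ = struct.unpack('BBBB', msg)
assert (version, reply, typ) == (5, 0, 1)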
#vtb
def bold(*content, sep=' '):  # default separator assumed
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[0])
|
Make bold text (Markdown)
:param content:
:param sep:
:return:
|
### Input:
Make bold text (Markdown)
:param content:
:param sep:
:return:
### Response:
#vtb
def bold(*content, sep=' '):  # default separator assumed
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[0])
|
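A self-contained sketch assuming MD_SYMBOLS[0] is the asterisk pair and _join concatenates with the separator:

def _join(*content, sep=' '):
    return sep.join(map(str, content))

def bold(*content, sep=' '):
    return '*{}*'.format(_join(*content, sep=sep))

print(bold('hello', 'world'))  # *hello world*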
#vtb
def matplotlib_to_ginga_cmap(cm, name=None):
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst)
|
Convert matplotlib colormap to Ginga's.
|
### Input:
Convert matplotlib colormap to Ginga's.
### Response:
#vtb
def matplotlib_to_ginga_cmap(cm, name=None):
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst)
|
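The conversion core, sampled standalone: min_cmap_len is a Ginga module constant, stood in here as 256, and plain float replaces np.float, which was removed in modern NumPy.

import numpy as np
from matplotlib import cm as mpl_cm

min_cmap_len = 256
arr = mpl_cm.get_cmap('viridis')(np.arange(0, min_cmap_len) / float(min_cmap_len - 1))
clst = arr[:, 0:3]   # drop the alpha channel; Ginga's ColorMap wants RGB triples
print(clst.shape)    # (256, 3)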
#vtb
def gather_data(registry):
host = socket.gethostname()
    ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
                       {'host': host})  # label keys reconstructed; names assumed
    cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
                       {'host': host})
registry.register(ram_metric)
registry.register(cpu_metric)
while True:
time.sleep(1)
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
        ram_metric.set({'type': "virtual", }, ram.used)  # label keys reconstructed
        ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
        ram_metric.set({'type': "swap"}, swap.used)
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            cpu_metric.set({'core': c}, p)
|
Gathers the metrics
|
### Input:
Gathers the metrics
### Response:
#vtb
def gather_data(registry):
host = socket.gethostname()
    ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
                       {'host': host})  # label keys reconstructed; names assumed
    cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
                       {'host': host})
registry.register(ram_metric)
registry.register(cpu_metric)
while True:
time.sleep(1)
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
        ram_metric.set({'type': "virtual", }, ram.used)  # label keys reconstructed
        ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
        ram_metric.set({'type': "swap"}, swap.used)
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            cpu_metric.set({'core': c}, p)
|
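The Gauge here takes a label dict plus a value, which is the older prometheus library's API rather than prometheus_client's; a minimal stand-in to exercise the same call pattern:

class Gauge:
    def __init__(self, name, doc, const_labels=None):
        self.name = name
        self.samples = {}

    def set(self, labels, value):
        # Key each sample by its sorted label pairs.
        self.samples[tuple(sorted(labels.items()))] = value

ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.", {'host': 'demo'})
ram_metric.set({'type': 'virtual'}, 8589934592)
ram_metric.set({'type': 'swap'}, 0)
print(ram_metric.samples)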
#vtb
def status(self, code=None):
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self
|
Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
|
### Input:
Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
### Response:
#vtb
def status(self, code=None):
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self
|
#vtb
def get_ip_interface_output_interface_ip_address_ipv4(self, **kwargs):
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')  # kwarg names inferred from the element names
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    ip_address = ET.SubElement(interface, "ip-address")
    ipv4 = ET.SubElement(ip_address, "ipv4")
    ipv4.text = kwargs.pop('ipv4')
    callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
### Input:
Auto Generated Code
### Response:
#vtb
def get_ip_interface_output_interface_ip_address_ipv4(self, **kwargs):
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')  # kwarg names inferred from the element names
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    ip_address = ET.SubElement(interface, "ip-address")
    ipv4 = ET.SubElement(ip_address, "ipv4")
    ipv4.text = kwargs.pop('ipv4')
    callback = kwargs.pop('callback', self._callback)
return callback(config)
|
#vtb
def rename(args):
old_name, new_name = args.names
add_tags(resources.ec2.Instance(resolve_instance_id(old_name)), Name=new_name, dry_run=args.dry_run)
|
Supply two names: Existing instance name or ID, and new name to assign to the instance.
|
### Input:
Supply two names: Existing instance name or ID, and new name to assign to the instance.
### Response:
#vtb
def rename(args):
old_name, new_name = args.names
add_tags(resources.ec2.Instance(resolve_instance_id(old_name)), Name=new_name, dry_run=args.dry_run)
|
#vtb
def _check_fact_ref_eval(cls, cpel_dom):
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
return False
|
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
|
### Input:
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
### Response:
#vtb
def _check_fact_ref_eval(cls, cpel_dom):
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
return False
|
#vtb
def deserialize(self, data, fields=None):
if not isinstance(data, (bytes, bytearray)):
return data
    return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted)  # encoding assumed
|
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
|
### Input:
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
### Response:
#vtb
def deserialize(self, data, fields=None):
if not isinstance(data, (bytes, bytearray)):
return data
    return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted)  # encoding assumed
|
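A round-trip sketch; decode_to_sorted is stood in by a sorted-OrderedDict hook, and modern msgpack spells the text-decoding option raw=False instead of the encoding= keyword shown above.

import msgpack
from collections import OrderedDict

def decode_to_sorted(pairs):
    return OrderedDict(sorted(pairs))

data = msgpack.packb({'b': 2, 'a': 1}, use_bin_type=True)
restored = msgpack.unpackb(data, raw=False, object_pairs_hook=decode_to_sorted)
print(restored)  # OrderedDict([('a', 1), ('b', 2)])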