code (string, 64–7.01k chars) | docstring (string, 2–15.8k chars) | text (string, 144–19.2k chars) |
---|---|---|
#vtb
def is_data_dependent(fmto, data):
if callable(fmto.data_dependent):
return fmto.data_dependent(data)
return fmto.data_dependent
|
Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data
|
### Input:
Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data
### Response:
#vtb
def is_data_dependent(fmto, data):
if callable(fmto.data_dependent):
return fmto.data_dependent(data)
return fmto.data_dependent
|
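A minimal sketch of the helper's two branches, using a hypothetical stand-in for psyplot's `Formatoption` (only the `data_dependent` attribute matters here):

class StubFmto:
    # hypothetical stand-in, for illustration only
    def __init__(self, data_dependent):
        self.data_dependent = data_dependent

print(is_data_dependent(StubFmto(True), data=None))                     # True
print(is_data_dependent(StubFmto(lambda d: d is not None), data=None))  # False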
#vtb
def metadata(self, run_id=None):
try:
version = subprocess.check_output(
["git", "describe", "--tags", "--always"]).decode()
except:
version = None
if self.config[][] == :
data_version = self.config[][]
database_tables = self.config[]
elif self.config[][] == :
data_version =
database_tables = self.config[]
else:
data_version =
database_tables =
assumptions = {}
assumptions.update(self.config[])
assumptions.update(self.config[])
assumptions.update(self.config[])
assumptions.update(self.config[])
if not run_id:
run_id = datetime.now().strftime("%Y%m%d%H%M%S")
if not self._run_id:
self._run_id = run_id
metadata = dict(
version=version,
mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
database_tables=database_tables,
data_version=data_version,
assumptions=assumptions,
run_id=self._run_id
)
return metadata
|
Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
|
### Input:
Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
### Response:
#vtb
def metadata(self, run_id=None):
try:
version = subprocess.check_output(
["git", "describe", "--tags", "--always"]).decode()
except:
version = None
if self.config[][] == :
data_version = self.config[][]
database_tables = self.config[]
elif self.config[][] == :
data_version =
database_tables = self.config[]
else:
data_version =
database_tables =
assumptions = {}
assumptions.update(self.config[])
assumptions.update(self.config[])
assumptions.update(self.config[])
assumptions.update(self.config[])
if not run_id:
run_id = datetime.now().strftime("%Y%m%d%H%M%S")
if not self._run_id:
self._run_id = run_id
metadata = dict(
version=version,
mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
database_tables=database_tables,
data_version=data_version,
assumptions=assumptions,
run_id=self._run_id
)
return metadata
|
#vtb
def getAllData(self, temp = True, accel = True, gyro = True):
allData = {}
if temp:
allData["temp"] = self.getTemp()
if accel:
allData["accel"] = self.getAccelData( raw = False )
if gyro:
allData["gyro"] = self.getGyroData()
return allData
|
!
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
|
### Input:
!
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
### Response:
#vtb
def getAllData(self, temp = True, accel = True, gyro = True):
allData = {}
if temp:
allData["temp"] = self.getTemp()
if accel:
allData["accel"] = self.getAccelData( raw = False )
if gyro:
allData["gyro"] = self.getGyroData()
return allData
|
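A usage sketch, assuming `imu` is an instance of the sensor class that defines this method (the variable names are illustrative):

readings = imu.getAllData(temp=True, accel=True, gyro=False)
print(readings.get("temp"))    # e.g. 32.3
print(readings.get("accel"))   # e.g. {"x": 0.456, "y": 0.212, "z": 1.334}
assert "gyro" not in readings  # gyro reads were disabled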
#vtb
def file_detector_context(self, file_detector_class, *args, **kwargs):
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
|
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
|
### Input:
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
### Response:
#vtb
def file_detector_context(self, file_detector_class, *args, **kwargs):
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
|
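In Selenium this method is a generator decorated with `contextlib.contextmanager`, which is what lets the bare `yield` back a `with` block. A usage sketch against a remote driver:

from selenium import webdriver
from selenium.webdriver.remote.file_detector import UselessFileDetector

driver = webdriver.Remote("http://localhost:4444/wd/hub",
                          options=webdriver.ChromeOptions())
# temporarily disable local-file detection while typing a literal path
with driver.file_detector_context(UselessFileDetector):
    driver.find_element("name", "upload").send_keys("/etc/hosts")
# the original file detector is restored when the block exits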
#vtb
def cmdloop(self):
while True:
cmdline = input(self.prompt)
tokens = shlex.split(cmdline)
if not tokens:
if self.last_cmd:
tokens = self.last_cmd
else:
print()
continue
if tokens[0] not in self.commands:
print()
continue
command = self.commands[tokens[0]]
self.last_cmd = tokens
try:
if command(self.state, tokens):
break
except CmdExit:
continue
except Exception as e:
if e not in self.safe_exceptions:
logger.exception()
|
Start CLI REPL.
|
### Input:
Start CLI REPL.
### Response:
#vtb
def cmdloop(self):
while True:
cmdline = input(self.prompt)
tokens = shlex.split(cmdline)
if not tokens:
if self.last_cmd:
tokens = self.last_cmd
else:
print()
continue
if tokens[0] not in self.commands:
print()
continue
command = self.commands[tokens[0]]
self.last_cmd = tokens
try:
if command(self.state, tokens):
break
except CmdExit:
continue
except Exception as e:
if e not in self.safe_exceptions:
logger.exception()
|
#vtb
def isTemporal(inferenceType):
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = \
set([InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes
|
Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
|
### Input:
Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
### Response:
#vtb
def isTemporal(inferenceType):
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = \
set([InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes
|
#vtb
def is_instance_factory(_type):
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
|
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type`
|
### Input:
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type`
### Response:
#vtb
def is_instance_factory(_type):
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
|
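A quick exercise of the returned validator (this is the pattern pandas uses for option validation):

validator = is_instance_factory((int, float))
validator(3)      # passes silently
try:
    validator("3")
except ValueError as exc:
    print(exc)    # Value must be an instance of <class 'int'>|<class 'float'>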
#vtb
def _get_filename(class_name, language):
name = str(class_name).strip()
lang = str(language)
if language in [, ]:
name = "".join([name[0].upper() + name[1:]])
suffix = {
: , : , : ,
: , : , :
}
suffix = suffix.get(lang, lang)
return .format(name, suffix)
|
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
|
### Input:
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
### Response:
#vtb
def _get_filename(class_name, language):
name = str(class_name).strip()
lang = str(language)
if language in [, ]:
name = "".join([name[0].upper() + name[1:]])
suffix = {
: , : , : ,
: , : , :
}
suffix = suffix.get(lang, lang)
return .format(name, suffix)
|
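The stripped literals above hide the capitalization list and the suffix table. A hedged sketch of what the mapping step plausibly looks like, assuming sklearn-porter's conventions (Java and PHP class names are capitalized; only 'ruby' maps to a different extension):

# assumed suffix table, reconstructed for illustration
suffix = {'c': 'c', 'go': 'go', 'java': 'java',
          'js': 'js', 'php': 'php', 'ruby': 'rb'}
print('{}.{}'.format('Estimator', suffix.get('ruby', 'ruby')))  # Estimator.rb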
#vtb
def __check_mem(self):
mem_free = psutil.virtual_memory().available / 2**20
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free))
|
raise exception on RAM exceeded
|
### Input:
raise exception on RAM exceeded
### Response:
#vtb
def __check_mem(self):
mem_free = psutil.virtual_memory().available / 2**20
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free))
|
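The `2**20` divisor converts bytes to mebibytes. A standalone sketch of the same check with a hypothetical 512 MB floor:

import psutil

mem_limit = 512  # hypothetical limit, MB
mem_free = psutil.virtual_memory().available / 2**20  # bytes -> MiB
if mem_free < mem_limit:
    raise RuntimeError("Not enough resources: free memory less "
                       "than %sMB: %sMB" % (mem_limit, mem_free))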
#vtb
def as_tree(self, visitor=None, children=None):
_parameters = {"node": self}
if visitor is not None:
_parameters["visitor"] = visitor
if children is not None:
_parameters["children"] = children
return self.__class__.objects.node_as_tree(**_parameters)
|
Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
|
### Input:
Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
### Response:
#vtb
def as_tree(self, visitor=None, children=None):
_parameters = {"node": self}
if visitor is not None:
_parameters["visitor"] = visitor
if children is not None:
_parameters["children"] = children
return self.__class__.objects.node_as_tree(**_parameters)
|
#vtb
def convenience_calc_fisher_approx(self, params):
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
self.choice_vector,
self.utility_transform,
self.calc_dh_d_shape,
self.calc_dh_dv,
self.calc_dh_d_alpha,
intercepts,
shapes,
self.ridge,
self.weights]
return cc.calc_fisher_info_matrix(*args)
|
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
|
### Input:
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
### Response:
#vtb
def convenience_calc_fisher_approx(self, params):
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
self.choice_vector,
self.utility_transform,
self.calc_dh_d_shape,
self.calc_dh_dv,
self.calc_dh_d_alpha,
intercepts,
shapes,
self.ridge,
self.weights]
return cc.calc_fisher_info_matrix(*args)
|
#vtb
def enver(*args):
from optparse import OptionParser
parser = OptionParser(usage=trim(enver.__doc__))
parser.add_option(
, ,
action=, const=UserRegisteredEnvironment,
default=MachineRegisteredEnvironment,
dest=,
help="Use the current user-a--appendstore_true-r--replacestore_true--remove-valuestore_true-e--editstore_true==setremove_valuesedit'
method = getattr(options.class_, method_name)
method(name, value, options)
except IndexError:
options.class_.show()
|
%prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications.
|
### Input:
%prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications.
### Response:
#vtb
def enver(*args):
from optparse import OptionParser
parser = OptionParser(usage=trim(enver.__doc__))
parser.add_option(
, ,
action=, const=UserRegisteredEnvironment,
default=MachineRegisteredEnvironment,
dest=,
help="Use the current user-a--appendstore_true-r--replacestore_true--remove-valuestore_true-e--editstore_true==setremove_valuesedit'
method = getattr(options.class_, method_name)
method(name, value, options)
except IndexError:
options.class_.show()
|
#vtb
def save_tip_length(labware: Labware, length: float):
calibration_path = CONFIG[]
if not calibration_path.exists():
calibration_path.mkdir(parents=True, exist_ok=True)
labware_offset_path = calibration_path/.format(labware._id)
calibration_data = _helper_tip_length_data_format(
str(labware_offset_path), length)
with labware_offset_path.open('w') as f:
json.dump(calibration_data, f)
labware.tip_length = length
|
Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key.
|
### Input:
Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key.
### Response:
#vtb
def save_tip_length(labware: Labware, length: float):
calibration_path = CONFIG[]
if not calibration_path.exists():
calibration_path.mkdir(parents=True, exist_ok=True)
labware_offset_path = calibration_path/.format(labware._id)
calibration_data = _helper_tip_length_data_format(
str(labware_offset_path), length)
with labware_offset_path.open('w') as f:
json.dump(calibration_data, f)
labware.tip_length = length
|
#vtb
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
jmp_trans = []
jmp_args = []
matt_args = []
idx = 0
for expnum in expnums:
jmp_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-8]
)
jmp_trans.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-8]
)
idx += 1
matt_args.append( % idx)
matt_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-9]
)
logging.info(util.exec_prog(jmp_trans))
if default == "WCS":
logging.info(compute_trans(expnums, ccd, version, prefix, default=default))
logging.info(util.exec_prog(jmp_args))
logging.info(util.exec_prog(matt_args))
check_args = []
if os.access(, os.R_OK):
os.unlink()
ptf = open(, )
ptf.write("
ptf.write("
for expnum in expnums:
filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0]
if not os.access(filename + ".bright.psf", os.R_OK):
os.link(filename + ".bright.jmp", filename + ".bright.psf")
if not os.access(filename + ".obj.psf", os.R_OK):
os.link(filename + ".obj.jmp", filename + ".obj.psf")
ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename,
_FWHM,
"NO"))
ptf.close()
if os.access(, os.F_OK):
os.unlink()
logging.info(util.exec_prog(check_args))
if os.access(, os.F_OK):
raise OSError(errno.EBADMSG, )
if os.access(, os.F_OK):
os.unlink()
if dry_run:
return
for expnum in expnums:
for ext in [, , ]:
uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix)
filename = os.path.basename(uri)
storage.copy(filename, uri)
return
|
run the actual step2 on the given exp/ccd combo
|
### Input:
run the actual step2 on the given exp/ccd combo
### Response:
#vtb
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
jmp_trans = []
jmp_args = []
matt_args = []
idx = 0
for expnum in expnums:
jmp_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-8]
)
jmp_trans.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-8]
)
idx += 1
matt_args.append( % idx)
matt_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext=, prefix=prefix)[0:-9]
)
logging.info(util.exec_prog(jmp_trans))
if default == "WCS":
logging.info(compute_trans(expnums, ccd, version, prefix, default=default))
logging.info(util.exec_prog(jmp_args))
logging.info(util.exec_prog(matt_args))
check_args = []
if os.access(, os.R_OK):
os.unlink()
ptf = open(, )
ptf.write("
ptf.write("
for expnum in expnums:
filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0]
if not os.access(filename + ".bright.psf", os.R_OK):
os.link(filename + ".bright.jmp", filename + ".bright.psf")
if not os.access(filename + ".obj.psf", os.R_OK):
os.link(filename + ".obj.jmp", filename + ".obj.psf")
ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename,
_FWHM,
"NO"))
ptf.close()
if os.access(, os.F_OK):
os.unlink()
logging.info(util.exec_prog(check_args))
if os.access(, os.F_OK):
raise OSError(errno.EBADMSG, )
if os.access(, os.F_OK):
os.unlink()
if dry_run:
return
for expnum in expnums:
for ext in [, , ]:
uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix)
filename = os.path.basename(uri)
storage.copy(filename, uri)
return
|
#vtb
def broadcast(self, command, *args, **kwargs):
criterion = kwargs.pop(, self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs)
|
Notifies each user with a specified command.
|
### Input:
Notifies each user with a specified command.
### Response:
#vtb
def broadcast(self, command, *args, **kwargs):
criterion = kwargs.pop(, self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs)
|
#vtb
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
|
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
|
### Input:
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
### Response:
#vtb
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
|
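Typical calls against the public `wikipedia` package API:

import wikipedia

p = wikipedia.page("New York City")  # title lookup, auto_suggest enabled
print(p.title)
try:
    wikipedia.page()                 # neither title nor pageid given
except ValueError as exc:
    print(exc)                       # Either a title or a pageid must be specified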
#vtb
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter):
if session is None:
session = bc.get_reader_session()
if order:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by(
order),
query_type)
else:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type)
try:
vpcs = query_method()
if vpcs:
return vpcs
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter)
|
Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
|
### Input:
Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
### Response:
#vtb
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter):
if session is None:
session = bc.get_reader_session()
if order:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by(
order),
query_type)
else:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type)
try:
vpcs = query_method()
if vpcs:
return vpcs
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter)
|
#vtb
def system_find_affiliates(input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest(, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /system/findAffiliates API method.
|
### Input:
Invokes the /system/findAffiliates API method.
### Response:
#vtb
def system_find_affiliates(input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest(, input_params, always_retry=always_retry, **kwargs)
|
#vtb
def echo(text, **kwargs):
if shakedown.cli.quiet:
return
if not in kwargs:
kwargs[] = True
if in kwargs:
text = decorate(text, kwargs[])
if in os.environ and os.environ[] == :
if text:
print(text, end="", flush=True)
if kwargs.get():
print()
else:
click.echo(text, nl=kwargs.get())
|
Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
|
### Input:
Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
### Response:
#vtb
def echo(text, **kwargs):
if shakedown.cli.quiet:
return
if not in kwargs:
kwargs[] = True
if in kwargs:
text = decorate(text, kwargs[])
if in os.environ and os.environ[] == :
if text:
print(text, end="", flush=True)
if kwargs.get():
print()
else:
click.echo(text, nl=kwargs.get())
|
#vtb
def _base_type(self):
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return "enum.%s" % subclass
raise NotImplementedError("unexpected dimension type class '%s'" % type_class)
|
Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
|
### Input:
Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
### Response:
#vtb
def _base_type(self):
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return "enum.%s" % subclass
raise NotImplementedError("unexpected dimension type class '%s'" % type_class)
|
#vtb
def tpx(mt, x, t):
return mt.lx[x + t] / mt.lx[x]
|
tpx : Returns the probability that x will survive within t years
|
### Input:
tpx : Returns the probability that x will survive within t years
### Response:
#vtb
def tpx(mt, x, t):
return mt.lx[x + t] / mt.lx[x]
|
#vtb
def parse_field(field: str) -> Tuple[str, Optional[str]]:
_field = field.split('.')
_field = [f.strip() for f in _field]
if len(_field) == 1 and _field[0]:
return _field[0], None
elif len(_field) == 2 and _field[0] and _field[1]:
return _field[0], _field[1]
raise QueryParserException(
.format(field))
|
Parses dotted fields, and returns field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo
|
### Input:
Parses dotted fields, and returns field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo
### Response:
#vtb
def parse_field(field: str) -> Tuple[str, Optional[str]]:
_field = field.split('.')
_field = [f.strip() for f in _field]
if len(_field) == 1 and _field[0]:
return _field[0], None
elif len(_field) == 2 and _field[0] and _field[1]:
return _field[0], _field[1]
raise QueryParserException(
.format(field))
|
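Behaviour on the docstring's own examples, assuming the split token is '.' (restored above from the `metric.foo` example):

print(parse_field("foo"))         # ('foo', None)
print(parse_field("metric.foo"))  # ('metric', 'foo')
parse_field("metric.")            # raises QueryParserException (empty suffix)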
#vtb
def _pwl_gen_costs(self, generators, base_mva):
ng = len(generators)
gpwl = [g for g in generators if g.pcost_model == PW_LINEAR]
if self.dc:
pgbas = 0
nq = 0
ybas = ng
else:
pgbas = 0
nq = ng
ybas = ng + nq
ny = len(gpwl)
if ny == 0:
return None, None
nc = len([co for gn in gpwl for co in gn.p_cost])
Ay = lil_matrix((ybas + ny, nc - ny))
by = array([])
j = 0
k = 0
for i, g in enumerate(gpwl):
ns = len(g.p_cost)
p = array([x / base_mva for x, c in g.p_cost])
c = array([c for x, c in g.p_cost])
m = diff(c) / diff(p)
if 0.0 in diff(p):
logger.error("Bad Pcost data: %s" % p)
raise ValueError("Bad Pcost data: %s (%s)" % (p, g.name))
b = m * p[:ns-1] - c[:ns-1]
by = r_[by, b.T]
Ay[pgbas + i, k:k + ns - 1] = m
Ay[ybas + j, k:k + ns - 1] = -ones(ns-1)
k += (ns - 1)
j += 1
y = Variable("y", ny)
if self.dc:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "y"])
else:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "Qg","y"])
return y, ycon
|
Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
|
### Input:
Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
### Response:
#vtb
def _pwl_gen_costs(self, generators, base_mva):
ng = len(generators)
gpwl = [g for g in generators if g.pcost_model == PW_LINEAR]
if self.dc:
pgbas = 0
nq = 0
ybas = ng
else:
pgbas = 0
nq = ng
ybas = ng + nq
ny = len(gpwl)
if ny == 0:
return None, None
nc = len([co for gn in gpwl for co in gn.p_cost])
Ay = lil_matrix((ybas + ny, nc - ny))
by = array([])
j = 0
k = 0
for i, g in enumerate(gpwl):
ns = len(g.p_cost)
p = array([x / base_mva for x, c in g.p_cost])
c = array([c for x, c in g.p_cost])
m = diff(c) / diff(p)
if 0.0 in diff(p):
logger.error("Bad Pcost data: %s" % p)
raise ValueError("Bad Pcost data: %s (%s)" % (p, g.name))
b = m * p[:ns-1] - c[:ns-1]
by = r_[by, b.T]
Ay[pgbas + i, k:k + ns - 1] = m
Ay[ybas + j, k:k + ns - 1] = -ones(ns-1)
k += (ns - 1)
j += 1
y = Variable("y", ny)
if self.dc:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "y"])
else:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "Qg","y"])
return y, ycon
|
#vtb
def index_of(self, name):
result = -1
for index, actor in enumerate(self.actors):
if actor.name == name:
result = index
break
return result
|
Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int
|
### Input:
Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int
### Response:
#vtb
def index_of(self, name):
result = -1
for index, actor in enumerate(self.actors):
if actor.name == name:
result = index
break
return result
|
#vtb
def from_grpc_error(rpc_exc):
if isinstance(rpc_exc, grpc.Call):
return from_grpc_status(
rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc
)
else:
return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc)
|
Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
|
### Input:
Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
### Response:
#vtb
def from_grpc_error(rpc_exc):
if isinstance(rpc_exc, grpc.Call):
return from_grpc_status(
rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc
)
else:
return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc)
|
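A sketch of the intended call site, assuming `stub` and `request` come from generated gRPC client code:

import grpc
from google.api_core.exceptions import from_grpc_error

try:
    response = stub.SomeMethod(request)  # hypothetical stub and request
except grpc.RpcError as rpc_exc:
    raise from_grpc_error(rpc_exc) from rpc_exc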
#vtb
def rpcexec(self, payload):
log.debug(json.dumps(payload))
self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8"))
|
Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error
|
### Input:
Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error
### Response:
#vtb
def rpcexec(self, payload):
log.debug(json.dumps(payload))
self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8"))
|
#vtb
def process_event(self, event, ipmicmd, seldata):
event[] = None
evdata = event[]
if evdata[0] & 0b11000000 == 0b10000000:
event[] = evdata[1]
if evdata[0] & 0b110000 == 0b100000:
event[] = evdata[2]
|
Modify an event according to OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions.
|
### Input:
Modify an event according to OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions.
### Response:
#vtb
def process_event(self, event, ipmicmd, seldata):
event[] = None
evdata = event[]
if evdata[0] & 0b11000000 == 0b10000000:
event[] = evdata[1]
if evdata[0] & 0b110000 == 0b100000:
event[] = evdata[2]
|
#vtb
def set_pump_status(self, status):
self.pump_status = status
_logger.info("%r partition %r", status, self.lease.partition_id)
|
Updates pump status and logs update to console.
|
### Input:
Updates pump status and logs update to console.
### Response:
#vtb
def set_pump_status(self, status):
self.pump_status = status
_logger.info("%r partition %r", status, self.lease.partition_id)
|
#vtb
async def connect(channel: discord.VoiceChannel):
node_ = node.get_node(channel.guild.id)
p = await node_.player_manager.create_player(channel)
return p
|
Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord.
|
### Input:
Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord.
### Response:
#vtb
async def connect(channel: discord.VoiceChannel):
node_ = node.get_node(channel.guild.id)
p = await node_.player_manager.create_player(channel)
return p
|
#vtb
def loop_read(self, max_packets=1):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
|
Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start().
|
### Input:
Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start().
### Response:
#vtb
def loop_read(self, max_packets=1):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
|
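A sketch of the manual read loop the docstring describes, driving the socket with `select()` instead of `loop()`/`loop_start()` (broker address is a placeholder):

import select
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("broker.example.com")  # placeholder broker
sock = client.socket()
while True:
    readable, _, _ = select.select([sock], [], [], 1.0)
    if readable:
        client.loop_read()
    client.loop_misc()  # keepalive and retry housekeeping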
#vtb
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer,
et):
target1 = stypes.stringToCharP(target1)
shape1 = stypes.stringToCharP(shape1)
frame1 = stypes.stringToCharP(frame1)
target2 = stypes.stringToCharP(target2)
shape2 = stypes.stringToCharP(shape2)
frame2 = stypes.stringToCharP(frame2)
abcorr = stypes.stringToCharP(abcorr)
observer = stypes.stringToCharP(observer)
et = ctypes.c_double(et)
occult_code = ctypes.c_int()
libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr,
observer, et, ctypes.byref(occult_code))
return occult_code.value
|
Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int
|
### Input:
Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int
### Response:
#vtb
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer,
et):
target1 = stypes.stringToCharP(target1)
shape1 = stypes.stringToCharP(shape1)
frame1 = stypes.stringToCharP(frame1)
target2 = stypes.stringToCharP(target2)
shape2 = stypes.stringToCharP(shape2)
frame2 = stypes.stringToCharP(frame2)
abcorr = stypes.stringToCharP(abcorr)
observer = stypes.stringToCharP(observer)
et = ctypes.c_double(et)
occult_code = ctypes.c_int()
libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr,
observer, et, ctypes.byref(occult_code))
return occult_code.value
|
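A usage sketch, assuming the required SPICE kernels have already been furnished via a meta-kernel (the path is hypothetical):

import spiceypy as spice

spice.furnsh("example_meta_kernel.tm")  # hypothetical meta-kernel
et = spice.str2et("2011 FEB 15 00:00:00")
code = occult("MOON", "ELLIPSOID", "IAU_MOON",
              "SUN", "ELLIPSOID", "IAU_SUN",
              "LT", "EARTH", et)
# 0 means no occultation; other codes distinguish the partial, annular and
# total cases, per the CSPICE occult_c documentation linked above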
#vtb
def get_default_config(self):
config = super(KafkaConsumerLagCollector, self).get_default_config()
config.update({
: ,
: ,
:
})
return config
|
Returns the default collector settings
|
### Input:
Returns the default collector settings
### Response:
#vtb
def get_default_config(self):
config = super(KafkaConsumerLagCollector, self).get_default_config()
config.update({
: ,
: ,
:
})
return config
|
#vtb
def from_span(cls, inputs, window_length, span, **kwargs):
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
|
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
|
### Input:
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
### Response:
#vtb
def from_span(cls, inputs, window_length, span, **kwargs):
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
|
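For the docstring's span=15, the forwarded decay rate works out to 1 - 2.0/(1 + 15) = 1 - 0.125 = 0.875, which lies in the interval (0, 1] checked by the assert.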
#vtb
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, 'Hz').value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
w, mag, phase = lti.bode(w=frequencies)
if not dB:
mag = 10 ** (mag / 10.)
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline
|
Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
|
### Input:
Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
### Response:
#vtb
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, 'Hz').value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
w, mag, phase = lti.bode(w=frequencies)
if not dB:
mag = 10 ** (mag / 10.)
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline
|
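A usage sketch with a one-pole analog low-pass given as a (numerator, denominator) tuple, assuming gwpy's `BodePlot` is the surrounding class:

from gwpy.plot import BodePlot

# H(s) = 1 / (s + 1)
plot = BodePlot()
magline, phaseline = plot.add_filter(([1.0], [1.0, 1.0]), analog=True, dB=True)
plot.show()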
#vtb
def __update_offsets(self, fileobj, atoms, delta, offset):
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b"stco", True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b"co64", True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b, True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
|
Update offset tables in all 'stco' and 'co64' atoms.
|
### Input:
Update offset tables in all 'stco' and 'co64' atoms.
### Response:
#vtb
def __update_offsets(self, fileobj, atoms, delta, offset):
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b"stco", True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b"co64", True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b, True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
|
#vtb
def from_hdf5_path(cls, hdf5_path):
from keras.models import load_model
hdf5_local_path = BCommon.get_local_file(hdf5_path)
kmodel = load_model(hdf5_local_path)
return kmodel, DefinitionLoader.from_kmodel(kmodel)
|
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
|
### Input:
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
### Response:
#vtb
def from_hdf5_path(cls, hdf5_path):
from keras.models import load_model
hdf5_local_path = BCommon.get_local_file(hdf5_path)
kmodel = load_model(hdf5_local_path)
return kmodel, DefinitionLoader.from_kmodel(kmodel)
|
#vtb
def data(self):
clone = copy.deepcopy(self)
clone._cfg[] = ReturnType.Object
return clone
|
return (data_dict, key) tuple instead of models instances
|
### Input:
return (data_dict, key) tuple instead of models instances
### Response:
#vtb
def data(self):
clone = copy.deepcopy(self)
clone._cfg[] = ReturnType.Object
return clone
|
#vtb
def save(self):
try:
response = requests.post(self._upload_url,
auth=self.jss.session.auth,
verify=self.jss.session.verify,
files=self.resource)
except JSSPostError as error:
if error.status_code == 409:
raise JSSPostError(error)
else:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print "POST: Success"
print response.text.encode("utf-8")
elif response.status_code >= 400:
error_handler(JSSPostError, response)
|
POST the object to the JSS.
|
### Input:
POST the object to the JSS.
### Response:
#vtb
def save(self):
try:
response = requests.post(self._upload_url,
auth=self.jss.session.auth,
verify=self.jss.session.verify,
files=self.resource)
except JSSPostError as error:
if error.status_code == 409:
raise JSSPostError(error)
else:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print "POST: Success"
print response.text.encode("utf-8")
elif response.status_code >= 400:
error_handler(JSSPostError, response)
|
#vtb
def get_permission_request(parser, token):
return PermissionForObjectNode.handle_token(
parser, token, approved=False, name=)
|
Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %}
|
### Input:
Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %}
### Response:
#vtb
def get_permission_request(parser, token):
return PermissionForObjectNode.handle_token(
parser, token, approved=False, name=)
|
#vtb
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None,
language=consts.Language.ENGLISH):
similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure()
text_titles = texts.keys()
text_collection = texts.values()
similarity_measure.set_text_collection(text_collection, language)
i = 0
keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase)
for keyphrase in keyphrases}
total_keyphrases = len(keyphrases)
total_scores = len(text_collection) * total_keyphrases
res = {}
for keyphrase in keyphrases:
if not keyphrase:
continue
res[keyphrase] = {}
for j in xrange(len(text_collection)):
i += 1
logging.progress("Calculating matching scores", i, total_scores)
res[keyphrase][text_titles[j]] = similarity_measure.relevance(
keyphrases_prepared[keyphrase],
text=j, synonimizer=synonimizer)
logging.clear()
return res
|
Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level.
|
### Input:
Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level.
### Response:
#vtb
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None,
language=consts.Language.ENGLISH):
similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure()
text_titles = texts.keys()
text_collection = texts.values()
similarity_measure.set_text_collection(text_collection, language)
i = 0
keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase)
for keyphrase in keyphrases}
total_keyphrases = len(keyphrases)
total_scores = len(text_collection) * total_keyphrases
res = {}
for keyphrase in keyphrases:
if not keyphrase:
continue
res[keyphrase] = {}
for j in xrange(len(text_collection)):
i += 1
logging.progress("Calculating matching scores", i, total_scores)
res[keyphrase][text_titles[j]] = similarity_measure.relevance(
keyphrases_prepared[keyphrase],
text=j, synonimizer=synonimizer)
logging.clear()
return res
|
#vtb
def update(self, *fields):
from mongoframes.queries import to_refs
# Reconstructed from the recoverable tokens '_id', '$set' and 'updated';
# the field-selection logic between the assert and the update was lost in
# extraction, so this body is a plausible sketch, not verbatim source.
assert '_id' in self._document, "Can't update documents without `_id`"
document = to_refs(self._document)
if fields:
    document = {f: document[f] for f in fields}
document.pop('_id', None)
self.get_collection().update_one({'_id': self._id}, {'$set': document})
signal('updated').send(self.__class__, frames=[self])  # blinker signal
|
Update this document. Optionally a specific list of fields to update can
be specified.
|
### Input:
Update this document. Optionally a specific list of fields to update can
be specified.
### Response:
#vtb
def update(self, *fields):
from mongoframes.queries import to_refs
# Reconstructed from the recoverable tokens '_id', '$set' and 'updated';
# the field-selection logic between the assert and the update was lost in
# extraction, so this body is a plausible sketch, not verbatim source.
assert '_id' in self._document, "Can't update documents without `_id`"
document = to_refs(self._document)
if fields:
    document = {f: document[f] for f in fields}
document.pop('_id', None)
self.get_collection().update_one({'_id': self._id}, {'$set': document})
signal('updated').send(self.__class__, frames=[self])  # blinker signal
|
#vtb
def get_client():
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASSWORD,
settings.INFLUXDB_DATABASE,
timeout=settings.INFLUXDB_TIMEOUT,
ssl=getattr(settings, 'INFLUXDB_SSL', False),
verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False),
)
|
Returns an ``InfluxDBClient`` instance.
|
### Input:
Returns an ``InfluxDBClient`` instance.
### Response:
#vtb
def get_client():
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASSWORD,
settings.INFLUXDB_DATABASE,
timeout=settings.INFLUXDB_TIMEOUT,
ssl=getattr(settings, 'INFLUXDB_SSL', False),
verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False),
)
|
#vtb
def parse_clubs(self, clubs_page):
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u, {: }).find(u).find(u).find_all(u, recursive=False)[1]
try:
clubs_header = second_col.find(u, text=u)
character_info[u] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u:
link = curr_elt.find(u)
club_id = int(re.match(r, link.get(u)).group(u))
num_members = int(re.match(r, curr_elt.find(u).text).group(u))
character_info[u].append(self.session.club(club_id).set({: link.text, : num_members}))
curr_elt = curr_elt.nextSibling
except:
if not self.session.suppress_parse_exceptions:
raise
return character_info
|
Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
|
### Input:
Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
### Response:
#vtb
def parse_clubs(self, clubs_page):
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u, {: }).find(u).find(u).find_all(u, recursive=False)[1]
try:
clubs_header = second_col.find(u, text=u)
character_info[u] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u:
link = curr_elt.find(u)
club_id = int(re.match(r, link.get(u)).group(u))
num_members = int(re.match(r, curr_elt.find(u).text).group(u))
character_info[u].append(self.session.club(club_id).set({: link.text, : num_members}))
curr_elt = curr_elt.nextSibling
except:
if not self.session.suppress_parse_exceptions:
raise
return character_info
|
#vtb
def find_l50(contig_lengths_dict, genome_length_dict):
l50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
currentcontig += 1
if currentlength >= genome_length_dict[file_name] * 0.5:
l50_dict[file_name] = currentcontig
break
return l50_dict
|
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50
|
### Input:
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50
### Response:
#vtb
def find_l50(contig_lengths_dict, genome_length_dict):
l50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
currentcontig += 1
if currentlength >= genome_length_dict[file_name] * 0.5:
l50_dict[file_name] = currentcontig
break
return l50_dict
|
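A quick usage sketch for find_l50 above; the strain name and lengths are made-up illustration values, not real assembly data:

contig_lengths_dict = {'strainA': [500, 300, 200, 100]}  # reverse-sorted
genome_length_dict = {'strainA': 1100}
# 500 + 300 = 800 >= 550 (half the genome), so two contigs reach the N50 point
print(find_l50(contig_lengths_dict, genome_length_dict))  # {'strainA': 2}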
#vtb
def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
def clean(varStr):
return re.sub('[^a-zA-Z0-9_]', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
                           DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
    raise ReadError(
        'File parameter file {} not found in {}'.format(
            DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8'))['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
    raise ReadError(
        'File parameter file {} not found in {}'.format(
            DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
    raise ReadError(
        'Multiple mrio archives found in {}'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
    subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
                       DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io
|
Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
later case and if there are several mrio's in the zip file the
parameter 'path_in_arc' need to be specifiec to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file with the systemtype
entry 'IOSystem').
|
### Input:
Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
later case and if there are several mrio's in the zip file the
parameter 'path_in_arc' need to be specifiec to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file with the systemtype
entry 'IOSystem').
### Response:
#vtb
def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
def clean(varStr):
return re.sub('[^a-zA-Z0-9_]', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
                           DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
    raise ReadError(
        'File parameter file {} not found in {}'.format(
            DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8'))['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
    raise ReadError(
        'File parameter file {} not found in {}'.format(
            DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
    raise ReadError(
        'Multiple mrio archives found in {}'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
    subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
                       DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io
|
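A hedged call sketch for load_all; 'mrio_data' and 'emissions' are hypothetical path and subfolder names, not fixed by the function:

io = load_all('mrio_data')                  # core system plus every extension found
io_lean = load_all('mrio_data.zip',
                   include_core=False,      # skip A, L and Z to save memory
                   subfolders=['emissions'])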
#vtb
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
secret = _lazysecret(secret) if lazy else secret
encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
try:
padded = ciphertext + ('=' * (len(ciphertext) % 4))
decoded = base64.urlsafe_b64decode(str(padded))
plaintext = encobj.decrypt(decoded)
except (TypeError, binascii.Error):
raise InvalidKeyError("invalid key")
if checksum:
try:
crc, plaintext = (base64.urlsafe_b64decode(
plaintext[-8:]), plaintext[:-8])
except (TypeError, binascii.Error):
raise CheckSumError("checksum mismatch")
if not crc == _pack_crc(plaintext):
raise CheckSumError("checksum mismatch")
return plaintext
|
Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
|
### Input:
Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
### Response:
#vtb
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
secret = _lazysecret(secret) if lazy else secret
encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
try:
padded = ciphertext + ('=' * (len(ciphertext) % 4))
decoded = base64.urlsafe_b64decode(str(padded))
plaintext = encobj.decrypt(decoded)
except (TypeError, binascii.Error):
raise InvalidKeyError("invalid key")
if checksum:
try:
crc, plaintext = (base64.urlsafe_b64decode(
plaintext[-8:]), plaintext[:-8])
except (TypeError, binascii.Error):
raise CheckSumError("checksum mismatch")
if not crc == _pack_crc(plaintext):
raise CheckSumError("checksum mismatch")
return plaintext
|
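The padding line in decrypt restores the '=' characters urlsafe base64 expects before decoding; a standalone sketch of that step (the token here is just base64 of b'abcd' with its padding stripped):

import base64

token = 'YWJjZA'
padded = token + ('=' * (len(token) % 4))   # 'YWJjZA==', as in decrypt()
print(base64.urlsafe_b64decode(padded))     # b'abcd'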
#vtb
def redfearn(lat, lon, false_easting=None, false_northing=None,
zone=None, central_meridian=None, scale_factor=None):
from math import pi, sqrt, sin, cos, tan
a = 6378137.0
inverse_flattening = 298.257222101
if scale_factor is None:
K0 = 0.9996
else:
K0 = scale_factor
zone_width = 6
longitude_of_central_meridian_zone0 = -183
longitude_of_western_edge_zone0 = -186
if false_easting is None:
false_easting = 500000
if false_northing is None:
if lat < 0:
false_northing = 10000000
else:
false_northing = 0
f = 1.0/inverse_flattening
b = a*(1-f)
e2 = 2*f - f*f
e = sqrt(e2)
e2_ = e2/(1-e2)
e_ = sqrt(e2_)
e4 = e2*e2
e6 = e2*e4
n = (a-b)/(a+b)
n2 = n*n
n3 = n*n2
n4 = n2*n2
G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180
phi = lat*pi/180
sinphi = sin(phi)
sin2phi = sin(2*phi)
sin4phi = sin(4*phi)
sin6phi = sin(6*phi)
cosphi = cos(phi)
cosphi2 = cosphi*cosphi
cosphi3 = cosphi*cosphi2
cosphi4 = cosphi2*cosphi2
cosphi5 = cosphi*cosphi4
cosphi6 = cosphi2*cosphi4
cosphi7 = cosphi*cosphi6
cosphi8 = cosphi4*cosphi4
t = tan(phi)
t2 = t*t
t4 = t2*t2
t6 = t2*t4
rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5
nu = a/(1-e2*sinphi*sinphi)**0.5
psi = nu/rho
psi2 = psi*psi
psi3 = psi*psi2
psi4 = psi2*psi2
A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256
A2 = 3.0/8*(e2+e4/4+15*e6/128)
A4 = 15.0/256*(e4+3*e6/4)
A6 = 35*e6/3072
term1 = a*A0*phi
term2 = -a*A2*sin2phi
term3 = a*A4*sin4phi
term4 = -a*A6*sin6phi
m = term1 + term2 + term3 + term4
if zone is not None and central_meridian is not None:
msg = 'You specified both zone and central_meridian. Provide only one of them.'
raise ValueError(msg)
if zone is None:
zone = int((lon - longitude_of_western_edge_zone0)/zone_width)
if central_meridian is None:
central_meridian = zone*zone_width+longitude_of_central_meridian_zone0
else:
zone = -1
omega = (lon-central_meridian)*pi/180
omega2 = omega*omega
omega3 = omega*omega2
omega4 = omega2*omega2
omega5 = omega*omega4
omega6 = omega3*omega3
omega7 = omega*omega6
omega8 = omega4*omega4
term1 = nu*sinphi*cosphi*omega2/2
term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24
term3 = nu*sinphi*cosphi5*\
(8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\
psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720
term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320
northing = false_northing + K0*(m + term1 + term2 + term3 + term4)
term1 = nu*omega*cosphi
term2 = nu*cosphi3*(psi-t2)*omega3/6
term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120
term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040
easting = false_easting + K0*(term1 + term2 + term3 + term4)
return zone, easting, northing
|
Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
Note that zone and meridian cannot both be specified
|
### Input:
Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
Note that zone and meridian cannot both be specified
### Response:
#vtb
def redfearn(lat, lon, false_easting=None, false_northing=None,
zone=None, central_meridian=None, scale_factor=None):
from math import pi, sqrt, sin, cos, tan
a = 6378137.0
inverse_flattening = 298.257222101
if scale_factor is None:
K0 = 0.9996
else:
K0 = scale_factor
zone_width = 6
longitude_of_central_meridian_zone0 = -183
longitude_of_western_edge_zone0 = -186
if false_easting is None:
false_easting = 500000
if false_northing is None:
if lat < 0:
false_northing = 10000000
else:
false_northing = 0
f = 1.0/inverse_flattening
b = a*(1-f)
e2 = 2*f - f*f
e = sqrt(e2)
e2_ = e2/(1-e2)
e_ = sqrt(e2_)
e4 = e2*e2
e6 = e2*e4
n = (a-b)/(a+b)
n2 = n*n
n3 = n*n2
n4 = n2*n2
G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180
phi = lat*pi/180
sinphi = sin(phi)
sin2phi = sin(2*phi)
sin4phi = sin(4*phi)
sin6phi = sin(6*phi)
cosphi = cos(phi)
cosphi2 = cosphi*cosphi
cosphi3 = cosphi*cosphi2
cosphi4 = cosphi2*cosphi2
cosphi5 = cosphi*cosphi4
cosphi6 = cosphi2*cosphi4
cosphi7 = cosphi*cosphi6
cosphi8 = cosphi4*cosphi4
t = tan(phi)
t2 = t*t
t4 = t2*t2
t6 = t2*t4
rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5
nu = a/(1-e2*sinphi*sinphi)**0.5
psi = nu/rho
psi2 = psi*psi
psi3 = psi*psi2
psi4 = psi2*psi2
A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256
A2 = 3.0/8*(e2+e4/4+15*e6/128)
A4 = 15.0/256*(e4+3*e6/4)
A6 = 35*e6/3072
term1 = a*A0*phi
term2 = -a*A2*sin2phi
term3 = a*A4*sin4phi
term4 = -a*A6*sin6phi
m = term1 + term2 + term3 + term4
if zone is not None and central_meridian is not None:
msg = 'You specified both zone and central_meridian. Provide only one of them.'
raise ValueError(msg)
if zone is None:
zone = int((lon - longitude_of_western_edge_zone0)/zone_width)
if central_meridian is None:
central_meridian = zone*zone_width+longitude_of_central_meridian_zone0
else:
zone = -1
omega = (lon-central_meridian)*pi/180
omega2 = omega*omega
omega3 = omega*omega2
omega4 = omega2*omega2
omega5 = omega*omega4
omega6 = omega3*omega3
omega7 = omega*omega6
omega8 = omega4*omega4
term1 = nu*sinphi*cosphi*omega2/2
term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24
term3 = nu*sinphi*cosphi5*\
(8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\
psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720
term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320
northing = false_northing + K0*(m + term1 + term2 + term3 + term4)
term1 = nu*omega*cosphi
term2 = nu*cosphi3*(psi-t2)*omega3/6
term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120
term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040
easting = false_easting + K0*(term1 + term2 + term3 + term4)
return zone, easting, northing
|
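A usage sketch for redfearn; the coordinates are arbitrary southern-hemisphere test values and the printed comments are indicative:

zone, easting, northing = redfearn(-37.81, 144.96)
print(zone)   # 55, i.e. int((144.96 + 186) / 6)
zone2, e2, n2 = redfearn(-37.81, 144.96, central_meridian=145.0)
print(zone2)  # -1 flags the non-UTM projection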
#vtb
def post(self, url, data, params=None):
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False)
|
Initiate a POST request
|
### Input:
Initiate a POST request
### Response:
#vtb
def post(self, url, data, params=None):
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False)
|
#vtb
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
    print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
|
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
|
### Input:
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
### Response:
#vtb
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
    print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
|
#vtb
def _prm_get_longest_stringsize(string_list):
maxlength = 1
for stringar in string_list:
if isinstance(stringar, np.ndarray):
if stringar.ndim > 0:
for string in stringar.ravel():
maxlength = max(len(string), maxlength)
else:
maxlength = max(len(stringar.tolist()), maxlength)
else:
maxlength = max(len(stringar), maxlength)
return int(maxlength * 1.5)
|
Returns the longest string size for a string entry across data.
|
### Input:
Returns the longest string size for a string entry across data.
### Response:
#vtb
def _prm_get_longest_stringsize(string_list):
maxlength = 1
for stringar in string_list:
if isinstance(stringar, np.ndarray):
if stringar.ndim > 0:
for string in stringar.ravel():
maxlength = max(len(string), maxlength)
else:
maxlength = max(len(stringar.tolist()), maxlength)
else:
maxlength = max(len(stringar), maxlength)
return int(maxlength * 1.5)
|
#vtb
def line_spacing(self):
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule)
|
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
|
### Input:
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
### Response:
#vtb
def line_spacing(self):
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule)
|
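A hedged reading of the property above in python-docx terms (Document and Pt are real python-docx imports; 'report.docx' is a hypothetical file):

from docx import Document
from docx.shared import Pt

doc = Document('report.docx')
pf = doc.paragraphs[0].paragraph_format
if pf.line_spacing is None:
    print('inherited from the style hierarchy')
elif isinstance(pf.line_spacing, float):
    print('multiple of line height:', pf.line_spacing)  # e.g. 1.75
else:
    print('fixed height:', pf.line_spacing)             # a Length, e.g. Pt(12)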
#vtb
def info(self, msg):
self._execActions('info', msg)
msg = self._execFilters('info', msg)
self._processMsg('info', msg)
self._sendMsg('info', msg)
|
Log Info Messages
|
### Input:
Log Info Messages
### Response:
#vtb
def info(self, msg):
self._execActions('info', msg)
msg = self._execFilters('info', msg)
self._processMsg('info', msg)
self._sendMsg('info', msg)
|
#vtb
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams
|
big 1d model for conditional image generation. 2.99 on cifar10.
|
### Input:
big 1d model for conditional image generation. 2.99 on cifar10.
### Response:
#vtb
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams
|
#vtb
def _add_secondary_if_exists(secondary, out, get_retriever):
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out
|
Add secondary files only if present locally or remotely.
|
### Input:
Add secondary files only if present locally or remotely.
### Response:
#vtb
def _add_secondary_if_exists(secondary, out, get_retriever):
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out
|
#vtb
def _produceIt(self, segments, thunk):
if not self.prefixURL:
needle = ()
else:
needle = tuple(self.prefixURL.split('/'))
S = len(needle)
if segments[:S] == needle:
if segments == JUST_SLASH:
subsegments = segments
else:
subsegments = segments[S:]
res = thunk()
if res is not None:
return res, subsegments
|
Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
|
### Input:
Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
### Response:
#vtb
def _produceIt(self, segments, thunk):
if not self.prefixURL:
needle = ()
else:
needle = tuple(self.prefixURL.split('/'))
S = len(needle)
if segments[:S] == needle:
if segments == JUST_SLASH:
subsegments = segments
else:
subsegments = segments[S:]
res = thunk()
if res is not None:
return res, subsegments
|
#vtb
def generate_csr(self, basename='djangoafip'):
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr
|
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
|
### Input:
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
### Response:
#vtb
def generate_csr(self, basename='djangoafip'):
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr
|
#vtb
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
yield from self.synchroniser.wait(spec.BasicQosOK)
self.reader.ready()
|
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
|
### Input:
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
### Response:
#vtb
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
yield from self.synchroniser.wait(spec.BasicQosOK)
self.reader.ready()
|
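A generator-style usage sketch matching the yield-from idiom above; the channel object is assumed to come from the surrounding library's connection setup:

import asyncio

@asyncio.coroutine
def tune_consumer(channel):
    # let the server pre-deliver at most 50 whole messages per consumer
    yield from channel.set_qos(prefetch_count=50)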
#vtb
def find_channel_groups(chan):
labels = chan.return_label()
group_names = {match(r'([A-Za-z ]+)', label).group(1) for label in labels}
groups = {}
for group_name in group_names:
groups[group_name] = [label for label in labels if label.startswith(group_name)]
return groups
|
Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
|
### Input:
Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
### Response:
#vtb
def find_channel_groups(chan):
labels = chan.return_label()
group_names = {match(r'([A-Za-z ]+)', label).group(1) for label in labels}
groups = {}
for group_name in group_names:
groups[group_name] = [label for label in labels if label.startswith(group_name)]
return groups
|
#vtb
def _update_remote_children(remote_parent, children):
name_to_child = _name_to_child_map(children)
for remote_child in remote_parent.children:
local_child = name_to_child.get(remote_child.name)
if local_child:
local_child.update_remote_ids(remote_child)
|
Update remote_ids based on on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
|
### Input:
Update remote_ids based on on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
### Response:
#vtb
def _update_remote_children(remote_parent, children):
name_to_child = _name_to_child_map(children)
for remote_child in remote_parent.children:
local_child = name_to_child.get(remote_child.name)
if local_child:
local_child.update_remote_ids(remote_child)
|
#vtb
def from_string(cls, address, case_sensitive=False):
assert isinstance(address, str), 'address must be a string'
username, domainname = address.split('@')
return cls(username, domainname, case_sensitive=case_sensitive)
|
Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
|
### Input:
Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
### Response:
#vtb
def from_string(cls, address, case_sensitive=False):
assert isinstance(address, str), 'address must be a string'
username, domainname = address.split('@')
return cls(username, domainname, case_sensitive=case_sensitive)
|
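Usage sketch, assuming the enclosing class is the Account named in the docstring:

acct = Account.from_string('alice@example.com')
# equivalent to Account('alice', 'example.com', case_sensitive=False)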
#vtb
def _xysxy2(date):
planets = _planets(date)
x_tab, y_tab, s_tab = _tab('tab5.2a.txt'), _tab('tab5.2b.txt'), _tab('tab5.2d.txt')
ttt = date.change_scale('TT').julian_century
X = -16616.99 + 2004191742.88 * ttt - 427219.05 * ttt ** 2 - 198620.54 * ttt ** 3\
- 46.05 * ttt ** 4 + 5.98 * ttt ** 5
Y = -6950.78 - 25381.99 * ttt - 22407250.99 * ttt ** 2 + 1842.28 * ttt ** 3\
+ 1113.06 * ttt ** 4 + 0.99 * ttt ** 5
s_xy2 = 94.0 + 3808.65 * ttt - 122.68 * ttt ** 2 - 72574.11 * ttt ** 3\
+ 27.98 * ttt ** 4 + 15.62 * ttt ** 5
for j in range(5):
_x, _y, _s = 0, 0, 0
for i in range(len(x_tab[j])):
Axs, Axc, *p_coefs = x_tab[j][i]
ax_p = np.dot(p_coefs, planets)
_x += Axs * np.sin(ax_p) + Axc * np.cos(ax_p)
for i in range(len(y_tab[j])):
Ays, Ayc, *p_coefs = y_tab[j][i]
ay_p = np.dot(p_coefs, planets)
_y += Ays * np.sin(ay_p) + Ayc * np.cos(ay_p)
for i in range(len(s_tab[j])):
Ass, Asc, *p_coefs = s_tab[j][i]
as_p = np.dot(p_coefs, planets)
_s += Ass * np.sin(as_p) + Asc * np.cos(as_p)
X += _x * ttt ** j
Y += _y * ttt ** j
s_xy2 += _s * ttt ** j
return X * 1e-6, Y * 1e-6, s_xy2 * 1e-6
|
Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
|
### Input:
Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
### Response:
#vtb
def _xysxy2(date):
planets = _planets(date)
x_tab, y_tab, s_tab = _tab('tab5.2a.txt'), _tab('tab5.2b.txt'), _tab('tab5.2d.txt')
ttt = date.change_scale('TT').julian_century
X = -16616.99 + 2004191742.88 * ttt - 427219.05 * ttt ** 2 - 198620.54 * ttt ** 3\
- 46.05 * ttt ** 4 + 5.98 * ttt ** 5
Y = -6950.78 - 25381.99 * ttt - 22407250.99 * ttt ** 2 + 1842.28 * ttt ** 3\
+ 1113.06 * ttt ** 4 + 0.99 * ttt ** 5
s_xy2 = 94.0 + 3808.65 * ttt - 122.68 * ttt ** 2 - 72574.11 * ttt ** 3\
+ 27.98 * ttt ** 4 + 15.62 * ttt ** 5
for j in range(5):
_x, _y, _s = 0, 0, 0
for i in range(len(x_tab[j])):
Axs, Axc, *p_coefs = x_tab[j][i]
ax_p = np.dot(p_coefs, planets)
_x += Axs * np.sin(ax_p) + Axc * np.cos(ax_p)
for i in range(len(y_tab[j])):
Ays, Ayc, *p_coefs = y_tab[j][i]
ay_p = np.dot(p_coefs, planets)
_y += Ays * np.sin(ay_p) + Ayc * np.cos(ay_p)
for i in range(len(s_tab[j])):
Ass, Asc, *p_coefs = s_tab[j][i]
as_p = np.dot(p_coefs, planets)
_s += Ass * np.sin(as_p) + Asc * np.cos(as_p)
X += _x * ttt ** j
Y += _y * ttt ** j
s_xy2 += _s * ttt ** j
return X * 1e-6, Y * 1e-6, s_xy2 * 1e-6
|
#vtb
def get_factors_iterative2(n):
ans, stack, x = [], [], 2
while True:
if x > n // x:
if not stack:
return ans
ans.append(stack + [n])
x = stack.pop()
n *= x
x += 1
elif n % x == 0:
stack.append(x)
n //= x
else:
x += 1
|
[summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n]
|
### Input:
[summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n]
### Response:
#vtb
def get_factors_iterative2(n):
ans, stack, x = [], [], 2
while True:
if x > n // x:
if not stack:
return ans
ans.append(stack + [n])
x = stack.pop()
n *= x
x += 1
elif n % x == 0:
stack.append(x)
n //= x
else:
x += 1
|
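A worked example for the factor enumerator above; this is the list the loop actually produces for n=12:

print(get_factors_iterative2(12))
# [[2, 2, 3], [2, 6], [3, 4]]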
#vtb
def incremental_a_value(bval, min_mag, mag_inc):
a_cum = 10. ** (bval * min_mag)
a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) -
(10. ** (-bval * mag_inc)))
return a_inc
|
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table
|
### Input:
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table
### Response:
#vtb
def incremental_a_value(bval, min_mag, mag_inc):
a_cum = 10. ** (bval * min_mag)
a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) -
(10. ** (-bval * mag_inc)))
return a_inc
|
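A numeric sanity check for the conversion above (requires numpy; the values are illustrative):

import numpy as np

bval, min_mag, mag_inc = 1.0, 5.0, 0.1
# a_cum = 10 ** 5 = 100000.0; the log10 correction is about -0.333
print(incremental_a_value(bval, min_mag, mag_inc))  # ~99999.667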
#vtb
def _make_input(self, action, old_quat):
return {
"dpos": action[:3],
"rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
}
|
Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat.
|
### Input:
Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat.
### Response:
#vtb
def _make_input(self, action, old_quat):
return {
"dpos": action[:3],
"rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
}
|
#vtb
def _elements(cls):
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
return elements
|
find the elements with controls
|
### Input:
find the elements with controls
### Response:
#vtb
def _elements(cls):
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
return elements
|
#vtb
def hide_me(tb, g=globals()):
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb
|
Hide stack traceback of given stack
|
### Input:
Hide stack traceback of given stack
### Response:
#vtb
def hide_me(tb, g=globals()):
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb
|
#vtb
def namelist(self):
names = []
for member in self.filelist:
names.append(member.filename)
return names
|
Return a list of file names in the archive.
|
### Input:
Return a list of file names in the archive.
### Response:
#vtb
def namelist(self):
names = []
for member in self.filelist:
names.append(member.filename)
return names
|
#vtb
def namespace(self):
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix)
|
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
|
### Input:
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
### Response:
#vtb
def namespace(self):
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix)
|
#vtb
async def delayNdefProps(self):
async with self.getTempSlab() as slab:
seqn = s_slabseqn.SlabSeqn(slab, 'ndef')
self.ndefdelay = seqn
yield
self.ndefdelay = None
logger.info('Applying delayed ndef property edits')
for i, (oldv, newv) in seqn.iter(0):
    await self.editNdefProps(oldv, newv)
    if i and i % _progress == 0:
        logger.info(f'... {i} delayed ndef property edits applied')
|
Hold this during a series of renames to delay ndef
secondary property processing until the end....
|
### Input:
Hold this during a series of renames to delay ndef
secondary property processing until the end....
### Response:
#vtb
async def delayNdefProps(self):
async with self.getTempSlab() as slab:
seqn = s_slabseqn.SlabSeqn(slab, 'ndef')
self.ndefdelay = seqn
yield
self.ndefdelay = None
logger.info('Applying delayed ndef property edits')
for i, (oldv, newv) in seqn.iter(0):
    await self.editNdefProps(oldv, newv)
    if i and i % _progress == 0:
        logger.info(f'... {i} delayed ndef property edits applied')
|
#vtb
def p_FuncDef(p):
p[0] = FuncDef(p[2], p[3], p[5], p[8], p[9], p[10])
|
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
|
### Input:
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
### Response:
#vtb
def p_FuncDef(p):
p[0] = FuncDef(p[2], p[3], p[5], p[8], p[9], p[10])
|
#vtb
def get(self, rid, data_callback=None, raise_on_error=True):
cached_data = None
ds_data = self.ds.get(rid, raise_on_error=False)
if ds_data is not None:
expired = True
if ds_data.get('found') is True:
    if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
        cached_data = ds_data.get('_source', {}).get('cache-data')
        expired = False
        self.tcex.log.debug('Using cached data for ({}).'.format(rid))
    else:
        self.tcex.log.debug('Cached data expired for ({}).'.format(rid))
if expired or ds_data.get('found') is False:
    if callable(data_callback):
        cached_data = data_callback(rid)
        self.tcex.log.debug('Retrieved fresh data for ({}).'.format(rid))
if cached_data:
self.update(rid, cached_data, raise_on_error)
return cached_data
|
Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
Returns:
object : Python request response.
|
### Input:
Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
Returns:
object : Python request response.
### Response:
#vtb
def get(self, rid, data_callback=None, raise_on_error=True):
cached_data = None
ds_data = self.ds.get(rid, raise_on_error=False)
if ds_data is not None:
expired = True
if ds_data.get('found') is True:
    if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
        cached_data = ds_data.get('_source', {}).get('cache-data')
        expired = False
        self.tcex.log.debug('Using cached data for ({}).'.format(rid))
    else:
        self.tcex.log.debug('Cached data expired for ({}).'.format(rid))
if expired or ds_data.get('found') is False:
    if callable(data_callback):
        cached_data = data_callback(rid)
        self.tcex.log.debug('Retrieved fresh data for ({}).'.format(rid))
if cached_data:
self.update(rid, cached_data, raise_on_error)
return cached_data
|
#vtb
def get_region_from_metadata():
global __Location__
if __Location__ == 'do-not-get-region-from-metadata':
    log.debug('Previously failed to get AWS region from metadata. Not trying again.')
    return None
if __Location__ != '':
    return __Location__
try:
    result = requests.get(
        "http://169.254.169.254/latest/dynamic/instance-identity/document",
        proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
    )
except requests.exceptions.RequestException:
    log.warning('Failed to get AWS region from instance metadata.', exc_info=True)
    __Location__ = 'do-not-get-region-from-metadata'
    return None
try:
    region = result.json()['region']
    __Location__ = region
    return __Location__
except (ValueError, KeyError):
    log.warning('Failed to decode region from instance metadata.')
return None
return None
|
Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6
|
### Input:
Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6
### Response:
#vtb
def get_region_from_metadata():
global __Location__
if __Location__ == 'do-not-get-region-from-metadata':
    log.debug('Previously failed to get AWS region from metadata. Not trying again.')
    return None
if __Location__ != '':
    return __Location__
try:
    result = requests.get(
        "http://169.254.169.254/latest/dynamic/instance-identity/document",
        proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
    )
except requests.exceptions.RequestException:
    log.warning('Failed to get AWS region from instance metadata.', exc_info=True)
    __Location__ = 'do-not-get-region-from-metadata'
    return None
try:
    region = result.json()['region']
    __Location__ = region
    return __Location__
except (ValueError, KeyError):
    log.warning('Failed to decode region from instance metadata.')
return None
return None
|
#vtb
def _create_doc(self):
root = etree.Element()
root.set(, )
root.set(, self.name)
return root
|
Create document.
:return:
|
### Input:
Create document.
:return:
### Response:
#vtb
def _create_doc(self):
root = etree.Element()
root.set(, )
root.set(, self.name)
return root
|
#vtb
def licenses(self):
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
|
OSI Approved license.
|
### Input:
OSI Approved license.
### Response:
#vtb
def licenses(self):
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
|
#vtb
def as_proto(self):
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
)
|
Returns this shape as a `TensorShapeProto`.
|
### Input:
Returns this shape as a `TensorShapeProto`.
### Response:
#vtb
def as_proto(self):
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
)
|
#vtb
def cmd_rollback(self, name):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
                migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
                migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
|
Rollback migrations.
|
### Input:
Rollback migrations.
### Response:
#vtb
def cmd_rollback(self, name):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
                migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
                migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
|
#vtb
def getPropAllSupers(self, aURI):
aURI = aURI
try:
qres = self.rdflib_graph.query(
    """SELECT DISTINCT ?x
       WHERE { <%s> rdfs:subPropertyOf+ ?x }""" % (aURI))
except:
printDebug(
"... warning: the query failed (maybe missing SPARQL 1.1 support?)"
)
qres = []
return list(qres)
|
note: requires SPARQL 1.1
2015-06-04: currently not used, inferred from above
|
### Input:
note: requires SPARQL 1.1
2015-06-04: currently not used, inferred from above
### Response:
#vtb
def getPropAllSupers(self, aURI):
aURI = aURI
try:
qres = self.rdflib_graph.query(
    """SELECT DISTINCT ?x
       WHERE { <%s> rdfs:subPropertyOf+ ?x }""" % (aURI))
except:
printDebug(
"... warning: the query failed (maybe missing SPARQL 1.1 support?)"
)
qres = []
return list(qres)
|
#vtb
def _value_format(self, value):
return '%s: %s' % (
    self.area_names.get(self.adapt_code(value[0]), '?'),
    self._y_format(value[1])
)
|
Format value for map value display.
|
### Input:
Format value for map value display.
### Response:
#vtb
def _value_format(self, value):
return '%s: %s' % (
    self.area_names.get(self.adapt_code(value[0]), '?'),
    self._y_format(value[1])
)
|
#vtb
def delete(self):
if self in self._parent.vlan_interface:
self._parent.data[] = [
v for v in self._parent.vlan_interface
if v != self]
self.update()
for route in self._parent._engine.routing:
if route.to_delete:
route.delete()
|
Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None
|
### Input:
Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None
### Response:
#vtb
def delete(self):
if self in self._parent.vlan_interface:
self._parent.data[] = [
v for v in self._parent.vlan_interface
if v != self]
self.update()
for route in self._parent._engine.routing:
if route.to_delete:
route.delete()
|
#vtb
def connect(self, deleteOldVersions=False, recreate=False):
with ConnectionFactory.get() as conn:
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return
|
Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
|
### Input:
Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
### Response:
#vtb
def connect(self, deleteOldVersions=False, recreate=False):
with ConnectionFactory.get() as conn:
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return
|
#vtb
def iter_insert_items(tree):
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values do not match attributes " "" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found")
|
Iterate over the items to insert from an INSERT statement
|
### Input:
Iterate over the items to insert from an INSERT statement
### Response:
#vtb
def iter_insert_items(tree):
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values do not match attributes " "" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found")
|
#vtb
def sha256(message, encoder=nacl.encoding.HexEncoder):
return encoder.encode(nacl.bindings.crypto_hash_sha256(message))
|
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
|
### Input:
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
### Response:
#vtb
def sha256(message, encoder=nacl.encoding.HexEncoder):
return encoder.encode(nacl.bindings.crypto_hash_sha256(message))
|
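Usage sketch; the empty-message digest below is the standard SHA-256 test vector:

digest = sha256(b'')
# b'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'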
#vtb
def _remove_list_item(self, beacon_config, label):
index = self._get_index(beacon_config, label)
del beacon_config[index]
|
Remove an item from a beacon config list
|
### Input:
Remove an item from a beacon config list
### Response:
#vtb
def _remove_list_item(self, beacon_config, label):
index = self._get_index(beacon_config, label)
del beacon_config[index]
|
#vtb
def extended_arg_patterns(self):
for arg in self._arg_iterator(self.args):
if isinstance(arg, Pattern):
if arg.mode > self.single:
while True:
yield arg
else:
yield arg
else:
yield arg
|
Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
|
### Input:
Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
### Response:
#vtb
def extended_arg_patterns(self):
for arg in self._arg_iterator(self.args):
if isinstance(arg, Pattern):
if arg.mode > self.single:
while True:
yield arg
else:
yield arg
else:
yield arg
|
#vtb
def get_still_seg_belonged(dt_str, seg_duration, fmt='%Y-%m-%d %H:%M:%S'):
dt = time_util.str_to_datetime(dt_str, fmt)
minutes_of_day = time_util.get_minutes_of_day(dt)
return time_util.minutes_to_time_str(
minutes_of_day - minutes_of_day % seg_duration)
|
Get the non-sliding time segment that the given moment belongs to
:param dt_str: datetime string, eg: 2016-10-31 12:22:11
:param seg_duration: segment length, unit: minutes
:param fmt: datetime string format
:return:
|
### Input:
Get the non-sliding time segment that the given moment belongs to
:param dt_str: datetime string, eg: 2016-10-31 12:22:11
:param seg_duration: segment length, unit: minutes
:param fmt: datetime string format
:return:
### Response:
#vtb
def get_still_seg_belonged(dt_str, seg_duration, fmt='%Y-%m-%d %H:%M:%S'):
dt = time_util.str_to_datetime(dt_str, fmt)
minutes_of_day = time_util.get_minutes_of_day(dt)
return time_util.minutes_to_time_str(
minutes_of_day - minutes_of_day % seg_duration)
|
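A worked example under the assumed time_util semantics: 12:22 is 742 minutes into the day, so with 30-minute segments the floor is 742 - 742 % 30 = 720 minutes, i.e. 12:00:

print(get_still_seg_belonged("2016-10-31 12:22:11", 30))  # -> "12:00"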
#vtb
def check_calendar(self, ds):
valid_calendars = [
'gregorian',
'standard',
'proleptic_gregorian',
'noleap',
'365_day',
'all_leap',
'366_day',
'360_day',
'julian',
'none',
]
ret_val = []
for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
reasoning = None
valid_calendar = time_var.calendar in valid_calendars
if not valid_calendar:
reasoning = ["§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar" % (time_var.name, time_var.calendar)]
result = Result(BaseCheck.LOW,
valid_calendar,
self.section_titles['4.4'],
reasoning)
ret_val.append(result)
return ret_val
|
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
|
### Input:
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
### Response:
#vtb
def check_calendar(self, ds):
valid_calendars = [
'gregorian',
'standard',
'proleptic_gregorian',
'noleap',
'365_day',
'all_leap',
'366_day',
'360_day',
'julian',
'none',
]
ret_val = []
for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
reasoning = None
valid_calendar = time_var.calendar in valid_calendars
if not valid_calendar:
reasoning = ["§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar" % (time_var.name, time_var.calendar)]
result = Result(BaseCheck.LOW,
valid_calendar,
self.section_titles['4.4'],
reasoning)
ret_val.append(result)
return ret_val
|
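A minimal sketch (assuming netCDF4 is available) of a time variable that this check would pass:

import netCDF4

ds = netCDF4.Dataset("inmemory.nc", "w", diskless=True)
ds.createDimension("time", None)
time = ds.createVariable("time", "f8", ("time",))
time.units = "days since 1970-01-01 00:00:00"
time.calendar = "gregorian"   # any of the CF-valid names above would pass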
#vtb
def asserts(self, *args, **kwargs):
result = self.match(*args, **kwargs)
self.expect(result)
return result
|
Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change())
|
### Input:
Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change())
### Response:
#vtb
def asserts(self, *args, **kwargs):
result = self.match(*args, **kwargs)
self.expect(result)
return result
|
#vtb
def tasks(self):
task_input = {: }
output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
return output[][]
|
Returns a list of all tasks known to the engine.
:return: A list of task names.
|
### Input:
Returns a list of all tasks known to the engine.
:return: A list of task names.
### Response:
#vtb
def tasks(self):
task_input = {: }
output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
return output[][]
|
#vtb
def unpitched_low(dur, idx):
env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2
freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi))
result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5
return list(result)
|
Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note.
|
### Input:
Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note.
### Response:
#vtb
def unpitched_low(dur, idx):
env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2
freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi))
result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5
return list(result)
|
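The same idea sketched with plain numpy (a simplification, not the audiolazy version above: the squared envelope and the detuned oscillator pair are kept, while the randomized frequency drift is reduced to a constant base):

import numpy as np

def unpitched_low_np(dur, rate=44100, base=50.0):
    t = np.arange(dur) / rate
    env = np.sin(np.pi * np.arange(dur) / dur) ** 2          # one squared half-sine
    tone = np.sin(2 * np.pi * base * t) + np.sin(2 * np.pi * base * 1.1 * t)
    return tone * env * 0.5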
#vtb
def p_case_list(p):
if len(p) == 6:
p[0] = p[1] + [ast.Case(p[3], p[5], lineno=p.lineno(2))]
elif len(p) == 5:
p[0] = p[1] + [ast.Default(p[4], lineno=p.lineno(2))]
else:
p[0] = []
|
case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list
|
### Input:
case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list
### Response:
#vtb
def p_case_list(p):
if len(p) == 6:
p[0] = p[1] + [ast.Case(p[3], p[5], lineno=p.lineno(2))]
elif len(p) == 5:
p[0] = p[1] + [ast.Default(p[4], lineno=p.lineno(2))]
else:
p[0] = []
|
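In ply.yacc the grammar production lives in the rule function's docstring, which is why the docstring column above reads as BNF; a minimal stand-alone rule for comparison:

def p_expr_plus(p):
    """expr : expr PLUS term"""
    p[0] = ("+", p[1], p[3])   # p[0] is the rule's value; p[1..n] are the RHS symbols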
#vtb
def create_lv(self, name, length, units):
if units != "%":
size = size_units[units] * length
else:
if not (0 < length <= 100) or type(length) is float:
raise ValueError("Length not supported.")
size = (self.size("B") / 100) * length
self.open()
lvh = lvm_vg_create_lv_linear(self.handle, name, c_ulonglong(size))
if not bool(lvh):
self.close()
raise CommitError("Failed to create LV.")
lv = LogicalVolume(self, lvh=lvh)
self.close()
return lv
|
Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised.
|
### Input:
Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised.
### Response:
#vtb
def create_lv(self, name, length, units):
if units != "%":
size = size_units[units] * length
else:
if not (0 < length <= 100) or type(length) is float:
raise ValueError("Length not supported.")
size = (self.size("B") / 100) * length
self.open()
lvh = lvm_vg_create_lv_linear(self.handle, name, c_ulonglong(size))
if not bool(lvh):
self.close()
raise CommitError("Failed to create LV.")
lv = LogicalVolume(self, lvh=lvh)
self.close()
return lv
|
#vtb
def clean(bundle, before, after, keep_last):
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
|
Clean up data downloaded with the ingest command.
|
### Input:
Clean up data downloaded with the ingest command.
### Response:
#vtb
def clean(bundle, before, after, keep_last):
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
|
#vtb
def parse_child_elements(self, element):
for child in element.iterchildren():
self.parsers[child.tag](child)
|
parses all children of an etree element
|
### Input:
parses all children of an etree element
### Response:
#vtb
def parse_child_elements(self, element):
for child in element.iterchildren():
self.parsers[child.tag](child)
|
#vtb
def make_dependent(self, source, target, action):
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators()
|
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
|
### Input:
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
### Response:
#vtb
def make_dependent(self, source, target, action):
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators()
|
#vtb
def _update_dPrxy(self):
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1
|
Update `dPrxy`.
|
### Input:
Update `dPrxy`.
### Response:
#vtb
def _update_dPrxy(self):
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1
|
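Read directly off the loop body above, the derivative being filled in is the following, where a(x) denotes the amino acid encoded by codon x and the zero case is implicit in the arrays initialized with fill(0):

$$
\frac{\partial P_{r,xy}}{\partial \zeta_{r,i}}
  = \tilde F_{r,xy}\, Q_{xy}\,\bigl(\delta(y) - \delta(x)\bigr),
\qquad
\delta(z) =
\begin{cases}
  1/\zeta_{r,i} & a(z) > i \\
  1/(\zeta_{r,i} - 1) & a(z) = i \\
  0 & \text{otherwise.}
\end{cases}
$$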
#vtb
def _param_callback(self, name, value):
print('{0}: {1}'.format(name, value))
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g,
cb=self._param_callback)
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude',
name='pitch_kd',
cb=self._a_pitch_kd_callback)
self._cf.param.set_value('pid_attitude.pitch_kd',
'{:.2f}'.format(pkd))
|
Generic callback registered for all the groups
|
### Input:
Generic callback registered for all the groups
### Response:
#vtb
def _param_callback(self, name, value):
print('{0}: {1}'.format(name, value))
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g,
cb=self._param_callback)
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude',
name='pitch_kd',
cb=self._a_pitch_kd_callback)
self._cf.param.set_value('pid_attitude.pitch_kd',
'{:.2f}'.format(pkd))
|
#vtb
def StringIO(*args, **kw):
global StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return StringIO(*args,**kw)
|
Thunk to load the real StringIO on demand
|
### Input:
Thunk to load the real StringIO on demand
### Response:
#vtb
def StringIO(*args, **kw):
global StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return StringIO(*args,**kw)
|
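The same self-replacing thunk idiom written for Python 3, where only io.StringIO exists (a sketch of the pattern, not part of the original module):

def StringIO(*args, **kw):
    global StringIO
    from io import StringIO   # rebinds the module-level name on first call
    return StringIO(*args, **kw)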
#vtb
def _check_neg(self, level, *tokens):
for record in self.records:
if level is not None and record.levelno != level:
continue
if all(token in record.message for token in tokens):
break
else:
return
msg = "Tokens {} found in the following record: {} {!r}".format(
tokens, record.levelname, record.message)
self.test_instance.fail(msg)
|
Check that the different tokens were NOT logged in one record, assert by level.
|
### Input:
Check that the different tokens were NOT logged in one record, assert by level.
### Response:
#vtb
def _check_neg(self, level, *tokens):
for record in self.records:
if level is not None and record.levelno != level:
continue
if all(token in record.message for token in tokens):
break
else:
return
msg = "Tokens {} found in the following record: {} {!r}".format(
tokens, record.levelname, record.message)
self.test_instance.fail(msg)
|
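The method leans on Python's for/else: the else suite runs only when the loop finishes without hitting break, i.e. when no record matched all tokens. In isolation:

for record in ["a b", "b c"]:
    if "x" in record:
        break
else:
    print("no match found")   # executed because the loop never hit break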
#vtb
def _set_get_vnetwork_hosts(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': 'get_vnetwork_hosts must be of a type compatible with rpc',
'defined-type': "rpc",
'generated-type': ,
})
self.__get_vnetwork_hosts = t
if hasattr(self, '_set'):
self._set()
|
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
|
### Input:
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
### Response:
#vtb
def _set_get_vnetwork_hosts(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': 'get_vnetwork_hosts must be of a type compatible with rpc',
'defined-type': "rpc",
'generated-type': ,
})
self.__get_vnetwork_hosts = t
if hasattr(self, '_set'):
self._set()
|
#vtb
def assert_reset(self, asserted):
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc)
|
Assert or de-assert target reset line
|
### Input:
Assert or de-assert target reset line
### Response:
#vtb
def assert_reset(self, asserted):
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc)
|
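A hedged usage sketch (the `target` object and the timing are assumptions; a pyOCD-style API as suggested by the method above): pulse the reset line, then release it:

import time

target.assert_reset(True)    # drive the reset line; cached registers are invalidated
time.sleep(0.01)
target.assert_reset(False)   # release reset and let the core run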