text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def multicomp(pvals, alpha=0.05, method='holm'):
"""P-values correction for multiple comparisons.
Parameters
----------
pvals : array_like
uncorrected p-values.
alpha : float
Significance level.
method : string
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are ::
'bonf' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
'none' : pass-through option (no correction applied)
Returns
-------
reject : array, boolean
True for hypotheses that can be rejected for the given alpha.
pvals_corrected : array
P-values corrected for multiple testing.
See Also
--------
bonf : Bonferroni correction
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
pairwise_ttests : Pairwise post-hoc T-tests
Notes
-----
This function is similar to the `p.adjust` R function.
The correction methods include the Bonferroni correction ("bonf")
in which the p-values are multiplied by the number of comparisons.
Less conservative methods are also included, such as Holm (1979) ("holm"),
Benjamini & Hochberg (1995) ("fdr_bh"), and Benjamini
& Yekutieli (2001) ("fdr_by").
The Bonferroni and Holm methods are designed to give strong control of the
family-wise error rate. Note that Holm's method is usually preferred
over the Bonferroni correction.
The "fdr_bh" and "fdr_by" methods control the false discovery rate, i.e.
the expected proportion of false discoveries amongst the rejected
hypotheses. The false discovery rate is a less stringent condition than
the family-wise error rate, so these methods are more powerful than the
others.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian Journal of Statistics, 6, 65–70.
- Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery
rate: a practical and powerful approach to multiple testing. Journal of
the Royal Statistical Society Series B, 57, 289–300.
- Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
Examples
--------
FDR correction of an array of p-values
>>> from pingouin import multicomp
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = multicomp(pvals, method='fdr_bh')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
"""
if not isinstance(pvals, (list, np.ndarray)):
err = "pvals must be a list or a np.ndarray"
raise ValueError(err)
if method.lower() in ['b', 'bonf', 'bonferroni']:
reject, pvals_corrected = bonf(pvals, alpha=alpha)
elif method.lower() in ['h', 'holm']:
reject, pvals_corrected = holm(pvals, alpha=alpha)
elif method.lower() in ['fdr', 'fdr_bh']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_bh')
elif method.lower() in ['fdr_by']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_by')
elif method.lower() == 'none':
pvals_corrected = pvals
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
else:
raise ValueError('Multiple comparison method not recognized')
return reject, pvals_corrected | 0.000257 |
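This is not how pingouin implements it internally (multicomp delegates to a separate `holm` helper), but the step-down adjustment described in the Notes can be sketched in a few lines of NumPy:
import numpy as np
def holm_sketch(pvals, alpha=0.05):
    # Sort p-values ascending, multiply p_(i) by (n - i), enforce monotonicity, cap at 1.
    pvals = np.asarray(pvals, dtype=float)
    n = pvals.size
    order = np.argsort(pvals)
    adj = pvals[order] * (n - np.arange(n))
    adj = np.minimum(np.maximum.accumulate(adj), 1.0)
    pvals_corrected = np.empty(n)
    pvals_corrected[order] = adj
    return pvals_corrected < alpha, pvals_corrected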
def get_score(self):
"""
Get the list of grades
:return: json array
"""
html = self.__get_score_html()
soup = BeautifulSoup(html)
div = soup.find_all('div', class_='Section1')[0]
tag_ps = div.find_all('p')
del tag_ps[0]
result = []
'''
# one object
{
'year': 'Term 1',
'score_list': [
{
'id': '0800040',
'name': 'C++',
'type': 'required',
'xuefen': '1',
'score': '95',
'remark': 'retake'
}
]
}
'''
# The last table holds the "second classroom" (extracurricular) credits; delete it
tables = soup.find_all('table', attrs={'class': 'MsoTableGrid', 'border': '1'})
del tables[len(tables) - 1]
# Which term (semester) we are on
year_num = 1
for table in tables:
try:
trs = table.find_all('tr')
tern_info = {
'year': year_num,
'score_list': []
}
# Iterate over each table row
for tr in trs:
tds = tr.find_all(self.__get_td)
if len(tds) == 0:
continue
lesson_info = {
'id': _.trim(tds[0].get_text()),
'name': _.trim(tds[1].get_text()),
'type': _.trim(tds[2].get_text()),
'xuefen': _.trim(tds[3].get_text()),
'score': _.trim(tds[4].get_text()),
'remark': _.trim(tds[5].get_text())
}
tern_info['score_list'].append(lesson_info)
year_num += 1
result.append(tern_info)
except Exception as e:
_.d(e.message)
return result | 0.001595 |
def add_disk(self, path, force_disk_indexes=True, **args):
"""Adds a disk specified by the path to the ImageParser.
:param path: The path to the disk volume
:param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk indexes if this is the
second volume you add. If you plan on using this method, always leave this True.
If you add a second disk when the previous disk has no index, an error is raised.
:param args: Arguments to pass to the constructor of the Disk.
"""
if self.disks and self.disks[0].index is None:
raise DiskIndexError("First disk has no index.")
if force_disk_indexes or self.disks:
index = len(self.disks) + 1
else:
index = None
disk = Disk(self, path, index=str(index) if index else None, **args)
self.disks.append(disk)
return disk | 0.005118 |
def present(name,
config=None,
**kwargs):
'''
Ensure the job is present in the Jenkins configured jobs
name
The unique name for the Jenkins job
config
The Salt URL for the file to use for configuring the job
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ['Job {0} is up to date.'.format(name)]}
if __salt__['jenkins.job_exists'](name):
_current_job_config = __salt__['jenkins.get_job_config'](name)
buf = six.moves.StringIO(_current_job_config)
oldXML = ET.fromstring(buf.read())
cached_source_path = __salt__['cp.cache_file'](config, __env__)
with salt.utils.files.fopen(cached_source_path) as _fp:
newXML = ET.fromstring(salt.utils.stringutils.to_unicode(_fp.read()))
if not _elements_equal(oldXML, newXML):
diff = difflib.unified_diff(
ET.tostringlist(oldXML, encoding='utf8', method='xml'),
ET.tostringlist(newXML, encoding='utf8', method='xml'), lineterm='')
try:
__salt__['jenkins.update_job'](name, config, __env__)
except CommandExecutionError as exc:
return _fail(ret, exc.strerror)
else:
ret['changes'] = ''.join(diff)
ret['comment'].append('Job \'{0}\' updated.'.format(name))
else:
cached_source_path = __salt__['cp.cache_file'](config, __env__)
with salt.utils.files.fopen(cached_source_path) as _fp:
new_config_xml = salt.utils.stringutils.to_unicode(_fp.read())
try:
__salt__['jenkins.create_job'](name, config, __env__)
except CommandExecutionError as exc:
return _fail(ret, exc.strerror)
buf = six.moves.StringIO(new_config_xml)
diff = difflib.unified_diff('', buf.readlines(), lineterm='')
ret['changes'][name] = ''.join(diff)
ret['comment'].append('Job \'{0}\' added.'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret | 0.001427 |
def get_storage(self, name='main', file_format='pickle', TTL=None):
'''Returns a storage for the given name. The returned storage is a
fully functioning python dictionary and is designed to be used that
way. It is usually not necessary for the caller to load or save the
storage manually. If the storage does not already exist, it will be
created.
.. seealso:: :class:`xbmcswift2.TimedStorage` for more details.
:param name: The name of the storage to retrieve.
:param file_format: Choices are 'pickle', 'csv', and 'json'. Pickle is
recommended as it supports python objects.
.. note:: If a storage already exists for the given
name, the file_format parameter is
ignored. The format will be determined by
the existing storage file.
:param TTL: The time to live for storage items specified in minutes or None
for no expiration. Since storage items aren't expired until a
storage is loaded from disk, it is possible to call
get_storage() with a different TTL than when the storage was
created. The currently specified TTL is always honored.
'''
if not hasattr(self, '_unsynced_storages'):
self._unsynced_storages = {}
filename = os.path.join(self.storage_path, name)
try:
storage = self._unsynced_storages[filename]
log.debug('Loaded storage "%s" from memory', name)
except KeyError:
if TTL:
TTL = timedelta(minutes=TTL)
try:
storage = TimedStorage(filename, file_format, TTL)
except ValueError:
# Thrown when the storage file is corrupted and can't be read.
# Prompt user to delete storage.
choices = ['Clear storage', 'Cancel']
ret = xbmcgui.Dialog().select('A storage file is corrupted. It'
' is recommended to clear it.',
choices)
if ret == 0:
os.remove(filename)
storage = TimedStorage(filename, file_format, TTL)
else:
raise Exception('Corrupted storage file at %s' % filename)
self._unsynced_storages[filename] = storage
log.debug('Loaded storage "%s" from disk', name)
return storage | 0.001893 |
def decode_to_str(self):
"""
Return the array of string identifiers corresponding to self
>>> enum_array = household('housing_occupancy_status', period)
>>> enum_array[0]
2 # Encoded value
>>> enum_array.decode_to_str()[0]
'free_lodger' # String identifier
"""
return np.select([self == item.index for item in self.possible_values], [item.name for item in self.possible_values]) | 0.006198 |
def _list_queues(self, prefix=None, marker=None, max_results=None,
include=None, timeout=None):
'''
Returns a list of queues under the specified account. Makes a single list
request to the service. Used internally by the list_queues method.
:param str prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
:param str marker:
A token which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:param int max_results:
The maximum number of queues to return. A single list request may
return up to 1000 queues and potentially a continuation token which
should be followed to get additional results.
:param str include:
Include this parameter to specify that the container's
metadata be returned as part of the response body.
:param int timeout:
The server timeout, expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('comp', 'list'),
('prefix', _to_str(prefix)),
('marker', _to_str(marker)),
('maxresults', _int_to_str(max_results)),
('include', _to_str(include)),
('timeout', _int_to_str(timeout))
]
response = self._perform_request(request)
return _convert_xml_to_queues(response) | 0.003638 |
def _apply_summaries(self):
"""Add all summary rows and columns."""
def as_frame(r):
if isinstance(r, pd.Series):
return r.to_frame()
else:
return r
df = self.data
if df.index.nlevels > 1:
raise ValueError(
"You cannot currently have both summary rows and columns on a "
"MultiIndex."
)
_df = df
if self.summary_rows:
rows = pd.concat([agg.apply(_df)
for agg in self._cleaned_summary_rows], axis=1).T
df = pd.concat([df, as_frame(rows)], axis=0)
if self.summary_cols:
cols = pd.concat([agg.apply(_df)
for agg in self._cleaned_summary_cols], axis=1)
df = pd.concat([df, as_frame(cols)], axis=1)
return df | 0.00224 |
def epanechnikov(xx, idx=None):
"""
The Epanechnikov kernel estimated for xx values at indices idx (zero
elsewhere)
Parameters
----------
xx: float array
Values of the function on which the kernel is computed. Typically,
these are Euclidean distances from some point x0 (see do_kernel)
idx: tuple
An indexing tuple pointing to the coordinates in xx for which the
kernel value is estimated. Default: None (all points are used!)
Notes
-----
This is equation 6.4 in FHT chapter 6
"""
ans = np.zeros(xx.shape)
ans[idx] = 0.75 * (1-xx[idx]**2)
return ans | 0.009036 |
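A quick usage sketch (illustrative values only): distances of 0, 0.5 and 1 from x0 receive weights 0.75, 0.5625 and 0.
import numpy as np
xx = np.array([0.0, 0.5, 1.0])   # Euclidean distances from x0
epanechnikov(xx)                 # -> array([0.75  , 0.5625, 0.    ])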
def onStart(self, *args, **kwarg):
"""
Verify user input and kick off the client's program if valid
"""
with transactUI(self):
config = self.navbar.getActiveConfig()
config.resetErrors()
if config.isValid():
self.clientRunner.run(self.buildCliString())
self.showConsole()
else:
config.displayErrors()
self.Layout() | 0.004255 |
def users_register(self, email, name, password, username, **kwargs):
"""Register a new user."""
return self.__call_api_post('users.register', email=email, name=name, password=password, username=username,
kwargs=kwargs) | 0.011111 |
def __dict_to_BetterDict(self, attr):
"""Convert the passed attr to a BetterDict if the value is a dict
Returns: The new value of the passed attribute."""
if type(self[attr]) == dict:
self[attr] = BetterDict(self[attr])
return self[attr] | 0.007067 |
def nx_graph_from_dotfile(filename: str) -> nx.DiGraph:
""" Get a networkx graph from a DOT file, and reverse the edges. """
return nx.DiGraph(read_dot(filename).reverse()) | 0.005556 |
def column_preview(table_name, col_name):
"""
Return the first ten elements of a column as JSON in Pandas'
"split" format.
"""
col = orca.get_table(table_name).get_column(col_name).head(10)
return (
col.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) | 0.00295 |
def read_structs(fstream):
"""
Read all structs from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all structs in the
fstream.
"""
struct = read_struct(fstream)
while struct is not None:
yield struct
struct = read_struct(fstream) | 0.002681 |
def _ps_extract_pid(self, line):
"""
Extract PID and parent PID from an output line from the PS command
"""
this_pid = self.regex['pid'].sub(r'\g<1>', line)
this_parent = self.regex['parent'].sub(r'\g<1>', line)
# Return the main / parent PIDs
return this_pid, this_parent | 0.008902 |
def terminate(self):
"""
Called when an existing task is removed from the configuration.
This sets a Do Not Resuscitate flag and then initiates a stop
sequence. Once all processes have stopped, the task will delete
itself.
"""
log = self._params.get('log', self._discard)
self._dnr = time.time()
self.stop()
log.info("Task '%s' marked for death", self._name) | 0.004598 |
def get_last(self, n=1):
"""
Retrieve the last n rows from the table
:param n: number of rows to return
:return: list of rows
"""
rows = []
# Get values from the partial db first
if self.tracker.dbcon_part and check_table_exists(self.tracker.dbcon_part, self.name):
rows.extend(get_last_row(self.tracker.dbcon_part, self.name, n))
# Then add rows from the master if required
if len(rows) < n and check_table_exists(self.tracker.dbcon_master, self.name):
rows.extend(get_last_row(self.tracker.dbcon_master, self.name, n))
return rows[-n:] | 0.006163 |
def check_args(args):
"""Checks the arguments and options.
:param args: an object containing the options and arguments of the program.
:type args: :py:class:`argparse.Namespace`
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exits with error code 1.
"""
# Checking that the input file exists
if not os.path.isfile(args.evec):
m = "{}: no such file".format(args.evec)
raise ProgramError(m)
return True | 0.001597 |
def set_palette_colors(self, palette):
"""Updates the color buttons with the given palette
"""
palette = palette.split(':')
for i, pal in enumerate(palette):
x, color = Gdk.Color.parse(pal)
self.get_widget('palette_%d' % i).set_color(color) | 0.006757 |
def __RenderOurModuleFlags(self, module, output_lines, prefix=''):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix) | 0.007937 |
def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
schema_name, include_public = task._old_schema
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return
connection.set_schema(schema_name, include_public=include_public) | 0.001969 |
def rescale_array_to_z1z2(array, z1z2=(-1.0, 1.0)):
"""Rescale the values in a numpy array to the [z1,z2] interval.
The transformation is carried out following the relation
array_rs = b_flux * array - c_flux
as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680)
Parameters
----------
array : numpy array
Numpy array to be rescaled.
z1z2 : tuple, floats
Minimum and maximum values in the returned array.
Returns
-------
array_rs : numpy array
Array with rescaled values.
coef_rs : tuple, floats
Coefficients b_flux and c_flux employed in the rescaling
operation.
"""
if type(array) is not np.ndarray:
raise ValueError("array=" + str(array) + " must be a numpy.ndarray")
array_min = array.min()
array_max = array.max()
z1, z2 = z1z2
delta = array_max - array_min
b_flux = (z2 - z1) / delta
c_flux = (z2 * array_min - z1 * array_max) / delta
array_rs = b_flux * array - c_flux
return array_rs, (b_flux, c_flux) | 0.000943 |
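A worked example of the rescaling relation (illustrative numbers): an array spanning [0, 10] mapped to the default [-1, 1] interval gives b_flux = 0.2 and c_flux = 1.
import numpy as np
arr = np.array([0.0, 5.0, 10.0])
arr_rs, (b_flux, c_flux) = rescale_array_to_z1z2(arr)
# b_flux = (1 - (-1)) / 10 = 0.2, c_flux = (1*0 - (-1)*10) / 10 = 1.0
# arr_rs = 0.2 * arr - 1 -> array([-1., 0., 1.])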
def file_format(self):
"""Formats device filesystem"""
log.info('Formatting, can take minutes depending on flash size...')
res = self.__exchange('file.format()', timeout=300)
if 'format done' not in res:
log.error(res)
else:
log.info(res)
return res | 0.00625 |
def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None,
maxlen=-1, fast=False):
"""Create a tier with the gaps and overlaps of the annotations.
For types see :func:`get_gaps_and_overlaps`
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param str tier_name: Name of the new tier, if ``None`` the name will
be generated.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:param bool fast: Flag for using the fast method.
:returns: List of gaps and overlaps of the form:
``[(type, start, end)]``.
:raises KeyError: If a tier is non existent.
:raises IndexError: If no annotations are available in the tiers.
"""
if tier_name is None:
tier_name = '{}_{}_ftos'.format(tier1, tier2)
self.add_tier(tier_name)
ftos = []
ftogen = self.get_gaps_and_overlaps2(tier1, tier2, maxlen) if fast\
else self.get_gaps_and_overlaps(tier1, tier2, maxlen)
for fto in ftogen:
ftos.append(fto)
if fto[1]-fto[0] >= 1:
self.add_annotation(tier_name, fto[0], fto[1], fto[2])
self.clean_time_slots()
return ftos | 0.002137 |
def _try_get_solutions(self, address, size, access, max_solutions=0x1000, force=False):
"""
Try to solve for a symbolic address, checking permissions when reading/writing size bytes.
:param Expression address: The address to solve for
:param int size: How many bytes to check permissions for
:param str access: 'r' or 'w'
:param int max_solutions: Will raise if more solutions are found
:param force: Whether to ignore permission failure
:rtype: list
"""
assert issymbolic(address)
solutions = solver.get_all_values(self.constraints, address, maxcnt=max_solutions)
crashing_condition = False
for base in solutions:
if not self.access_ok(slice(base, base + size), access, force):
crashing_condition = Operators.OR(address == base, crashing_condition)
if solver.can_be_true(self.constraints, crashing_condition):
raise InvalidSymbolicMemoryAccess(address, access, size, crashing_condition)
return solutions | 0.006548 |
def monochrome(clr):
"""
Returns colors in the same hue with varying brightness/saturation.
"""
def _wrap(x, min, threshold, plus):
if x - min < threshold:
return x + plus
else:
return x - min
colors = colorlist(clr)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.2, 0.2, 0.6)
colors.append(c)
c = clr.copy()
c.brightness = max(0.2, clr.brightness + (1 - clr.brightness) * 0.2)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
colors.append(c)
return colors | 0.00123 |
def proximal(self):
"""Return the proximal factory of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
proximal factory for the L1-norm.
odl.solvers.nonsmooth.proximal_operators.proximal_l2 :
proximal factory for the L2-norm.
"""
if self.exponent == 1:
return proximal_l1(space=self.domain)
elif self.exponent == 2:
return proximal_l2(space=self.domain)
elif self.exponent == np.inf:
return proximal_linfty(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p=1, '
'p=2, and p=inf') | 0.002681 |
def delete_board(self, id):
"""Delete an agile board."""
board = Board(self._options, self._session, raw={'id': id})
board.delete() | 0.012903 |
def _assertCALL(self, url, *, allow_empty=False, check_headers=True, check_status=True,
expect_errors=False, name=None, method='get', data=None):
"""
check url for response changes
:param url: url to check
:param allow_empty: if True ignore empty response and 404 errors
:param check_headers: check response headers
:param check_status: check response status code
:raises: ValueError
:raises: AssertionError
"""
self.view = resolve(url).func.cls
m = getattr(self.client, method.lower())
self.filename = self.get_response_filename(method, name or url)
response = m(url, data=data)
assert response.accepted_renderer
payload = response.data
if not allow_empty and not payload:
raise ValueError(f"View {self.view} returned and empty json. Check your test")
if response.status_code > 299 and not expect_errors:
raise ValueError(f"View {self.view} unexpected response. {response.status_code} - {response.content}")
if not allow_empty and response.status_code == 404:
raise ValueError(f"View {self.view} returned 404 status code. Check your test")
if not os.path.exists(self.filename) or os.environ.get('API_CHECKER_RESET', False):
_write(self.filename, serialize_response(response))
stored = load_response(self.filename)
if (check_status) and response.status_code != stored.status_code:
raise StatusCodeError(self.view, response.status_code, stored.status_code)
if check_headers:
self._assert_headers(response, stored)
self.compare(payload, stored.data, self.filename, view=self.view) | 0.005117 |
def validate_units(self):
"""Ensure that wavelenth and flux units belong to the
correct classes.
Raises
------
TypeError
Wavelength unit is not `~pysynphot.units.WaveUnits` or
flux unit is not `~pysynphot.units.FluxUnits`.
"""
if (not isinstance(self.waveunits, units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit" % self.waveunits)
if (not isinstance(self.fluxunits, units.FluxUnits)):
raise TypeError("%s is not a valid FluxUnit" % self.fluxunits) | 0.003484 |
def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):
"""Assembler of parameters for building request query.
Args:
query_string: Query to be passed to DuckDuckGo API.
no_redirect: Skip HTTP redirects (for !bang commands). Default - False.
no_html: Remove HTML from text, e.g. bold and italics. Default - False.
skip_disambig: Skip disambiguation (D) Type. Default - False.
Returns:
A “percent-encoded” string which is used as a part of the query.
"""
params = [('q', query_string.encode("utf-8")), ('format', 'json')]
if no_redirect:
params.append(('no_redirect', 1))
if no_html:
params.append(('no_html', 1))
if skip_disambig:
params.append(('skip_disambig', 1))
return '/?' + urlencode(params) | 0.001218 |
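For example (a usage sketch; assumes `urlencode` is `urllib.parse.urlencode`):
url_assembler("python", no_html=1)
# -> '/?q=python&format=json&no_html=1'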
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches the filter, False if not or
None if the filter does not apply.
"""
if not self._file_scanner or not file_entry.IsFile():
return None
file_object = file_entry.GetFileObject()
if not file_object:
return False
try:
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
except IOError as exception:
# TODO: replace location by display name.
location = getattr(file_entry.path_spec, 'location', '')
logging.error((
'[skipping] unable to scan file: {0:s} for signatures '
'with error: {1!s}').format(location, exception))
return False
finally:
file_object.close()
return scan_state.number_of_scan_results > 0 | 0.010256 |
def find_out_attribs(self):
"""
Get all out attributes in the shader source.
:return: List of attribute names
"""
names = []
for line in self.lines:
if line.strip().startswith("out "):
names.append(line.split()[2].replace(';', ''))
return names | 0.0059 |
def takes_instance_or_queryset(func):
"""Decorator that makes standard Django admin actions compatible."""
@wraps(func)
def decorated_function(self, request, queryset):
# func follows the prototype documented at:
# https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/#writing-action-functions
if not isinstance(queryset, QuerySet):
try:
# Django >=1.8
queryset = self.get_queryset(request).filter(pk=queryset.pk)
except AttributeError:
try:
# Django >=1.6,<1.8
model = queryset._meta.model
except AttributeError: # pragma: no cover
# Django <1.6
model = queryset._meta.concrete_model
queryset = model.objects.filter(pk=queryset.pk)
return func(self, request, queryset)
return decorated_function | 0.001065 |
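A sketch of how the decorator might be applied to a Django admin action (hypothetical `published` field; not taken from the original package):
from django.contrib import admin

class ArticleAdmin(admin.ModelAdmin):
    actions = ['publish']

    @takes_instance_or_queryset
    def publish(self, request, queryset):
        # Receives a real queryset whether the action was triggered on a
        # selection (bulk admin action) or on a single model instance.
        queryset.update(published=True)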
def status(ctx, date, f, pushed):
"""
Shows the summary of what's going to be committed to the server.
"""
try:
timesheet_collection = get_timesheet_collection_for_context(ctx, f)
except ParseError as e:
ctx.obj['view'].err(e)
else:
ctx.obj['view'].show_status(
timesheet_collection.entries.filter(
date, regroup=ctx.obj['settings']['regroup_entries'],
pushed=False if not pushed else None
)
) | 0.00198 |
def getResiduals(self):
""" regress out fixed effects and results residuals """
X = np.zeros((self.N*self.P,self.n_fixed_effs))
ip = 0
for i in range(self.n_terms):
Ki = self.A[i].shape[0]*self.F[i].shape[1]
X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])
ip += Ki
y = np.reshape(self.Y,(self.Y.size,1),order='F')
RV = regressOut(y,X)
RV = np.reshape(RV,self.Y.shape,order='F')
return RV | 0.022634 |
def get_range_start_line_number(self,rng):
"""
.. warning:: not implemented
"""
sys.stderr.write("error unimplemented get_range_start_line\n")
sys.exit()
for i in range(0,len(self._lines)):
if rng.cmp(self._lines[i]['rng'])==0: return i+1
return None | 0.021127 |
def toJulian(dt=None):
"""Converts a Python datetime to a Julian date, using the formula from
Meesus (1991). This formula is reproduced in D.A. Vallado (2004).
See:
D.A. Vallado, Fundamentals of Astrodynamics and Applications, p. 187
http://books.google.com/books?id=PJLlWzMBKjkC&lpg=PA956&vq=187&pg=PA187
"""
if dt is None:
dt = datetime.datetime.utcnow()
if dt.month < 3:
year = dt.year - 1
month = dt.month + 12
else:
year = dt.year
month = dt.month
A = int(year / 100.0)
B = 2 - A + int(A / 4.0)
C = ( (dt.second / 60.0 + dt.minute) / 60.0 + dt.hour ) / 24.0
jd = int(365.25 * (year + 4716))
jd += int(30.6001 * (month + 1)) + dt.day + B - 1524.5 + C
return jd | 0.01519 |
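A quick sanity check of the formula: the J2000.0 epoch (2000-01-01 12:00 UTC) gives JD 2451545.0.
import datetime
toJulian(datetime.datetime(2000, 1, 1, 12))   # -> 2451545.0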
def combine_heads(self, x):
"""Combine tensor that has been split.
Args:
x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]
Returns:
A tensor with shape [batch_size, length, hidden_size]
"""
with tf.name_scope("combine_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[2]
x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth]
return tf.reshape(x, [batch_size, length, self.hidden_size]) | 0.01227 |
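The shape bookkeeping can be sketched in NumPy with toy dimensions (assumed values; not the original TensorFlow code):
import numpy as np
batch, heads, length, depth = 2, 8, 10, 64    # hidden_size = heads * depth = 512
x = np.zeros((batch, heads, length, depth))
y = x.transpose(0, 2, 1, 3).reshape(batch, length, heads * depth)
assert y.shape == (2, 10, 512)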
def cleanup_images(remove_old=False, **kwargs):
"""
Removes all images that have no name, and that are not referenced as a dependency by any other named image. Similar
to the ``prune`` functionality in newer Docker versions, but supports more filters.
:param remove_old: Also remove images that do have a name, but no `latest` tag.
:type remove_old: bool
"""
keep_tags = env.get('docker_keep_tags')
if keep_tags is not None:
kwargs.setdefault('keep_tags', keep_tags)
removed_images = docker_fabric().cleanup_images(remove_old=remove_old, **kwargs)
if kwargs.get('list_only'):
puts('Unused images:')
for image_name in removed_images:
fastprint(image_name, end='\n') | 0.006766 |
def dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
Dict keys are produced in the order in which they appear in OrderedDicts.
Safe version.
If objects are not "conventional" objects, they will be dumped converted to string with the str() function.
They will then not be recovered when loading with the load() function.
"""
# Display OrderedDicts correctly
class OrderedDumper(SafeDumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
original_yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
list(data.items()))
# Display long strings correctly
def _long_str_representer(dumper, data):
if data.find("\n") != -1:
# Drop some unneeded data
# \t are forbidden in YAML
data = data.replace("\t", " ")
# empty spaces at end of line are always useless in INGInious, and forbidden in YAML
data = "\n".join([p.rstrip() for p in data.split("\n")])
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
# Default representation for some odd objects
def _default_representer(dumper, data):
return _long_str_representer(dumper, str(data))
OrderedDumper.add_representer(str, _long_str_representer)
OrderedDumper.add_representer(OrderedDict, _dict_representer)
OrderedDumper.add_representer(None, _default_representer)
s = original_yaml.dump(data, stream, OrderedDumper, encoding='utf-8', allow_unicode=True, default_flow_style=False, indent=4, **kwds)
if s is not None:
return s.decode('utf-8')
else:
return | 0.003086 |
def create_bagit_stream(dir_name, payload_info_list):
"""Create a stream containing a BagIt zip archive.
Args:
dir_name : str
The name of the root directory in the zip file, under which all the files
are placed (avoids "zip bombs").
payload_info_list: list
List of payload_info_dict, each dict describing a file.
- keys: pid, filename, iter, checksum, checksum_algorithm
- If the filename is None, the pid is used for the filename.
"""
zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
_add_path(dir_name, payload_info_list)
payload_byte_count, payload_file_count = _add_payload_files(
zip_file, payload_info_list
)
tag_info_list = _add_tag_files(
zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count
)
_add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
_add_tag_manifest_file(zip_file, dir_name, tag_info_list)
return zip_file | 0.002874 |
def verify_key(self, url):
"""For verifying your API key.
Provide the URL of your site or blog you will be checking spam from.
"""
response = self._request('verify-key', {
'blog': url,
'key': self._key
})
if response.status == 200:
# Read response (trimmed of whitespace)
return response.read().strip() == "valid"
return False | 0.012766 |
def get_distributions(self):
"""
Returns a dictionary of name and its distribution. Distribution is a ndarray.
The ndarray is stored in the standard way such that the rightmost variable changes most often.
Consider a CPD of variable 'd' which has parents 'b' and 'c' (distribution['CONDSET'] = ['b', 'c'])
| d_0 d_1
---------------------------
b_0, c_0 | 0.8 0.2
b_0, c_1 | 0.9 0.1
b_1, c_0 | 0.7 0.3
b_1, c_1 | 0.05 0.95
The value of distribution['d']['DPIS'] for the above example will be:
array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]])
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_distributions()
{'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])},
'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2],
[ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]},
'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2],
[ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ],
[ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ],
[ 0.9 , 0.1 ],
[ 0.7 , 0.3 ],
[ 0.05, 0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]}
"""
distribution = {}
for dist in self.bnmodel.find('DISTRIBUTIONS'):
variable_name = dist.find('PRIVATE').get('NAME')
distribution[variable_name] = {'TYPE': dist.get('TYPE')}
if dist.find('CONDSET') is not None:
distribution[variable_name]['CONDSET'] = [var.get('NAME') for
var in dist.find('CONDSET').findall('CONDELEM')]
distribution[variable_name]['CARDINALITY'] = np.array(
[len(set(np.array([list(map(int, dpi.get('INDEXES').split()))
for dpi in dist.find('DPIS')])[:, i]))
for i in range(len(distribution[variable_name]['CONDSET']))])
distribution[variable_name]['DPIS'] = np.array(
[list(map(float, dpi.text.split())) for dpi in dist.find('DPIS')])
return distribution | 0.004068 |
def storage(self):
""" get the counter storage
"""
annotation = get_portal_annotation()
if annotation.get(NUMBER_STORAGE) is None:
annotation[NUMBER_STORAGE] = OIBTree()
return annotation[NUMBER_STORAGE] | 0.011719 |
def _order_linkage_group(group):
""" For a given group (ie: a list containing [marker, position])
order the list according to their position.
"""
tmp = {}
for row in group:
if float(row[1]) in tmp: # pragma: no cover
tmp[float(row[1])].append(row[0])
else:
tmp[float(row[1])] = [row[0]]
keys = list(tmp.keys())
keys.sort()
output = []
for key in keys:
for entry in tmp[key]:
if not entry:
continue
output.append([entry, str(key)])
return output | 0.001742 |
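For example (illustrative marker names):
group = [['mk1', '2.5'], ['mk2', '0.0'], ['mk3', '2.5']]
_order_linkage_group(group)
# -> [['mk2', '0.0'], ['mk1', '2.5'], ['mk3', '2.5']]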
async def unplonk(self, ctx, *, member: discord.Member):
"""Unbans a user from using the bot.
To use this command you must have the Manage Server permission
or have a Bot Admin role.
"""
plonks = self.config.get('plonks', {})
guild_id = ctx.message.server.id
db = plonks.get(guild_id, [])
try:
db.remove(member.id)
except ValueError:
await self.bot.responses.failure(message='%s is not banned from using the bot in this server.' % member)
else:
plonks[guild_id] = db
await self.config.put('plonks', plonks)
await self.bot.responses.success(message='%s has been unbanned from using the bot in this server.' % member) | 0.005277 |
def find_element(self, value, by=By.ID, update=False) -> Elements:
'''Find a element or the first element.'''
if update or not self._nodes:
self.uidump()
for node in self._nodes:
if node.attrib[by] == value:
bounds = node.attrib['bounds']
coord = list(map(int, re.findall(r'\d+', bounds)))
click_point = (coord[0] + coord[2]) / \
2, (coord[1] + coord[3]) / 2
return self._element_cls(self, node.attrib, by, value, coord, click_point)
raise NoSuchElementException(f'No such element: {by}={value!r}.') | 0.004695 |
def run_command(self, config_file, sources):
"""
:param str config_file: The name of config file.
:param list sources: The list with source files.
"""
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get('database', 'rdbms').lower()
loader = self.create_routine_loader(rdbms)
status = loader.main(config_file, sources)
return status | 0.004515 |
def accept(self):
"""Method invoked when OK button is clicked."""
try:
self.save_metadata()
except InvalidValidationException as e:
display_warning_message_box(
self, tr('Invalid Field Mapping'), str(e))
return
super(FieldMappingDialog, self).accept() | 0.00597 |
def create_default_element(self, name):
"""
Creates a <@name/> tag under root if there is none.
"""
found = self.root.find(name)
if found is not None:
return found
ele = ET.Element(name)
self.root.append(ele)
return ele | 0.006757 |
def createProgBuilder(env):
"""This is a utility function that creates the Program
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
program = env['BUILDERS']['Program']
except KeyError:
import SCons.Defaults
program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
emitter = '$PROGEMITTER',
prefix = '$PROGPREFIX',
suffix = '$PROGSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'Object',
target_scanner = ProgramScanner)
env['BUILDERS']['Program'] = program
return program | 0.017564 |
def _validate_fold_has_outputs_or_count_filter(fold_scope_location, fold_has_count_filter, outputs):
"""Ensure the @fold scope has at least one output, or filters on the size of the fold."""
# This function makes sure that the @fold scope has an effect.
# Folds either output data, or filter the data enclosing the fold based on the size of the fold.
if fold_has_count_filter:
# This fold has a filter on the "_x_count" property, so it is legal and has an effect.
return True
# At least one output in the outputs list must point to the fold_scope_location,
# or the scope corresponding to fold_scope_location had no @outputs and is illegal.
for output in six.itervalues(outputs):
if output['fold'] == fold_scope_location:
return True
raise GraphQLCompilationError(u'Found a @fold scope that has no effect on the query. '
u'Each @fold scope must either perform filtering, or contain at '
u'least one field marked for output. Fold location: {}'
.format(fold_scope_location)) | 0.008741 |
def _assign_method(self, resource_class, method_type):
"""
Using reflection, assigns a new method to this class.
Args:
resource_class: A resource class
method_type: The HTTP method type
"""
"""
If we assigned the same method to each method, it's the same
method in memory, so we need one for each acceptable HTTP method.
"""
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
)
# I know what you're going to say, and I'd love help making this nicer
# reflection assigns the same memory addr to each method otherwise.
def get(self, method_type=method_type, method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, uid=None, **kwargs):
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
def put(self, method_type=method_type, method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, uid=None, **kwargs):
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
def post(self, method_type=method_type, method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, uid=None, **kwargs):
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
def patch(self, method_type=method_type, method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, uid=None, **kwargs):
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
def delete(self, method_type=method_type, method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, uid=None, **kwargs):
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
method_map = {
'GET': get,
'PUT': put,
'POST': post,
'PATCH': patch,
'DELETE': delete
}
setattr(
self, method_name,
types.MethodType(method_map[method_type], self)
) | 0.00068 |
def subscribe_user_to_discussion(recID, uid):
"""
Subscribe a user to a discussion, so that they receive by email
all new comments for this record.
:param recID: record ID corresponding to the discussion we want to
subscribe the user
:param uid: user id
"""
query = """INSERT INTO "cmtSUBSCRIPTION" (id_bibrec, id_user, creation_time)
VALUES (%s, %s, %s)"""
params = (recID, uid, convert_datestruct_to_datetext(time.localtime()))
try:
run_sql(query, params)
except:
return 0
return 1 | 0.005119 |
def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2):
"""
Generate a Link-Scoped Multicast Address as described in RFC 4489.
Returned value is in printable notation.
'addr' parameter specifies the link-local address to use for generating
Link-scoped multicast address IID.
By default, the function returns a ::/96 prefix (aka last 32 bits of
returned address are null). If a group id is provided through 'grpid'
parameter, last 32 bits of the address are set to that value (accepted
formats : b'\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896).
By default, generated address scope is Link-Local (2). That value can
be modified by passing a specific 'scope' value as an argument of the
function. RFC 4489 only authorizes scope values <= 2. Enforcement
is performed by the function (None will be returned).
If no link-local address can be used to generate the Link-Scoped IPv6
Multicast address, or if another error occurs, None is returned.
"""
if scope not in [0, 1, 2]:
return None
try:
if not in6_islladdr(addr):
return None
addr = inet_pton(socket.AF_INET6, addr)
except Exception:
warning("in6_getLinkScopedMcastPrefix(): Invalid address provided")
return None
iid = addr[8:]
if grpid is None:
grpid = b'\x00\x00\x00\x00'
else:
if isinstance(grpid, (bytes, str)):
if len(grpid) == 8:
try:
grpid = int(grpid, 16) & 0xffffffff
except Exception:
warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") # noqa: E501
return None
elif len(grpid) == 4:
try:
grpid = struct.unpack("!I", grpid)[0]
except Exception:
warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") # noqa: E501
return None
grpid = struct.pack("!I", grpid)
flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope))
plen = b'\xff'
res = b'\x00'
a = b'\xff' + flgscope + res + plen + iid + grpid
return inet_ntop(socket.AF_INET6, a) | 0.000446 |
def thickness_hydrostatic(pressure, temperature, **kwargs):
r"""Calculate the thickness of a layer via the hypsometric equation.
This thickness calculation uses the pressure and temperature profiles (and optionally
mixing ratio) via the hypsometric equation with virtual temperature adjustment
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
mixing : `pint.Quantity`, optional
Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
is simply set to be the given temperature.
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
"""
mixing = kwargs.pop('mixing', None)
molecular_weight_ratio = kwargs.pop('molecular_weight_ratio', mpconsts.epsilon)
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', None)
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
else:
if mixing is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
return (- mpconsts.Rd / mpconsts.g * np.trapz(
layer_virttemp.to('K'), x=np.log(layer_p / units.hPa)) * units.K).to('m') | 0.005176 |
def bool_input(message):
'''
Ask a user for a boolean input
args:
message (str): Prompt for user
returns:
bool_in (boolean): Input boolean
'''
while True:
suffix = ' (true or false): '
inp = input(message + suffix)
if inp.lower() == 'true':
return True
elif inp.lower() == 'false':
return False
else:
print(colored('Must be either true or false, try again!', 'red')) | 0.002062 |
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: The (XML) Element representation of
this object
"""
# We piggy back on the implementation in DidlItem
didl_item = DidlItem(
title="DUMMY",
# This is ignored. Sonos gets the title from the item_id
parent_id="DUMMY", # Ditto
item_id=self.item_id,
desc=self.desc,
resources=self.resources
)
return didl_item.to_element(include_namespaces=include_namespaces) | 0.002491 |
def get_command(self, ctx, name):
"""Fetch command from folder."""
plugin = os.path.basename(self.folder)
try:
command = importlib.import_module("honeycomb.commands.{}.{}".format(plugin, name))
except ImportError:
raise click.UsageError("No such command {} {}\n\n{}".format(plugin, name, self.get_help(ctx)))
return getattr(command, name) | 0.00995 |
def compile_file(self, path, incl_search_paths=None):
"""
Parse & compile a single file and append it to RDLCompiler's root
namespace.
If any exceptions (:class:`~systemrdl.RDLCompileError` or other)
occur during compilation, then the RDLCompiler object should be discarded.
Parameters
----------
path:str
Path to an RDL source file
incl_search_paths:list
List of additional paths to search to resolve includes.
If unset, defaults to an empty list.
Relative include paths are resolved in the following order:
1. Search each path specified in ``incl_search_paths``.
2. Path relative to the source file performing the include.
Raises
------
:class:`~systemrdl.RDLCompileError`
If any fatal compile error is encountered.
"""
if incl_search_paths is None:
incl_search_paths = []
fpp = preprocessor.FilePreprocessor(self.env, path, incl_search_paths)
preprocessed_text, seg_map = fpp.preprocess()
input_stream = preprocessor.PreprocessedInputStream(preprocessed_text, seg_map)
lexer = SystemRDLLexer(input_stream)
lexer.removeErrorListeners()
lexer.addErrorListener(messages.RDLAntlrErrorListener(self.msg))
token_stream = CommonTokenStream(lexer)
parser = SystemRDLParser(token_stream)
parser.removeErrorListeners()
parser.addErrorListener(messages.RDLAntlrErrorListener(self.msg))
# Run Antlr parser on input
parsed_tree = parser.root()
if self.msg.had_error:
self.msg.fatal("Parse aborted due to previous errors")
# Traverse parse tree with RootVisitor
self.visitor.visit(parsed_tree)
# Reset default property assignments from namespace.
# They should not be shared between files since that would be confusing.
self.namespace.default_property_ns_stack = [{}]
if self.msg.had_error:
self.msg.fatal("Compile aborted due to previous errors") | 0.002342 |
def TK_ask(title,msg):
"""use the GUI to ask YES or NO."""
root = tkinter.Tk()
root.attributes("-topmost", True) #always on top
root.withdraw() #hide tk window
result=tkinter.messagebox.askyesno(title,msg)
root.destroy()
return result | 0.030534 |
def _create_list_of_array_controllers(self):
"""Creates the list of Array Controller URIs.
:raises: IloCommandNotSupportedError if the ArrayControllers
resource doesn't have the member "Member".
:returns list of ArrayControllers.
"""
headers, array_uri, array_settings = (
self._get_array_controller_resource())
array_uri_links = []
if ('links' in array_settings and
'Member' in array_settings['links']):
array_uri_links = array_settings['links']['Member']
else:
msg = ('"links/Member" section in ArrayControllers'
' does not exist')
raise exception.IloCommandNotSupportedError(msg)
return array_uri_links | 0.002635 |
def sinc_window(num_zeros=64, precision=9, window=None, rolloff=0.945):
'''Construct a windowed sinc interpolation filter
Parameters
----------
num_zeros : int > 0
The number of zero-crossings to retain in the sinc filter
precision : int > 0
The number of filter coefficients to retain for each zero-crossing
window : callable
The window function. By default, uses Blackman-Harris.
rolloff : float > 0
The roll-off frequency (as a fraction of nyquist)
Returns
-------
interp_window: np.ndarray [shape=(num_zeros * num_table + 1)]
The interpolation window (right-hand side)
num_bits: int
The number of bits of precision to use in the filter table
rolloff : float > 0
The roll-off frequency of the filter, as a fraction of Nyquist
Raises
------
TypeError
if `window` is not callable or `None`
ValueError
if `num_zeros < 1`, `precision < 1`,
or `rolloff` is outside the range `(0, 1]`.
Examples
--------
>>> # A filter with 10 zero-crossings, 32 samples per crossing, and a
... # Hann window for tapering.
>>> halfwin, prec, rolloff = resampy.filters.sinc_window(num_zeros=10, precision=5,
... window=scipy.signal.hann)
>>> halfwin
array([ 9.450e-01, 9.436e-01, ..., -7.455e-07, -0.000e+00])
>>> prec
32
>>> rolloff
0.945
>>> # Or using sinc-window filter construction directly in resample
>>> y = resampy.resample(x, sr_orig, sr_new, filter='sinc_window',
... num_zeros=10, precision=5,
... window=scipy.signal.hann)
'''
if window is None:
window = scipy.signal.blackmanharris
elif not six.callable(window):
raise TypeError('window must be callable, not type(window)={}'.format(type(window)))
if not 0 < rolloff <= 1:
raise ValueError('Invalid roll-off: rolloff={}'.format(rolloff))
if num_zeros < 1:
raise ValueError('Invalid num_zeros: num_zeros={}'.format(num_zeros))
if precision < 0:
raise ValueError('Invalid precision: precision={}'.format(precision))
# Generate the right-wing of the sinc
num_bits = 2**precision
n = num_bits * num_zeros
sinc_win = rolloff * np.sinc(rolloff * np.linspace(0, num_zeros, num=n + 1,
endpoint=True))
# Build the window function and cut off the left half
taper = window(2 * n + 1)[n:]
interp_win = (taper * sinc_win)
return interp_win, num_bits, rolloff | 0.0015 |
def _GenerateNotices(self):
"""Generate a summary of any notices.
Returns:
The generated HTML as a string.
"""
items = []
for e in self._notices:
d = e.GetDictToFormat()
if 'url' in d.keys():
d['url'] = '<a href="%(url)s">%(url)s</a>' % d
items.append('<li class="notice">%s</li>' %
e.FormatProblem(d).replace('\n', '<br>'))
if items:
return '<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items)
else:
return '' | 0.011928 |
def _set_igp_sync(self, v, load=False):
"""
Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igp_sync is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igp_sync() directly.
YANG Description: MPLS Rsvp IGP Synchronization information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igp_sync must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__igp_sync = t
if hasattr(self, '_set'):
self._set() | 0.005682 |
def _set_main_widget(self, widget, redraw):
"""
add provided widget to widget list and display it
:param widget:
:return:
"""
self.set_body(widget)
self.reload_footer()
if redraw:
logger.debug("redraw main widget")
self.refresh() | 0.006289 |
def ensure_path_exists(self):
# type: (LocalDestinationPath) -> None
"""Ensure path exists
:param LocalDestinationPath self: this
"""
if self._is_dir is None:
raise RuntimeError('is_dir not set')
if self._is_dir:
self._path.mkdir(mode=0o750, parents=True, exist_ok=True)
else:
if self._path.exists() and self._path.is_dir():
raise RuntimeError(
('destination path {} already exists and is a '
'directory').format(self._path))
else:
# ensure parent path exists and is created
self._path.parent.mkdir(
mode=0o750, parents=True, exist_ok=True) | 0.003984 |
def merge_all_config_sections_into_a_single_dict(desired_type: Type[T], config: ConfigParser, logger: Logger,
conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]:
"""
Helper method to convert a 'configparser' into a dictionary [property > value].
Properties from all sections are collected. If the same key appears in several sections, an
error will be thrown
:param config: the ConfigParser whose sections should be flattened
:return: a dictionary mapping each property name to its (converted) value
"""
# convert the whole config to a dictionary by flattening all sections. If a key is found twice in two different
# sections an error is raised
results = dict()
for section, props in config.items():
for key, value in props.items():
if key in results.keys():
# find all sections where it appears
sections_where_it_appears = [s for s, p in config.items() if key in p.keys()]
raise MultipleKeyOccurenceInConfigurationError.create(key, sections_where_it_appears)
else:
results[key] = value
return ConversionFinder.convert_collection_values_according_to_pep(results, desired_type, conversion_finder,
logger, **kwargs) | 0.007794 |
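To make the flattening concrete (hypothetical sections and keys; only the input is shown, since calling the function also needs a logger and a ConversionFinder):
import configparser
cfg = configparser.ConfigParser()
cfg.read_string("[db]\nhost = localhost\n\n[cache]\nttl = 60\n")
# Flattening all sections would yield {'host': 'localhost', 'ttl': '60'}.
# If 'host' also appeared under [cache], MultipleKeyOccurenceInConfigurationError
# would be raised instead.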
def dumps(o, preserve=False):
"""Stringifies input dict as toml
Args:
o: Object to dump into toml
preserve: Boolean parameter. If true, preserve inline tables.
Returns:
String containing the toml corresponding to dict
"""
retval = ""
addtoretval, sections = _dump_sections(o, "")
retval += addtoretval
while sections != {}:
newsections = {}
for section in sections:
addtoretval, addtosections = _dump_sections(sections[section],
section, preserve)
if addtoretval or (not addtoretval and not addtosections):
if retval and retval[-2:] != "\n\n":
retval += "\n"
retval += "[" + section + "]\n"
if addtoretval:
retval += addtoretval
for s in addtosections:
newsections[section + "." + s] = addtosections[s]
sections = newsections
return retval | 0.000978 |
def default_backends(cls):
"""Retrieve the default configuration.
This will look in the repository configuration (if for_path is
specified), the users' home directory and the system
configuration.
"""
paths = []
paths.append(os.path.expanduser("~/.gitconfig"))
paths.append("/etc/gitconfig")
backends = []
for path in paths:
try:
cf = ConfigFile.from_path(path)
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
else:
continue
backends.append(cf)
return backends | 0.002903 |
def _lm_solve(r, pmut, ddiag, bqt, delta, par0, enorm, finfo):
"""Compute the Levenberg-Marquardt parameter and solution vector.
Parameters:
r - IN/OUT n-by-m matrix, m >= n. On input, the full lower triangle is
the full lower triangle of R and the strict upper triangle is
ignored. On output, the strict upper triangle has been
obliterated. The value of 'm' here is not relevant so long as it
is at least n.
pmut - n-vector, defines permutation of R
ddiag - n-vector, diagonal elements of D
bqt - n-vector, first elements of B Q^T
delta - positive scalar, specifies scale of enorm(Dx)
par0 - positive scalar, initial estimate of the LM parameter
enorm - norm-computing function
finfo - info about chosen floating-point representation
Returns:
par - positive scalar, final estimate of LM parameter
x - n-vector, least-squares solution of LM equation (see below)
This routine computes the Levenberg-Marquardt parameter 'par' and a LM
solution vector 'x'. Given an n-by-n matrix A, an n-by-n nonsingular
diagonal matrix D, an m-vector B, and a positive number delta, the
problem is to determine values such that 'x' is the least-squares
solution to
A x = B
sqrt(par) * D x = 0
and either
(1) par = 0, dxnorm - delta <= 0.1 delta or
(2) par > 0 and |dxnorm - delta| <= 0.1 delta
where dxnorm = enorm(D x).
This routine is not given A, B, or D directly. If we define the
column-pivoted transposed QR factorization of A such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is a
lower triangular matrix with diagonal elements of nonincreasing
magnitude, this routine is given the full lower triangle of R, a
vector defining P ('pmut'), and the first n components of B Q^T
('bqt'). These values are essentially passed verbatim to _qrd_solve().
This routine iterates to estimate par. Usually only a few iterations
are needed, but no more than 10 are performed.
"""
dwarf = finfo.tiny
n, m = r.shape
x = np.empty_like(bqt)
sdiag = np.empty_like(bqt)
# "Compute and store x in the Gauss-Newton direction. If the
# Jacobian is rank-deficient, obtain a least-squares solution."
nnonsingular = n
wa1 = bqt.copy()
for i in range(n):
if r[i,i] == 0:
nnonsingular = i
wa1[i:] = 0
break
for j in range(nnonsingular - 1, -1, -1):
wa1[j] /= r[j,j]
wa1[:j] -= r[j,:j] * wa1[j]
x[pmut] = wa1
# Initial function evaluation. Check if the Gauss-Newton direction
# was good enough.
wa2 = ddiag * x
dxnorm = enorm(wa2, finfo)
normdiff = dxnorm - delta
if normdiff <= 0.1 * delta:
return 0, x
# If the Jacobian is not rank deficient, the Newton step provides
# a lower bound for the zero of the function.
par_lower = 0.
if nnonsingular == n:
wa1 = ddiag[pmut] * wa2[pmut] / dxnorm
wa1[0] /= r[0,0] # "Degenerate case"
for j in range(1, n):
wa1[j] = (wa1[j] - np.dot(wa1[:j], r[j,:j])) / r[j,j]
temp = enorm(wa1, finfo)
par_lower = normdiff / delta / temp**2
# We can always find an upper bound.
for j in range(n):
wa1[j] = np.dot(bqt[:j+1], r[j,:j+1]) / ddiag[pmut[j]]
gnorm = enorm(wa1, finfo)
par_upper = gnorm / delta
if par_upper == 0:
par_upper = dwarf / min(delta, 0.1)
# Now iterate our way to victory.
par = np.clip(par0, par_lower, par_upper)
if par == 0:
par = gnorm / dxnorm
itercount = 0
while True:
itercount += 1
if par == 0:
par = max(dwarf, par_upper * 0.001)
temp = np.sqrt(par)
wa1 = temp * ddiag
x = _qrd_solve(r[:,:n], pmut, wa1, bqt, sdiag) # sdiag is an output arg here
wa2 = ddiag * x
dxnorm = enorm(wa2, finfo)
olddiff = normdiff
normdiff = dxnorm - delta
if abs(normdiff) < 0.1 * delta:
break # converged
if par_lower == 0 and normdiff <= olddiff and olddiff < 0:
break # overshot, I guess?
if itercount == 10:
break # this is taking too long
# Compute and apply the Newton correction
wa1 = ddiag[pmut] * wa2[pmut] / dxnorm
for j in range(n - 1):
wa1[j] /= sdiag[j]
wa1[j+1:n] -= r[j,j+1:n] * wa1[j]
wa1[n-1] /= sdiag[n-1] # degenerate case
par_delta = normdiff / delta / enorm(wa1, finfo)**2
if normdiff > 0:
par_lower = max(par_lower, par)
elif normdiff < 0:
par_upper = min(par_upper, par)
par = max(par_lower, par + par_delta)
return par, x | 0.003619 |
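To make the system concrete: for a fixed par, the x described above is the least-squares solution of the stacked system [A; sqrt(par) D] x ≈ [B; 0], and the iteration adjusts par until enorm(D x) lands within 10% of delta. A small self-contained NumPy sketch of that relationship, using a dense solver instead of the routine's QR machinery:

import numpy as np

def lm_solve_dense(A, B, D, par):
    # Solve the damped least-squares system  [A; sqrt(par) * diag(D)] x ~= [B; 0].
    n = A.shape[1]
    A_aug = np.vstack([A, np.sqrt(par) * np.diag(D)])
    B_aug = np.concatenate([B, np.zeros(n)])
    x, *_ = np.linalg.lstsq(A_aug, B_aug, rcond=None)
    return x

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 3))
B = rng.normal(size=5)
D = np.ones(3)

for par in (0.0, 0.1, 1.0, 10.0):
    x = lm_solve_dense(A, B, D, par)
    print(par, np.linalg.norm(D * x))   # norm(D x) shrinks as par grows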
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper | 0.002681 |
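A usage sketch for the decorator (the Circle class is illustrative, not part of the module; it assumes cached_method and its _argstring helper are importable): each instance gets its own _cache dict keyed by method name plus arguments, so repeated calls with the same arguments skip the body.

class Circle:
    def __init__(self, radius):
        self.radius = radius

    @cached_method
    def area(self, precision):
        print('computing...')            # printed only on a cache miss
        return round(3.141592653589793 * self.radius ** 2, precision)

c = Circle(2.0)
print(c.area(3))   # prints 'computing...' then 12.566
print(c.area(3))   # served from c._cache; no recomputation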
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
        :param latitude: latitude of the last GPS fix
        :param longitude: longitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record) | 0.001328 |
def __find_hidden_analyses(self, docs):
""" Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei
tule arvestada lemmade järelühestamisel:
*) kesksõnade nud, dud, tud mitmesused;
*) muutumatute sõnade sõnaliigi mitmesus;
*) oleviku 'olema' mitmesus ('nad on' vs 'ta on');
*) asesõnade ainsuse-mitmuse mitmesus;
*) arv- ja asesõnade vaheline mitmesus;
Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega,
iga võti kujul (doc_index, word_index); """
hidden = dict()
nudTudLopud = re.compile('^.*[ntd]ud$')
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
if ANALYSIS in word and len(word[ANALYSIS]) > 1:
#
                    # 1) If most of the analyses are nud/tud/dud analyses, hide the ambiguity:
# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //
nudTud = [ nudTudLopud.match(a[ROOT]) != None or \
nudTudLopud.match(a[ENDING]) != None \
for a in word[ANALYSIS] ]
if nudTud.count( True ) > 1:
hidden[(d, w)] = 1
#
                    # 2) If the analyses share the same lemma and have no form marker, hide the ambiguities:
                    #    E.g.  kui+0 //_D_ //     kui+0 //_J_ //
                    #          nagu+0 //_D_ //    nagu+0 //_J_ //
lemmas = set([ a[ROOT] for a in word[ANALYSIS] ])
forms = set([ a[FORM] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '':
hidden[(d, w)] = 1
#
                    # 3) If the 'olema' analyses have the same lemma and the same ending, hide the ambiguities:
                    #    E.g. 'nad on' vs 'ta on' receive the same olema analysis, which remains ambiguous;
endings = set([ a[ENDING] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \
and (list(endings))[0] == '0':
hidden[(d, w)] = 1
#
                    # 4) If pronouns have the same lemma and ending, hide the singular/plural ambiguity:
                    #    E.g.  kõik+0 //_P_ sg n //   kõik+0 //_P_ pl n //
                    #          kes+0 //_P_ sg n //    kes+0 //_P_ pl n //
postags = set([ a[POSTAG] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \
len(endings) == 1:
hidden[(d, w)] = 1
#
                    # 5) If the lemma and ending are the same, hide the ambiguity between numerals and pronouns:
                    #    E.g.  teine+0 //_O_ pl n, //   teine+0 //_P_ pl n, //
                    #          üks+l //_N_ sg ad, //    üks+l //_P_ sg ad, //
if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \
'N' in postags) and len(endings) == 1:
hidden[(d, w)] = 1
return hidden | 0.011508 |
def add_constraints(self):
"""
Set the base constraints on the relation query.
:rtype: None
"""
if self._constraints:
foreign_key = getattr(self._parent, self._foreign_key, None)
if foreign_key is None:
self._query = None
else:
table = self._related.get_table()
self._query.where(
"{}.{}".format(table, self._other_key), "=", foreign_key
) | 0.003992 |
def generate_password(length=32):
"""Generate a cryptographically secure random string to use for passwords
Args:
length (int): Length of password, defaults to 32 characters
Returns:
Randomly generated string
"""
return ''.join(random.SystemRandom().choice(string.ascii_letters + '!@#$+.,') for _ in range(length)) | 0.005682 |
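A quick check of the properties promised by the docstring (a sketch, assuming the function above is importable): the result has the requested length and draws only from ASCII letters plus the '!@#$+.,' punctuation set.

import string

pw = generate_password(16)
assert len(pw) == 16
assert set(pw) <= set(string.ascii_letters + '!@#$+.,')
print(pw)   # e.g. 'X@qLpa$f.RkCn+Tw' (different on every call)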
def get_version(version=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
# At the toplevel, this would cause an import loop.
from django.utils.version import get_svn_revision
svn_revision = get_svn_revision()[4:]
if svn_revision != 'unknown':
sub = '.dev%s' % svn_revision
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return main + sub | 0.001028 |
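A few hand-traced inputs and the strings the rules above produce (illustrative values, not library output): final releases drop the suffix, alpha/beta/rc map to a/b/c plus the serial, and the micro component is omitted when it is 0.

print(get_version((1, 2, 0, 'final', 0)))   # -> '1.2'
print(get_version((1, 2, 3, 'final', 0)))   # -> '1.2.3'
print(get_version((1, 2, 0, 'rc', 1)))      # -> '1.2c1'
print(get_version((1, 2, 1, 'beta', 2)))    # -> '1.2.1b2'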
def schema_factory(schema_name, **schema_nodes):
"""Schema Validation class factory.
Args:
schema_name(str): The namespace of the schema.
schema_nodes(dict): The attr_names / SchemaNodes mapping of schema.
Returns:
A Schema class.
Raises:
SchemaError, for bad attribute setting initialization.
Examples:
>>> from schema_factory import FloatNode, StringNode, SchemaNode
>>>
>>> PointSchema = schema_factory(
... schema_name='point',
... lat=FloatNode(),
... lng=FloatNode(),
... )
...
>>> point = PointSchema(lat=34, lng=29.01)
>>> print(point.to_dict)
OrderedDict([('lat', 34.0), ('lng', 29.01)])
>>> point2 = PointSchema(lat='34', lng='0')
>>> print(point2.to_dict)
OrderedDict([('lat', 34.0), ('lng', 0.0)])
>>> RegionSchema = schema_factory(
... schema_name='Region',
... name=StringNode(),
... country_code=StringNode( required=True, validators=[lambda x: len(x) == 2]),
... location=SchemaNode(PointSchema, required=False, default=None),
... keywords=StringNode(array=True, required=False, default=[])
... )
...
>>> region = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03})
>>> print(region)
<RegionSchema instance, attributes:['country_code', 'keywords', 'location', 'name']>
>>> region.keywords
[]
>>> region2 = RegionSchema(name='Athens')
Traceback (most recent call last):
...
schema_factory.errors.SchemaError: Missing Required Attributes: {'country_code'}
>>> region3 = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03},
... foo='bar')
Traceback (most recent call last):
...
schema_factory.errors.SchemaError: Invalid Attributes RegionSchema for {'foo'}.
>>> region4 = RegionSchema(name='Athens', country_code='gr', keywords=['Acropolis', 'Mousaka', 434132])
"""
schema_dict = dict()
schema_dict.update(schema_nodes)
def cls_repr(self): # pragma: no cover
return "<{} instance at: 0x{:x}>".format(self.__class__, id(self))
def cls_str(self): # pragma: no cover
return "<{} instance, attributes:{}>".format(
self.__class__.__name__,
self.schema_nodes
)
def cls_init(self, **kwargs):
kwargs_set = set(kwargs)
if not self.required.issubset(kwargs_set):
raise SchemaError('Missing Required Attributes: {}'.format(
self.required.difference(kwargs_set)
))
if not set(kwargs).issubset(set(self.schema_nodes)):
raise SchemaError('Invalid Attributes {} for {}.'.format(
self.__class__.__name__,
set(kwargs).difference(set(self.schema_nodes))
))
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def to_dict(self):
return OrderedDict([(k, getattr(self, k)) for k in self.schema_nodes])
schema_dict['to_dict'] = property(to_dict)
schema_dict['__init__'] = cls_init
schema_dict['__repr__'] = cls_repr
schema_dict['__str__'] = cls_str
return SchemaType('{}Schema'.format(schema_name.title()), (), schema_dict) | 0.002314 |
def ls_dir(path, include_hidden=False):
"""Finds content of folder
:param path: directory to get list of files and folders
:param include_hidden: True iff include hidden files in list
:return: List of paths in given directory
"""
lst = []
for file in os.listdir(path):
hidden_file = FileSystem(file).is_hidden()
        if include_hidden or not hidden_file:
lst.append(os.path.join(path, file))
return list(set(lst)) | 0.002045 |
def copy(self, newdata=None):
'''Return a copy of the cube with optionally new data.'''
if newdata is None:
newdata = self.data.copy()
return self.__class__(
self.molecule, self.origin.copy(), self.axes.copy(),
self.nrep.copy(), newdata, self.subtitle, self.nuclear_charges
) | 0.005831 |
def gather_data(options):
"""Get Data specific for command selected.
Create ec2 specific query and output title based on
options specified, retrieves the raw response data
from aws, then processes it into the i_info dict,
which is used throughout this module.
Args:
options (object): contains args and data from parser,
that has been adjusted by the command
specific functions as appropriate.
Returns:
i_info (dict): information on instances and details.
param_str (str): the title to display before the list.
"""
(qry_string, param_str) = qry_create(options)
qry_results = awsc.get_inst_info(qry_string)
i_info = process_results(qry_results)
return (i_info, param_str) | 0.001256 |
def timestamp(value, fmt=None):
"""Parse a datetime to a unix timestamp.
Uses fast custom parsing for common datetime formats or the slow dateutil
    parser for other formats. This is a trade-off between ease of use and speed,
    and is very useful for fast parsing of timestamp strings whose format may be
    standard but varied, or unknown prior to parsing.
Common formats include:
1 Feb 2010 12:00:00 GMT
Mon, 1 Feb 2010 22:00:00 +1000
20100201120000
1383470155 (seconds since epoch)
See the other timestamp_*() functions for more details.
Args:
value: A string representing a datetime.
fmt: A timestamp format string like for time.strptime().
Returns:
        The time in seconds since epoch as an integer for the value specified.
"""
if fmt:
return _timestamp_formats.get(fmt,
lambda v: timestamp_fmt(v, fmt)
)(value)
l = len(value)
if 19 <= l <= 24 and value[3] == " ":
# '%d %b %Y %H:%M:%Sxxxx'
try:
return timestamp_d_b_Y_H_M_S(value)
except (KeyError, ValueError, OverflowError):
pass
if 30 <= l <= 31:
# '%a, %d %b %Y %H:%M:%S %z'
try:
return timestamp_a__d_b_Y_H_M_S_z(value)
except (KeyError, ValueError, OverflowError):
pass
if l == 14:
# '%Y%m%d%H%M%S'
try:
return timestamp_YmdHMS(value)
except (ValueError, OverflowError):
pass
# epoch timestamp
try:
return timestamp_epoch(value)
except ValueError:
pass
# slow version
return timestamp_any(value) | 0.003578 |
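The fast paths above dispatch on string length plus a couple of sentinel characters; a hedged sketch of what the listed formats amount to with only the standard library (the module's timestamp_* helpers are replaced here by time.strptime plus calendar.timegm):

import calendar
import time

def to_epoch(value, fmt):
    # Parse as UTC with strptime, then convert the struct_time to seconds since epoch.
    return calendar.timegm(time.strptime(value, fmt))

print(to_epoch('1 Feb 2010 12:00:00 GMT', '%d %b %Y %H:%M:%S %Z'))  # 1265025600
print(to_epoch('20100201120000', '%Y%m%d%H%M%S'))                   # 1265025600
print(int('1383470155'))                                            # epoch strings pass straight through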
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == 'param':
self.symbols.declare_parameter(node.name)
elif node.ctx == 'store':
self.symbols.store(node.name)
elif node.ctx == 'load':
self.symbols.load(node.name) | 0.005291 |
def libvlc_media_set_user_data(p_md, p_new_user_data):
'''Sets media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
an native object that references a L{Media} pointer.
@param p_md: media descriptor object.
@param p_new_user_data: pointer to user data.
'''
f = _Cfunctions.get('libvlc_media_set_user_data', None) or \
_Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
None, Media, ctypes.c_void_p)
return f(p_md, p_new_user_data) | 0.003448 |
def _extract_vararray_max(tform):
"""
Extract number from PX(number)
"""
first = tform.find('(')
last = tform.rfind(')')
if first == -1 or last == -1:
# no max length specified
return -1
maxnum = int(tform[first+1:last])
return maxnum | 0.003521 |
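Hand-traced examples of the TFORM parsing above (illustrative, assuming the function is importable):

print(_extract_vararray_max('PD(350)'))   # -> 350
print(_extract_vararray_max('PE(1000)'))  # -> 1000
print(_extract_vararray_max('PB'))        # -> -1, no max length specified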
def handle(self, *args, **options):
"""
:param args:
:param options:
:return:
"""
counter = 0
for key in options:
if options[key]:
counter += 1
# If no options are set, do a normal patch
if counter == 1:
options['default'] = True
###########################################################################################
tag_succeed = 1
if APISettings.GIT_TAG_AUTO_COMMIT:
Git.add()
Git.commit()
if options['default']:
tag_succeed = Git.tag()
if options['staging']:
tag_succeed = Git.tag(APISettings.GIT_STAGING_PRE_TAG)
if options['production']:
tag_succeed = Git.tag(APISettings.GIT_ACTIVATE_PRE_TAG)
        if options['push'] or APISettings.GIT_TAG_AUTO_TAG_PUSH:
if tag_succeed:
Git.push_tags() | 0.003145 |
def robust_topological_sort(graph: Graph) -> list:
"""Identify strongly connected components then perform a topological sort of those components."""
assert check_argument_types()
components = strongly_connected_components(graph)
node_component = {}
for component in components:
for node in component:
node_component[node] = component
component_graph = {}
for component in components:
component_graph[component] = []
for node in graph:
node_c = node_component[node]
for successor in graph[node]:
successor_c = node_component[successor]
if node_c != successor_c:
component_graph[node_c].append(successor_c)
return topological_sort(component_graph) | 0.046512 |
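A self-contained sketch of the same idea on a tiny graph. The SCC helper below is a simplified Kosaraju stand-in, not the module's strongly_connected_components; the point is that the cycle {a, b} collapses into a single component, so the ordering over components is well defined.

from collections import defaultdict

def sccs(graph):
    # Kosaraju: order nodes by DFS finish time, then collect components on the
    # transposed graph in reverse finish order.
    visited, order = set(), []
    def dfs(u):
        visited.add(u)
        for v in graph[u]:
            if v not in visited:
                dfs(v)
        order.append(u)
    for u in graph:
        if u not in visited:
            dfs(u)
    transposed = defaultdict(list)
    for u in graph:
        for v in graph[u]:
            transposed[v].append(u)
    assigned, components = set(), []
    for u in reversed(order):
        if u in assigned:
            continue
        stack, component = [u], set()
        while stack:
            x = stack.pop()
            if x not in assigned:
                assigned.add(x)
                component.add(x)
                stack.extend(transposed[x])
        components.append(frozenset(component))
    return components

# 'a' and 'b' form a cycle; both must precede 'c', which precedes 'd'.
graph = {'a': ['b', 'c'], 'b': ['a'], 'c': ['d'], 'd': []}
print(sccs(graph))   # three components, the cycle collapsed first: [{a, b}, {c}, {d}]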
def reset_syslog_config(host,
username,
password,
protocol=None,
port=None,
syslog_config=None,
esxi_hosts=None,
credstore=None):
'''
Reset the syslog service to its default settings.
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, ``default-timeout``,
or ``all`` for all of these.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
syslog_config
List of parameters to reset, provided as a comma-delimited string, or 'all' to
reset all syslog configuration parameters. Required.
esxi_hosts
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: Dictionary with a top-level key of 'success' which indicates
if all the parameters were reset, and individual keys
for each parameter indicating which succeeded or failed, per host.
CLI Example:
``syslog_config`` can be passed as a quoted, comma-separated string, e.g.
.. code-block:: bash
# Used for ESXi host connection information
salt '*' vsphere.reset_syslog_config my.esxi.host root bad-password \
syslog_config='logdir,loghost'
# Used for connecting to a vCenter Server
salt '*' vsphere.reset_syslog_config my.vcenter.location root bad-password \
syslog_config='logdir,loghost' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
'''
if not syslog_config:
raise CommandExecutionError('The \'reset_syslog_config\' function requires a '
'\'syslog_config\' setting.')
valid_resets = ['logdir', 'loghost', 'default-rotate',
'default-size', 'default-timeout', 'logdir-unique']
cmd = 'system syslog config set --reset='
if ',' in syslog_config:
resets = [ind_reset.strip() for ind_reset in syslog_config.split(',')]
elif syslog_config == 'all':
resets = valid_resets
else:
resets = [syslog_config]
ret = {}
if esxi_hosts:
if not isinstance(esxi_hosts, list):
raise CommandExecutionError('\'esxi_hosts\' must be a list.')
for esxi_host in esxi_hosts:
response_dict = _reset_syslog_config_params(host, username, password,
cmd, resets, valid_resets,
protocol=protocol, port=port,
esxi_host=esxi_host, credstore=credstore)
ret.update({esxi_host: response_dict})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response_dict = _reset_syslog_config_params(host, username, password,
cmd, resets, valid_resets,
protocol=protocol, port=port,
credstore=credstore)
ret.update({host: response_dict})
return ret | 0.00345 |
def add_standard_firewall(self, server_id, is_virt=True):
"""Creates a firewall for the specified virtual/hardware server.
:param int server_id: The ID of the server to create the firewall for
:param bool is_virt: If true, will create the firewall for a virtual
server, otherwise for a hardware server.
:returns: A dictionary containing the standard virtual server firewall
order
"""
package = self.get_standard_package(server_id, is_virt)
if is_virt:
product_order = {
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'Protection_Firewall',
'quantity': 1,
'packageId': 0,
'virtualGuests': [{'id': server_id}],
'prices': [{'id': package[0]['prices'][0]['id']}]
}
else:
product_order = {
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'Protection_Firewall',
'quantity': 1,
'packageId': 0,
'hardware': [{'id': server_id}],
'prices': [{'id': package[0]['prices'][0]['id']}]
}
return self.client['Product_Order'].placeOrder(product_order) | 0.001483 |
def get_field(self, path, name):
"""
Retrieves the value of the field at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:return:
:raises ValueError: A component of path is a field name.
:raises KeyError: A component of path doesn't exist.
:raises TypeError: The field name is a component of a path.
"""
try:
value = self.get(path, name)
if not isinstance(value, str):
                raise TypeError('{!r} is a component of a path, not a field'.format(name))
return value
except KeyError:
            raise | 0.00318 |
def send_message(self, stream, msg):
"""Send an arbitrary message to a particular client.
Parameters
----------
stream : :class:`tornado.iostream.IOStream` object
The stream to send the message to.
msg : Message object
The message to send.
Notes
-----
This method can only be called in the IOLoop thread.
Failed sends disconnect the client connection and calls the device
on_client_disconnect() method. They do not raise exceptions, but they
are logged. Sends also fail if more than self.MAX_WRITE_BUFFER_SIZE
bytes are queued for sending, implying that client is falling behind.
"""
assert get_thread_ident() == self.ioloop_thread_id
try:
if stream.KATCPServer_closing:
raise RuntimeError('Stream is closing so we cannot '
'accept any more writes')
return stream.write(str(msg) + '\n')
except Exception:
addr = self.get_address(stream)
self._logger.warn('Could not send message {0!r} to {1}'
.format(str(msg), addr), exc_info=True)
stream.close(exc_info=True) | 0.001591 |
def _list_files_in_path(path, pattern="*.stan"):
"""
indexes a directory of stan files
returns as dictionary containing contents of files
"""
results = []
for dirname, subdirs, files in os.walk(path):
for name in files:
if fnmatch(name, pattern):
results.append(os.path.join(dirname, name))
    return results | 0.002695 |
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available.
"""
if platform.system() == 'Windows':
# Return the root with the drive letter of the volume the current
# working directory is on.
location = os.getcwd()
location, _, _ = location.partition('\\')
location = '{0:s}\\'.format(location)
else:
location = '/'
if not os.path.exists(location):
return None
path_spec = os_path_spec.OSPathSpec(location=location)
return self.GetFileEntryByPathSpec(path_spec) | 0.01318 |
async def get_upstream_dns(cls) -> list:
"""Upstream DNS server addresses.
Upstream DNS servers used to resolve domains not managed by this MAAS
(space-separated IP addresses). Only used when MAAS is running its own
DNS server. This value is used as the value of 'forwarders' in the DNS
server config.
"""
data = await cls.get_config("upstream_dns")
return [] if data is None else re.split(r'[,\s]+', data) | 0.004246 |
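The split pattern accepts commas, whitespace, or both between addresses, so the stored config value can be written in any of the usual styles; a quick illustration of just the parsing step:

import re

for raw in ('8.8.8.8 8.8.4.4', '8.8.8.8,8.8.4.4', '8.8.8.8, 8.8.4.4'):
    print(re.split(r'[,\s]+', raw))   # ['8.8.8.8', '8.8.4.4'] in every case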
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# fetch the password
try:
password_base64 = self.config.get(service, username).encode()
            # decode with base64
            password_encrypted = base64.decodebytes(password_base64)
            # decrypt the password
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password | 0.00313 |
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None):
'''
For each StaffMember-related Repeated Expense Rule, look for EventStaffMember
instances in the designated time window that do not already have expenses associated
    with them. For hourly rental expenses, generate new expenses that are
    associated with this rule. For non-hourly expenses, generate new expenses
based on the non-overlapping intervals of days, weeks or months for which
there is not already an ExpenseItem associated with the rule in question.
'''
# This is used repeatedly, so it is put at the top
submissionUser = getattr(request, 'user', None)
# Return the number of new expense items created
generate_count = 0
# First, construct the set of rules that need to be checked for affiliated events
rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False))
if rule:
rule_filters = rule_filters & Q(id=rule.id)
rulesToCheck = RepeatedExpenseRule.objects.filter(
rule_filters).distinct().order_by(
'-staffmemberwageinfo__category', '-staffdefaultwage__category'
)
# These are the filters placed on Events that overlap the window in which
# expenses are being generated.
event_timefilters = Q()
if datetimeTuple and len(datetimeTuple) == 2:
timelist = list(datetimeTuple)
timelist.sort()
event_timefilters = event_timefilters & (
Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1])
)
if event:
event_timefilters = event_timefilters & Q(event__id=event.id)
# Now, we loop through the set of rules that need to be applied, then loop
# through the Events in the window in question that involved the staff
# member indicated by the rule.
for rule in rulesToCheck:
staffMember = getattr(rule, 'staffMember', None)
staffCategory = getattr(rule, 'category', None)
# No need to continue if expenses are not to be generated
if (
(not staffMember and not staffCategory) or
(
not staffMember and not
getConstant('financial__autoGenerateFromStaffCategoryDefaults')
)
):
continue
# For construction of expense descriptions
replacements = {
'type': _('Staff'),
'to': _('payment to'),
'for': _('for'),
}
# This is the generic category for all Event staff, but it may be overridden below
expense_category = getConstant('financial__otherStaffExpenseCat')
if staffCategory:
if staffMember:
# This staff member in this category
eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory)
elif getConstant('financial__autoGenerateFromStaffCategoryDefaults'):
# Any staff member who does not already have a rule specified this category
eventstaff_filter = (
Q(category=staffCategory) &
~Q(staffMember__expenserules__category=staffCategory)
)
replacements['type'] = staffCategory.name
# For standard categories of staff, map the EventStaffCategory to
# an ExpenseCategory using the stored constants. Otherwise, the
# ExpenseCategory is a generic one.
if staffCategory == getConstant('general__eventStaffCategoryAssistant'):
expense_category = getConstant('financial__assistantClassInstructionExpenseCat')
elif staffCategory in [
getConstant('general__eventStaffCategoryInstructor'),
getConstant('general__eventStaffCategorySubstitute')
]:
expense_category = getConstant('financial__classInstructionExpenseCat')
else:
# We don't want to generate duplicate expenses when there is both a category-limited
# rule and a non-limited rule for the same person, so we have to construct the list
# of categories that are to be excluded if no category is specified by this rule.
coveredCategories = list(staffMember.expenserules.filter(
category__isnull=False).values_list('category__id', flat=True))
eventstaff_filter = Q(staffMember=staffMember) & ~Q(category__id__in=coveredCategories)
if rule.advanceDays is not None:
if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
event_timefilters = event_timefilters & Q(
event__endTime__lte=timezone.now() + timedelta(days=rule.advanceDays)
)
elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
event_timefilters = event_timefilters & Q(
event__startTime__lte=timezone.now() + timedelta(days=rule.advanceDays)
)
if rule.priorDays is not None:
if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
event_timefilters = event_timefilters & Q(
event__endTime__gte=timezone.now() - timedelta(days=rule.priorDays)
)
elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
event_timefilters = event_timefilters & Q(
event__startTime__gte=timezone.now() - timedelta(days=rule.priorDays)
)
if rule.startDate:
event_timefilters = event_timefilters & Q(event__startTime__gte=timezone.now().replace(
year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day,
hour=0, minute=0, second=0, microsecond=0,
))
if rule.endDate:
event_timefilters = event_timefilters & Q(event__startTime__lte=timezone.now().replace(
year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day,
hour=0, minute=0, second=0, microsecond=0,
))
# Loop through EventStaffMembers for which there are not already
# directly allocated expenses under this rule, and create new
# ExpenseItems for them depending on whether the rule requires hourly
# expenses or non-hourly ones to be generated.
staffers = EventStaffMember.objects.filter(eventstaff_filter & event_timefilters).exclude(
Q(event__expenseitem__expenseRule=rule)).distinct()
if rule.applyRateRule == rule.RateRuleChoices.hourly:
for staffer in staffers:
# Hourly expenses are always generated without checking for
# overlapping windows, because the periods over which hourly
# expenses are defined are disjoint. However, hourly expenses
# are allocated directly to events, so we just need to create
# expenses for any events that do not already have an Expense
# Item generate under this rule.
replacements['event'] = staffer.event.name
replacements['name'] = staffer.staffMember.fullName
replacements['dates'] = staffer.event.startTime.strftime('%Y-%m-%d')
if (
staffer.event.startTime.strftime('%Y-%m-%d') !=
staffer.event.endTime.strftime('%Y-%m-%d')
):
replacements['dates'] += ' %s %s' % (
_('to'), staffer.event.endTime.strftime('%Y-%m-%d')
)
# Find or create the TransactionParty associated with the staff member.
staffer_party = TransactionParty.objects.get_or_create(
staffMember=staffer.staffMember,
defaults={
'name': staffer.staffMember.fullName,
'user': getattr(staffer.staffMember, 'userAccount', None)
}
)[0]
params = {
'event': staffer.event,
'category': expense_category,
'expenseRule': rule,
'description': '%(type)s %(to)s %(name)s %(for)s: %(event)s, %(dates)s' % \
replacements,
'submissionUser': submissionUser,
'hours': staffer.netHours,
'wageRate': rule.rentalRate,
'total': staffer.netHours * rule.rentalRate,
'accrualDate': staffer.event.startTime,
'payTo': staffer_party,
}
ExpenseItem.objects.create(**params)
generate_count += 1
else:
            # Non-hourly expenses are generated by constructing the time
            # intervals in which the occurrences take place, and removing from
            # those intervals any intervals for which an expense has already
            # been generated under this rule (so, for example, monthly rentals
            # will not show up multiple times). So, we just need to construct
            # the set of intervals for which to construct expenses. We first
            # need to split the set of EventStaffMember objects by StaffMember
            # (in case this rule is not person-specific) and then run this
            # procedure separately for each StaffMember.
members = StaffMember.objects.filter(eventstaffmember__in=staffers)
for member in members:
events = [x.event for x in staffers.filter(staffMember=member)]
# Find or create the TransactionParty associated with the staff member.
staffer_party = TransactionParty.objects.get_or_create(
staffMember=member,
defaults={
'name': member.fullName,
'user': getattr(member, 'userAccount', None)
}
)[0]
intervals = [
(x.localStartTime, x.localEndTime) for x in
EventOccurrence.objects.filter(event__in=events)
]
remaining_intervals = rule.getWindowsAndTotals(intervals)
for startTime, endTime, total, description in remaining_intervals:
replacements['when'] = description
replacements['name'] = member.fullName
params = {
'category': expense_category,
'expenseRule': rule,
'periodStart': startTime,
'periodEnd': endTime,
'description': '%(type)s %(to)s %(name)s %(for)s %(when)s' % replacements,
'submissionUser': submissionUser,
'total': total,
'accrualDate': startTime,
'payTo': staffer_party,
}
ExpenseItem.objects.create(**params)
generate_count += 1
rulesToCheck.update(lastRun=timezone.now())
return generate_count | 0.003529 |
def _choose_random_edge(self, edges: Set[EDGE]) -> Optional[EDGE]:
"""Picks random edge from the set of edges.
Args:
edges: Set of edges to pick from.
Returns:
Random edge from the supplied set, or None for empty set.
"""
if edges:
index = self._rand.randint(len(edges))
for e in edges:
if not index:
return e
index -= 1
return None | 0.004184 |
def read_bytes(self, where, size, force=False):
"""
Read from memory.
:param int where: address to read data from
:param int size: number of bytes
:param force: whether to ignore memory permissions
:return: data
:rtype: list[int or Expression]
"""
result = []
for i in range(size):
result.append(Operators.CHR(self.read_int(where + i, 8, force)))
return result | 0.004338 |