def _get_req_rem_geo(self, ds_info):
"""Find out which geolocation files are needed."""
if ds_info['dataset_groups'][0].startswith('GM'):
if self.use_tc is False:
req_geo = 'GMODO'
rem_geo = 'GMTCO'
else:
req_geo = 'GMTCO'
rem_geo = 'GMODO'
elif ds_info['dataset_groups'][0].startswith('GI'):
if self.use_tc is False:
req_geo = 'GIMGO'
rem_geo = 'GITCO'
else:
req_geo = 'GITCO'
rem_geo = 'GIMGO'
else:
raise ValueError('Unknown dataset group %s' % ds_info['dataset_groups'][0])
    return req_geo, rem_geo
def get_asset_admin_session_for_repository(self, repository_id=None, proxy=None):
"""Gets an asset administration session for the given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetAdminSession) - an
``AssetAdminSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_admin()`` and ``supports_visible_federation()``
are ``true``.*
"""
asset_lookup_session = self._provider_manager.get_asset_lookup_session_for_repository(
repository_id, proxy)
return AssetAdminSession(
self._provider_manager.get_asset_admin_session_for_repository(repository_id,
proxy),
self._config_map,
        asset_lookup_session)
def available(software=True,
drivers=True,
summary=False,
skip_installed=True,
skip_hidden=True,
skip_mandatory=False,
skip_reboot=False,
categories=None,
severities=None,):
'''
.. versionadded:: 2017.7.0
List updates that match the passed criteria. This allows for more filter
options than :func:`list`. Good for finding a specific GUID or KB.
Args:
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is True)
summary (bool):
- True: Return a summary of updates available for each category.
- False (default): Return a detailed list of available updates.
skip_installed (bool):
            Skip updates that are already installed. Default is True.
skip_hidden (bool):
Skip updates that have been hidden. Default is True.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is False.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: Returns a dict containing either a summary or a list of updates:
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
CLI Examples:
.. code-block:: bash
# Normal Usage (list all software updates)
salt '*' win_wua.available
# List all updates with categories of Critical Updates and Drivers
salt '*' win_wua.available categories=["Critical Updates","Drivers"]
# List all Critical Security Updates
salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"]
# List all updates with a severity of Critical
salt '*' win_wua.available severities=["Critical"]
# A summary of all available updates
salt '*' win_wua.available summary=True
# A summary of all Feature Packs and Windows 8.1 Updates
salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True
'''
# Create a Windows Update Agent instance
wua = salt.utils.win_update.WindowsUpdateAgent()
# Look for available
updates = wua.available(
skip_hidden=skip_hidden, skip_installed=skip_installed,
skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
software=software, drivers=drivers, categories=categories,
severities=severities)
# Return results as Summary or Details
    return updates.summary() if summary else updates.list()
def embed(inp, n_inputs, n_features, initializer=None,
fix_parameters=False, apply_w=None):
""" Embed.
Embed slices a matrix/tensor with indexing array/tensor. Weights are initialized with :obj:`nnabla.initializer.UniformInitializer` within the range of :math:`-\\sqrt{3}` and :math:`\\sqrt{3}`.
Args:
        inp(~nnabla.Variable): [Integer] Indices with shape :math:`(I_0, ..., I_N)`
        n_inputs : number of possible inputs, words or vocabularies
n_features : number of embedding features
fix_parameters (bool): When set to `True`, the embedding weight matrix
will not be updated.
apply_w (function): Lambda, function, or callable object applied to the weights.
Returns:
~nnabla.Variable: Output with shape :math:`(I_0, ..., I_N, W_1, ..., W_M)`
"""
if not initializer:
initializer = UniformInitializer((-np.sqrt(3.), np.sqrt(3)))
w = get_parameter_or_create("W", [n_inputs, n_features],
initializer, True, not fix_parameters)
if apply_w is not None:
w = apply_w(w)
    return F.embed(inp, w)
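# A hedged usage sketch for the embed layer above, assuming nnabla is installed
# and this function is exposed as nnabla.parametric_functions.embed: integer
# indices of shape (B, T) are mapped to embeddings of shape (B, T, n_features).
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 20))                        # batch of 8 sequences of 20 word ids
y = PF.embed(x, n_inputs=10000, n_features=128)
print(y.shape)                                  # (8, 20, 128)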
def __bayesian_information_criterion(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters using bayesian information criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Splitting criterion in line with bayesian information criterion.
High value of splitting criterion means that current structure is much better.
@see __minimum_noiseless_description_length(clusters, centers)
"""
scores = [float('inf')] * len(clusters) # splitting criterion
dimension = len(self.__pointer_data[0])
# estimation of the noise variance in the data set
sigma_sqrt = 0.0
K = len(clusters)
N = 0.0
for index_cluster in range(0, len(clusters), 1):
for index_object in clusters[index_cluster]:
            sigma_sqrt += euclidean_distance_square(self.__pointer_data[index_object], centers[index_cluster])
N += len(clusters[index_cluster])
if N - K > 0:
sigma_sqrt /= (N - K)
p = (K - 1) + dimension * K + 1
# in case of the same points, sigma_sqrt can be zero (issue: #407)
sigma_multiplier = 0.0
if sigma_sqrt <= 0.0:
sigma_multiplier = float('-inf')
else:
sigma_multiplier = dimension * 0.5 * log(sigma_sqrt)
# splitting criterion
for index_cluster in range(0, len(clusters), 1):
n = len(clusters[index_cluster])
L = n * log(n) - n * log(N) - n * 0.5 * log(2.0 * numpy.pi) - n * sigma_multiplier - (n - K) * 0.5
# BIC calculation
scores[index_cluster] = L - p * 0.5 * log(N)
    return sum(scores)
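# A minimal, self-contained sketch (hypothetical 1-D data, independent of
# pyclustering) of the same splitting criterion: per-cluster log-likelihood
# minus the BIC penalty p/2 * log(N), summed over clusters.
from math import log, pi

def toy_bic(clusters, centers):
    dimension = 1
    K = len(clusters)
    N = float(sum(len(c) for c in clusters))
    # pooled noise variance estimate, as in the method above
    sigma_sqrt = sum((x - centers[i]) ** 2 for i, c in enumerate(clusters) for x in c)
    sigma_sqrt /= max(N - K, 1.0)
    p = (K - 1) + dimension * K + 1
    score = 0.0
    for c in clusters:
        n = len(c)
        L = (n * log(n) - n * log(N) - n * 0.5 * log(2.0 * pi)
             - n * dimension * 0.5 * log(sigma_sqrt) - (n - K) * 0.5)
        score += L - p * 0.5 * log(N)
    return score

print(toy_bic([[0.9, 1.0, 1.1], [4.9, 5.0, 5.1]], [1.0, 5.0]))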
def diagnostics(self, **kwds):
"""
Endpoint: /system/diagnostics.json
Runs a set of diagnostic tests on the server.
Returns a dictionary containing the results.
"""
# Don't process the result automatically, since this raises an exception
# on failure, which doesn't provide the cause of the failure
self._client.get("/system/diagnostics.json", process_response=False,
**kwds)
    return self._client.last_response.json()["result"]
def skills_name_show(self, name, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/skills#get-skill-by-name"
api_path = "/api/v2/skills/name/{name}"
api_path = api_path.format(name=name)
    return self.call(api_path, **kwargs)
def silhouette_score(self, data, metric='euclidean', sample_size=None, random_state=None, **kwds):
"""
Computes the mean Silhouette Coefficient of all samples (implicit evaluation)
:param data: The data that the clusters are generated from
:param metric: the pairwise distance metric
:param sample_size: the size of the sample to use computing the Silhouette Coefficient
    :param random_state: if an integer is given, it fixes the random seed; otherwise it is random
:param kwds: any further parameters that are passed to the distance function
:return: the mean Silhouette Coefficient of all samples
"""
    return silhouette_score(data, self.get_labels(self), metric, sample_size, random_state, **kwds)
def orient(args):
"""
%prog orient in.gff3 features.fasta > out.gff3
Change the feature orientations based on translation. This script is often
needed in fixing the strand information after mapping RNA-seq transcripts.
You can generate the features.fasta similar to this command:
$ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff
JCVI.Medtr.v4.fasta -o features.fasta
"""
from jcvi.formats.fasta import longestorf
p = OptionParser(orient.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ingff3, fastafile = args
idsfile = fastafile.rsplit(".", 1)[0] + ".orf.ids"
if need_update(fastafile, idsfile):
longestorf([fastafile, "--ids"])
orientations = DictFile(idsfile)
gff = Gff(ingff3)
flipped = 0
for g in gff:
id = None
for tag in ("ID", "Parent"):
if tag in g.attributes:
id, = g.attributes[tag]
break
assert id
orientation = orientations.get(id, "+")
if orientation == '-':
g.strand = {"+": "-", "-": "+"}[g.strand]
flipped += 1
print(g)
    logging.debug("A total of {0} features flipped.".format(flipped))
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
script_text = (ScriptWriter.get_header(script_text) +
self._load_template(dev_path) % locals())
    self.write_script(script_name, _to_ascii(script_text), 'b')
def init_from_acceptor(self, acceptor):
"""
        Initialize this automaton by copying the states, alphabet and symbol
        tables of the given acceptor.
        Args:
            acceptor: The acceptor to copy from
Returns:
None
"""
self.states = copy.deepcopy(acceptor.states)
self.alphabet = copy.deepcopy(acceptor.alphabet)
self.osyms = copy.deepcopy(acceptor.osyms)
    self.isyms = copy.deepcopy(acceptor.isyms)
def get_object(self):
"""
If a single object has been requested, will set
`self.object` and return the object.
"""
queryset = None
slug = self.kwargs.get(self.slug_url_kwarg, None)
if slug is not None:
queryset = self.get_queryset()
slug_field = self.slug_field
queryset = queryset.filter(**{slug_field: slug})
try:
self.object = queryset.get()
except ObjectDoesNotExist:
raise http.Http404
    return self.object
def _ns_query(self, session):
"""
Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend
during initialization.
Returns: a SQLAlchemy query object
"""
return session.query(ORMJob).filter(ORMJob.app == self.app,
                                            ORMJob.namespace == self.namespace)
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
        return self._receive_word(socket, b'RELEASED', b'BURIED')
def applyIndex(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
    raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
def add_file(self, f):
"""Add a partition identity as a child of a dataset identity."""
if not self.files:
self.files = set()
self.files.add(f)
    self.locations.set(f.type_)
def gen_goal(self, y_des):
"""Generate the goal for path imitation.
For rhythmic DMPs the goal is the average of the
desired trajectory.
y_des np.array: the desired trajectory to follow
"""
goal = np.zeros(self.dmps)
for n in range(self.dmps):
num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = .5 * (y_des[n,num_idx].min() + \
y_des[n,num_idx].max())
    return goal
def new(arg_name, annotated_with=None):
"""Creates a BindingKey.
Args:
arg_name: the name of the bound arg
        annotated_with: an Annotation, or None to create an unannotated binding key
Returns:
a new BindingKey
"""
if annotated_with is not None:
annotation = annotations.Annotation(annotated_with)
else:
annotation = annotations.NO_ANNOTATION
    return BindingKey(arg_name, annotation)
def __get_switch_arr(work_sheet, row_num):
'''
    if the value of the column in the row is `1`, it will be added to the array.
'''
u_dic = []
for col_idx in FILTER_COLUMNS:
cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
if cell_val in [1, '1']:
# Appending the slug name of the switcher.
u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0])
    return u_dic
def get_or_connect(cls, **kwargs):
"""
Try to retrieve an object in db, and create it if it does not exist.
"""
try:
inst = cls.get(**kwargs)
created = False
except DoesNotExist:
inst = cls(**kwargs)
created = True
except Exception:
raise
    return inst, created
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count per each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "size" in item:
f['size'] = item['size']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
        }
def get_context_data(self, **kwargs):
'''
Adds a 'base_template' attribute to context for the page_detail to
extend from
'''
context = super(PageList, self).get_context_data(**kwargs)
page_base_template = "nupages/base.html"
# if MultiTenantMiddleware is used, use a base template specific to
# the tenants SITE_ID
if hasattr(self.request, 'site_id'):
page_base_template = select_template(
["nupages/tenants/{}/base.html".format(self.request.site_id),
page_base_template])
context['base_template'] = page_base_template
    print(page_base_template)
    return context
def Network_getCertificate(self, origin):
"""
Function path: Network.getCertificate
Domain: Network
Method name: getCertificate
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'origin' (type: string) -> Origin to get certificate for.
Returns:
'tableNames' (type: array) -> No description
Description: Returns the DER-encoded certificate.
"""
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type(
origin)
subdom_funcs = self.synchronous_command('Network.getCertificate', origin=
origin)
    return subdom_funcs
def get_input_location(location):
"""
Similar to :meth:`get_input_peer`, but for input messages.
Note that this returns a tuple ``(dc_id, location)``, the
``dc_id`` being present if known.
"""
try:
if location.SUBCLASS_OF_ID == 0x1523d462:
return None, location # crc32(b'InputFileLocation'):
except AttributeError:
_raise_cast_fail(location, 'InputFileLocation')
if isinstance(location, types.Message):
location = location.media
if isinstance(location, types.MessageMediaDocument):
location = location.document
elif isinstance(location, types.MessageMediaPhoto):
location = location.photo
if isinstance(location, types.Document):
return (location.dc_id, types.InputDocumentFileLocation(
id=location.id,
access_hash=location.access_hash,
file_reference=location.file_reference,
thumb_size='' # Presumably to download one of its thumbnails
))
elif isinstance(location, types.Photo):
return (location.dc_id, types.InputPhotoFileLocation(
id=location.id,
access_hash=location.access_hash,
file_reference=location.file_reference,
thumb_size=location.sizes[-1].type
))
if isinstance(location, types.FileLocationToBeDeprecated):
raise TypeError('Unavailable location cannot be used as input')
    _raise_cast_fail(location, 'InputFileLocation')
def _gh(self, x):
""" Evaluates the constraint function values.
"""
Pgen = x[self._Pg.i1:self._Pg.iN + 1] # Active generation in p.u.
Qgen = x[self._Qg.i1:self._Qg.iN + 1] # Reactive generation in p.u.
for i, gen in enumerate(self._gn):
gen.p = Pgen[i] * self._base_mva # active generation in MW
gen.q = Qgen[i] * self._base_mva # reactive generation in MVAr
# Rebuild the net complex bus power injection vector in p.u.
Sbus = self.om.case.getSbus(self._bs)
Vang = x[self._Va.i1:self._Va.iN + 1]
Vmag = x[self._Vm.i1:self._Vm.iN + 1]
V = Vmag * exp(1j * Vang)
# Evaluate the power flow equations.
mis = V * conj(self._Ybus * V) - Sbus
# Equality constraints (power flow).
g = r_[mis.real, # active power mismatch for all buses
mis.imag] # reactive power mismatch for all buses
# Inequality constraints (branch flow limits).
# (line constraint is actually on square of limit)
flow_max = array([(l.rate_a / self._base_mva)**2 for l in self._ln])
# FIXME: There must be a more elegant method for this.
for i, v in enumerate(flow_max):
if v == 0.0:
flow_max[i] = Inf
if self.flow_lim == IFLOW:
If = self._Yf * V
It = self._Yt * V
# Branch current limits.
h = r_[(If * conj(If)) - flow_max,
(It * conj(It)) - flow_max]
else:
i_fbus = [e.from_bus._i for e in self._ln]
i_tbus = [e.to_bus._i for e in self._ln]
# Complex power injected at "from" bus (p.u.).
Sf = V[i_fbus] * conj(self._Yf * V)
# Complex power injected at "to" bus (p.u.).
St = V[i_tbus] * conj(self._Yt * V)
if self.flow_lim == PFLOW: # active power limit, P (Pan Wei)
# Branch real power limits.
            h = r_[Sf.real ** 2 - flow_max,
                   St.real ** 2 - flow_max]
elif self.flow_lim == SFLOW: # apparent power limit, |S|
# Branch apparent power limits.
h = r_[(Sf * conj(Sf)) - flow_max,
(St * conj(St)) - flow_max].real
else:
raise ValueError
    return h, g
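# A minimal two-bus sketch (made-up numbers, not Pylon's data structures) of the
# power-balance equality constraint evaluated above: g stacks the real and
# imaginary parts of the mismatch V * conj(Ybus @ V) - Sbus.
import numpy as np

y_line = 1.0 / (0.01 + 0.1j)                    # series admittance of the single line
Ybus = np.array([[y_line, -y_line],
                 [-y_line, y_line]])
V = np.array([1.00 * np.exp(1j * 0.00),         # slack bus voltage (p.u.)
              0.98 * np.exp(1j * -0.05)])       # load bus voltage (p.u.)
Sbus = np.array([0.50 + 0.10j, -0.45 - 0.08j])  # scheduled injections (p.u.)

mis = V * np.conj(Ybus @ V) - Sbus
g = np.r_[mis.real, mis.imag]                   # active / reactive power mismatches
print(g)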
def listener(self, acceptor, wrapper):
"""
Listens for new connections to the manager's endpoint. Once a
new connection is received, a TCPTendril object is generated
for it and it is passed to the acceptor, which must initialize
the state of the connection. If no acceptor is given, no new
connections can be initialized.
:param acceptor: If given, specifies a callable that will be
called with each newly received TCPTendril;
that callable is responsible for initial
acceptance of the connection and for setting
up the initial state of the connection. If
not given, no new connections will be
accepted by the TCPTendrilManager.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
"""
# If we have no acceptor, there's nothing for us to do here
if not acceptor:
# Not listening on anything
self.local_addr = None
# Just sleep in a loop
while True:
gevent.sleep(600)
return # Pragma: nocover
# OK, set up the socket
sock = socket.socket(self.addr_family, socket.SOCK_STREAM)
with utils.SocketCloser(sock):
# Set up SO_REUSEADDR
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind to our endpoint
sock.bind(self.endpoint)
# Get the assigned port number
self.local_addr = sock.getsockname()
# Call any wrappers
if wrapper:
sock = wrapper(sock)
# Initiate listening
sock.listen(self.backlog)
# OK, now go into an accept loop with an error threshold of 10
closer = utils.SocketCloser(sock, 10,
ignore=[application.RejectConnection])
while True:
with closer:
cli, addr = sock.accept()
# OK, the connection has been accepted; construct a
# Tendril for it
tend = TCPTendril(self, cli, addr)
# Set up the application
with utils.SocketCloser(cli):
tend.application = acceptor(tend)
# Make sure we track the new tendril, but only if
# the acceptor doesn't throw any exceptions
self._track_tendril(tend)
# Start the tendril
                tend._start()
def getCol(self, x):
"""return the x-th column, starting at 0"""
    return [self.getCell(x, i) for i in self.__size_range]
def addAction(self, action):
"""
Adds the inputed action to this toolbar.
:param action | <QAction>
"""
super(XDockToolbar, self).addAction(action)
label = XDockActionLabel(action, self.minimumPixmapSize(), self)
label.setPosition(self.position())
layout = self.layout()
    layout.insertWidget(layout.count() - 1, label)
def evict(cls, urls):
"""Remove items from cache matching URLs.
Return the number of items removed.
"""
if isinstance(urls, text_type):
urls = [urls]
urls = set(normalize_url(url) for url in urls)
retval = 0
with cls.ca_lock:
for key in list(cls.cache):
if key[0] in urls:
retval += 1
del cls.cache[key]
del cls.timeouts[key]
    return retval
def set_guest_access(self, allow_guests):
"""Set whether guests can join the room and return True if successful."""
guest_access = "can_join" if allow_guests else "forbidden"
try:
self.client.api.set_guest_access(self.room_id, guest_access)
self.guest_access = allow_guests
return True
except MatrixRequestError:
        return False
def add_handler(self, handler):
''' Add an additional handler
Args:
handler:
A dictionary of handler configuration for the handler
that should be added. See :func:`__init__` for details
on valid parameters.
'''
handler['logger'] = self._get_logger(handler)
handler['reads'] = 0
handler['data_read'] = 0
    self.capture_handlers.append(handler)
def _apply_conv(self, inputs, w):
"""Apply a `separable_conv2d` operation on `inputs` using `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
w: A tuple of weight matrices of the same type as `inputs`, the first
being the depthwise weight matrix, and the second being the pointwise
weight matrix.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
w_dw, w_pw = w
outputs = tf.nn.separable_conv2d(inputs,
w_dw,
w_pw,
rate=self._rate,
strides=self.stride,
padding=self._conv_op_padding,
data_format=self._data_format)
    return outputs
def parse_only_extr_license(self, extr_lic):
"""
Return an ExtractedLicense object to represent a license object.
But does not add it to the SPDXDocument model.
Return None if failed.
"""
# Grab all possible values
ident = self.get_extr_license_ident(extr_lic)
text = self.get_extr_license_text(extr_lic)
comment = self.get_extr_lics_comment(extr_lic)
xrefs = self.get_extr_lics_xref(extr_lic)
name = self.get_extr_lic_name(extr_lic)
if not ident:
# Must have identifier
return
# Set fields
    # FIXME: the constructor of the license should always accept a name
lic = document.ExtractedLicense(ident)
if text is not None:
lic.text = text
if name is not None:
lic.full_name = name
if comment is not None:
lic.comment = comment
lic.cross_ref = map(lambda x: six.text_type(x), xrefs)
    return lic
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
"""Return the occurrence halved fingerprint.
Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> ohf = OccurrenceHalved()
>>> bin(ohf.fingerprint('hat'))
'0b1010000000010'
>>> bin(ohf.fingerprint('niall'))
'0b10010100000'
>>> bin(ohf.fingerprint('colin'))
'0b1001010000'
>>> bin(ohf.fingerprint('atcg'))
'0b10100000000000'
>>> bin(ohf.fingerprint('entreatment'))
'0b1111010000110000'
"""
if n_bits % 2:
n_bits += 1
w_len = len(word) // 2
w_1 = set(word[:w_len])
w_2 = set(word[w_len:])
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= 1
if letter in w_1:
fingerprint += 1
fingerprint <<= 1
if letter in w_2:
fingerprint += 1
n_bits -= 2
else:
break
if n_bits > 0:
fingerprint <<= n_bits
    return fingerprint
def is_compliant(self, path):
"""Checks if the directory is compliant.
Used to determine if the path specified and all of its children
directories are in compliance with the check itself.
:param path: the directory path to check
:returns: True if the directory tree is compliant, otherwise False.
"""
if not os.path.isdir(path):
log('Path specified %s is not a directory.' % path, level=ERROR)
raise ValueError("%s is not a directory." % path)
if not self.recursive:
return super(DirectoryPermissionAudit, self).is_compliant(path)
compliant = True
for root, dirs, _ in os.walk(path):
if len(dirs) > 0:
continue
if not super(DirectoryPermissionAudit, self).is_compliant(root):
compliant = False
continue
    return compliant
def store_api_url(self):
"""Returns the API url for storing events."""
return "%s://%s%sapi/%s/store/" % (
self.scheme,
self.host,
self.path,
self.project_id,
    )
def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
"""
help = None
# is there a label specified for this field
if field in self.field_config and 'help' in self.field_config[field]:
help = self.field_config[field]['help']
# if we were given a default, use that
elif default:
help = default
# try to see if there is a description on our model
elif hasattr(self, 'model'):
for model_field in self.model._meta.fields:
if model_field.name == field:
help = model_field.help_text
break
    return help
def set_save_directory(base, source):
"""Sets the root save directory for saving screenshots.
Screenshots will be saved in subdirectories under this directory by
browser window size. """
root = os.path.join(base, source)
if not os.path.isdir(root):
os.makedirs(root)
    world.screenshot_root = root
def indices_outside_segments(times, segment_files, ifo=None, segment_name=None):
""" Return the list of indices that are outside the segments in the
list of segment files.
Parameters
----------
times: numpy.ndarray of integer type
Array of gps start times
segment_files: string or list of strings
A string or list of strings that contain the path to xml files that
contain a segment table
ifo: string, optional
The ifo to retrieve segments for from the segment files
segment_name: str, optional
name of segment
Returns
--------
indices: numpy.ndarray
The array of index values outside the segments
segmentlist:
The segment list corresponding to the selected time.
"""
exclude, segs = indices_within_segments(times, segment_files,
ifo=ifo, segment_name=segment_name)
indices = numpy.arange(0, len(times))
    return numpy.delete(indices, exclude), segs
def displayResponse(request, openid_response):
"""
Display an OpenID response. Errors will be displayed directly to
the user; successful responses and other protocol-level messages
will be sent using the proper mechanism (i.e., direct response,
redirection, etc.).
"""
s = getServer(request)
# Encode the response into something that is renderable.
try:
webresponse = s.encodeResponse(openid_response)
except EncodingError as why:
# If it couldn't be encoded, display an error.
text = why.response.encodeToKVForm()
return render_to_response(
'server/endpoint.html', {'error': cgi.escape(text)},
context_instance=RequestContext(request))
# Construct the appropriate django framework response.
r = http.HttpResponse(webresponse.body)
r.status_code = webresponse.code
for header, value in webresponse.headers.items():
r[header] = value
    return r
def query_tag_values(self, metric_type=None, **tags):
"""
Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
"""
tagql = self._transform_tags(**tags)
    return self._get(self._get_metrics_tags_url(self._get_url(metric_type)) + '/{}'.format(tagql))
def sync(self, sync_set, force=0, site=None, role=None):
"""
Uploads media to an Amazon S3 bucket using s3sync.
Requires s3cmd. Install with:
pip install s3cmd
"""
from burlap.dj import dj
force = int(force)
r = self.local_renderer
r.env.sync_force_flag = ' --force ' if force else ''
_settings = dj.get_settings(site=site, role=role)
assert _settings, 'Unable to import settings.'
    for k in _settings.__dict__.keys():
if k.startswith('AWS_'):
r.genv[k] = _settings.__dict__[k]
site_data = r.genv.sites[r.genv.SITE]
r.env.update(site_data)
r.env.virtualenv_bin_dir = os.path.split(sys.executable)[0]
rets = []
for paths in r.env.sync_sets[sync_set]:
is_local = paths.get('is_local', True)
local_path = paths['local_path'] % r.genv
remote_path = paths['remote_path']
remote_path = remote_path.replace(':/', '/')
if not remote_path.startswith('s3://'):
remote_path = 's3://' + remote_path
local_path = local_path % r.genv
if is_local:
#local_or_dryrun('which s3sync')#, capture=True)
r.env.local_path = os.path.abspath(local_path)
else:
#run('which s3sync')
r.env.local_path = local_path
if local_path.endswith('/') and not r.env.local_path.endswith('/'):
r.env.local_path = r.env.local_path + '/'
r.env.remote_path = remote_path % r.genv
print('Syncing %s to %s...' % (r.env.local_path, r.env.remote_path))
# Superior Python version.
if force:
r.env.sync_cmd = 'put'
else:
r.env.sync_cmd = 'sync'
r.local(
'export AWS_ACCESS_KEY_ID={aws_access_key_id}; '\
'export AWS_SECRET_ACCESS_KEY={aws_secret_access_key}; '\
'{s3cmd_path} {sync_cmd} --progress --acl-public --guess-mime-type --no-mime-magic '\
'--delete-removed --cf-invalidate --recursive {sync_force_flag} '\
            '{local_path} {remote_path}')
def enums():
"""
Return the dictionary of H₂O enums, retrieved from data in schemas(). For each entry in the dictionary its key is
the name of the enum, and the value is the set of all enum values.
"""
enumset = defaultdict(set)
for schema in schemas():
for field in schema["fields"]:
if field["type"] == "enum":
enumset[field["schema_name"]].update(field["values"])
    return enumset
def _connect(self):
"""
Retrieve token from USMA API and create an authenticated session
:returns OAuth2Session: authenticated client session
"""
oauth_client = BackendApplicationClient(client_id=self.client_id)
oauth_session = OAuth2Session(client=oauth_client)
token = oauth_session.fetch_token(token_url=self.url + "oauth/token",
client_id=self.client_id,
client_secret=self.client_secret)
#verify=False)
return OAuth2Session(client_id=self.client_id,
                         token=token)
def tree(path, depth=2, topdown=True, followlinks=False, showhidden=False):
"""A generator return a tuple with three elements (root, dirs, files)."""
rt = []
for root, dirs, files in os.walk(path, topdown=topdown, followlinks=followlinks):
if not showhidden and File.is_hidden(root):
continue
current_depth = len(os.path.relpath(root, path).split(os.sep))
if current_depth > depth:
continue
if showhidden:
_tuple = (
root,
[File(os.path.join(root, _dir)) for _dir in dirs],
[File(os.path.join(root, _file)) for _file in files]
)
else:
_tuple = (
root,
[File(os.path.join(root, _dir)) for _dir in dirs if _dir[0] != '.'],
[File(os.path.join(root, _file)) for _file in files if _file[0] != '.']
)
rt.append(_tuple)
    return rt
def search(self, q, field=None, page=None, per_page=None):
"""Search across all (without field) or in specific field
(valid fields at http://www.loc.gov/standards/mods/mods-outline.html)"""
def picker(results):
if type(results['result']) == list:
return results['result']
else:
return [results['result']]
    return self._get(('search',), picker, q=q, field=field, page=page, per_page=per_page)
def get_framebuffer_size(window):
"""
Retrieves the size of the framebuffer of the specified window.
Wrapper for:
void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetFramebufferSize(window, width, height)
    return width_value.value, height_value.value
def set_flat(self, new_weights):
"""Sets the weights to new_weights, converting from a flat array.
Note:
You can only set all weights in the network using this function,
i.e., the length of the array must match get_flat_size.
Args:
new_weights (np.ndarray): Flat array containing weights.
"""
self._check_sess()
shapes = [v.get_shape().as_list() for v in self.variables.values()]
arrays = unflatten(new_weights, shapes)
placeholders = [
self.placeholders[k] for k, v in self.variables.items()
]
self.sess.run(
list(self.assignment_nodes.values()),
        feed_dict=dict(zip(placeholders, arrays)))
def _ordered_struct_start_handler(handler, ctx):
"""Handles the special case of ordered structs, specified by the type ID 0xD1.
This coroutine's only purpose is to ensure that the struct in question declares at least one field name/value pair,
as required by the spec.
"""
_, self = yield
self_handler = _create_delegate_handler(self)
(length, _), _ = yield ctx.immediate_transition(
_var_uint_field_handler(self_handler, ctx)
)
if length < 2:
# A valid field name/value pair is at least two octets: one for the field name SID and one for the value.
raise IonException('Ordered structs (type ID 0xD1) must have at least one field name/value pair.')
    yield ctx.immediate_transition(handler(length, ctx))
def Cpu():
""" Get number of available CPUs """
cpu = 'Unknown'
try:
cpu = str(multiprocessing.cpu_count())
except Exception as e: # pragma: no cover
logger.error("Can't access CPU count' " + str(e))
    return cpu
def asbaseline(self, pos):
"""Convert a position measure into a baseline measure. No actual
baseline is calculated, since operations can be done on positions,
with subtractions to obtain baselines at a later stage.
:param pos: a position measure
:returns: a baseline measure
"""
if not is_measure(pos) or pos['type'] not in ['position', 'baseline']:
raise TypeError('Argument is not a position/baseline measure')
if pos['type'] == 'position':
loc = self.measure(pos, 'itrf')
loc['type'] = 'baseline'
return self.measure(loc, 'j2000')
    return pos
def kernel_integrity(attrs=None, where=None):
'''
Return kernel_integrity information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.kernel_integrity
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where)
    return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def container_present(name, profile):
'''
Ensures a container is present.
:param name: Container name
:type name: ``str``
:param profile: The profile key
:type profile: ``str``
'''
containers = __salt__['libcloud_storage.list_containers'](profile)
match = [z for z in containers if z['name'] == name]
if match:
return state_result(True, "Container already exists", name, {})
else:
result = __salt__['libcloud_storage.create_container'](name, profile)
return state_result(True, "Created new container", name, result) | 0.001704 |
def mark_day(self, day, month=None, year=None):
"""Marks the specified month day with a visual marker
(typically by making the number bold).
If only day is specified and the calendar month and year
        are changed, the marked day remains marked.
You can be more specific setting month and year parameters.
"""
    self._remark_date(day, month, year, highlight=True)
def get_field_reduced_index(self, index):
"""
reduced index: modulo of extensible has been applied
"""
# return index if not extensible
if self.extensible_info is None:
return index
# manage extensible
cycle_start, cycle_len, _ = self.extensible_info
# base field
if index < cycle_start:
return index
# extensible field
    return cycle_start + ((index - cycle_start) % cycle_len)
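# A standalone sketch of the same reduction with a hypothetical extensible
# field whose repeating block starts at index 3 and spans two fields per cycle:
# every index past the base fields folds back onto one of those two slots.
def reduced_index(index, cycle_start=3, cycle_len=2):
    if index < cycle_start:
        return index
    return cycle_start + ((index - cycle_start) % cycle_len)

print([reduced_index(i) for i in range(9)])  # [0, 1, 2, 3, 4, 3, 4, 3, 4]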
def read(self, **keys):
"""
read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUS. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
columns = keys.get('columns', None)
rows = keys.get('rows', None)
if columns is not None:
if 'columns' in keys:
del keys['columns']
data = self.read_columns(columns, **keys)
elif rows is not None:
if 'rows' in keys:
del keys['rows']
data = self.read_rows(rows, **keys)
else:
data = self._read_all(**keys)
    return data
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
            return device
def kalman_filter(kalman_state, old_indices, coordinates, q, r):
'''Return the kalman filter for the features in the new frame
kalman_state - state from last frame
old_indices - the index per feature in the last frame or -1 for new
coordinates - Coordinates of the features in the new frame.
q - the process error covariance - see equ 1.3 and 1.10 from Welch
r - measurement error covariance of features - see eqn 1.7 and 1.8 from welch.
returns a new KalmanState containing the kalman filter of
the last state by the given coordinates.
Refer to kalmanGainLinearMotion.m and
http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
for info on the algorithm.
'''
assert isinstance(kalman_state, KalmanState)
old_indices = np.array(old_indices)
if len(old_indices) == 0:
return KalmanState(kalman_state.observation_matrix,
kalman_state.translation_matrix)
#
# Cull missing features in old state and collect only matching coords
#
matching = old_indices != -1
new_indices = np.arange(len(old_indices))[~matching]
retained_indices = np.arange(len(old_indices))[matching]
new_coords = coordinates[new_indices]
observation_matrix_t = kalman_state.observation_matrix.transpose()
if len(retained_indices) > 0:
kalman_state = kalman_state.deep_copy()
coordinates = coordinates[retained_indices]
kalman_state.map_frames(old_indices[retained_indices])
#
# Time update equations
#
# From eqn 1.9 of Welch
#
state_vec = kalman_state.predicted_state_vec
#
# From eqn 1.10 of Welch
#
state_cov = dot_n(
dot_n(kalman_state.translation_matrix, kalman_state.state_cov),
kalman_state.translation_matrix.transpose()) + q[matching]
#
# From eqn 1.11 of welch
#
kalman_gain_numerator = dot_n(state_cov, observation_matrix_t)
kalman_gain_denominator = dot_n(
dot_n(kalman_state.observation_matrix, state_cov),
observation_matrix_t) + r[matching]
kalman_gain_denominator = inv_n(kalman_gain_denominator)
kalman_gain = dot_n(kalman_gain_numerator, kalman_gain_denominator)
#
# Eqn 1.12 of Welch
#
difference = coordinates - dot_n(kalman_state.observation_matrix,
state_vec[:,:,np.newaxis])[:,:,0]
state_noise = dot_n(kalman_gain, difference[:,:,np.newaxis])[:,:,0]
state_vec = state_vec + state_noise
#
# Eqn 1.13 of Welch (factored from (I - KH)P to P - KHP)
#
state_cov = (state_cov -
dot_n(dot_n(kalman_gain, kalman_state.observation_matrix),
state_cov))
#
# Collect all of the state noise in one array. We produce an I and J
# variance. Notes in kalmanGainLinearMotion indicate that you
# might want a single variance, combining I & J. An alternate
# might be R and theta, a variance of angular consistency and one
# of absolute velocity.
#
# Add an index to the state noise in the rightmost column
#
idx = np.arange(len(state_noise))
#
# Stack the rows with the old ones
#
all_state_noise = np.vstack((kalman_state.state_noise, state_noise))
all_state_noise_idx = np.hstack((kalman_state.state_noise_idx, idx))
noise_var = np.zeros((len(idx), all_state_noise.shape[1]))
for i in range(all_state_noise.shape[1]):
noise_var[:, i] = fix(scind.variance(all_state_noise[:, i],
all_state_noise_idx,
idx))
obs_vec = dot_n(kalman_state.observation_matrix,
state_vec[:,:,np.newaxis])[:,:,0]
kalman_state = KalmanState(kalman_state.observation_matrix,
kalman_state.translation_matrix,
state_vec, state_cov, noise_var,
all_state_noise,
all_state_noise_idx)
else:
# Erase all previous features
kalman_state = KalmanState(kalman_state.observation_matrix,
kalman_state.translation_matrix)
if len(new_coords) > 0:
#
# Fill in the initial states:
#
state_vec = dot_n(observation_matrix_t,
new_coords[:,:,np.newaxis])[:,:,0]
#
# The COV for the hidden, undetermined features should be large
# and the COV for others should be small
#
nstates = kalman_state.state_len
nnew_features = len(new_indices)
cov_vec = SMALL_KALMAN_COV / np.dot(observation_matrix_t,
np.ones(kalman_state.obs_len))
cov_vec[~ np.isfinite(cov_vec)] = LARGE_KALMAN_COV
cov_matrix = np.diag(cov_vec)
state_cov = cov_matrix[np.newaxis,:,:][np.zeros(nnew_features,int)]
#
        # The noise variance is all ones in Jaqaman
#
noise_var = np.ones((len(new_indices), kalman_state.state_len))
#
# Map the retained indices to their new slots and new ones to empty
# slots (=-1)
#
kalman_state.add_features(retained_indices,
new_indices,
state_vec, state_cov, noise_var)
    return kalman_state
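# A minimal 1-D constant-velocity sketch (toy numbers) of the same
# predict/update cycle referenced above (Welch & Bishop eqns 1.9-1.13),
# which kalman_filter vectorizes across all tracked features at once.
import numpy as np

F = np.array([[1.0, 1.0], [0.0, 1.0]])   # translation (state transition) matrix
H = np.array([[1.0, 0.0]])               # observation matrix
q = np.eye(2) * 1e-2                     # process noise covariance
r = np.array([[1e-1]])                   # measurement noise covariance

x = np.array([[0.0], [1.0]])             # state: position and velocity
P = np.eye(2)                            # state covariance

for z in [1.1, 2.0, 2.9, 4.2]:           # observed positions
    x = F @ x                            # time update (eqn 1.9)
    P = F @ P @ F.T + q                  # (eqn 1.10)
    K = P @ H.T @ np.linalg.inv(H @ P @ H.T + r)   # Kalman gain (eqn 1.11)
    x = x + K @ (np.array([[z]]) - H @ x)          # measurement update (eqn 1.12)
    P = P - K @ H @ P                              # covariance update (eqn 1.13)

print(x.ravel())                         # estimated position and velocity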
def perturbed_contents(self):
"""Perturb the given animal."""
animal = json.loads(self.contents)
for prop, prop_range in self.properties.items():
range = prop_range[1] - prop_range[0]
jittered = animal[prop] + random.gauss(0, 0.1 * range)
animal[prop] = max(min(jittered, prop_range[1]), prop_range[0])
    return json.dumps(animal)
def get_instance(self, payload):
"""
Build an instance of InviteInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance
:rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance
"""
return InviteInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
        )
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None,
num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,
dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch:
"Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`"
datasets = cls._init_ds(train_ds, valid_ds, test_ds)
val_bs = ifnone(val_bs, bs)
datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards)
for i,ds in enumerate(datasets)]
val_bs = bs
dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None]
        return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
def t_less_variable(self, t):
r'@@?[\w-]+|@\{[^@\}]+\}'
v = t.value.lower()
if v in reserved.tokens:
t.type = reserved.tokens[v]
if t.type == "css_media":
t.lexer.push_state("mediaquery")
elif t.type == "css_import":
t.lexer.push_state("import")
    return t
def files(self, entity_id, manifest=None, filename=None,
read_file=False, channel=None):
'''
Get the files or file contents of a file for an entity.
If all files are requested, a dictionary of filenames and urls for the
files in the archive are returned.
If filename is provided, the url of just that file is returned, if it
exists.
If filename is provided and read_file is true, the *contents* of the
file are returned, if the file exists.
@param entity_id The id of the entity to get files for
@param manifest The manifest of files for the entity. Providing this
reduces queries; if not provided, the manifest is looked up in the
charmstore.
@param filename The name of the file in the archive to get.
@param read_file Whether to get the url for the file or the file
contents.
@param channel Optional channel name.
'''
if manifest is None:
manifest_url = '{}/{}/meta/manifest'.format(self.url,
_get_path(entity_id))
manifest_url = _add_channel(manifest_url, channel)
manifest = self._get(manifest_url)
manifest = manifest.json()
files = {}
for f in manifest:
manifest_name = f['Name']
file_url = self.file_url(_get_path(entity_id), manifest_name,
channel=channel)
files[manifest_name] = file_url
if filename:
file_url = files.get(filename, None)
if file_url is None:
raise EntityNotFound(entity_id, filename)
if read_file:
data = self._get(file_url)
return data.text
else:
return file_url
else:
        return files
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
    return mailbox, value
def onLiveLocation(
self,
mid=None,
location=None,
author_id=None,
thread_id=None,
thread_type=None,
ts=None,
msg=None,
):
"""
Called when the client is listening and somebody sends live location info
:param mid: The action ID
:param location: Sent location info
:param author_id: The ID of the person who sent location info
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
        :param msg: A full set of the data received
:type location: models.LiveLocationAttachment
:type thread_type: models.ThreadType
"""
log.info(
"{} sent live location info in {} ({}) with latitude {} and longitude {}".format(
author_id, thread_id, thread_type, location.latitude, location.longitude
)
    )
def set_special(index, color, iterm_name="h", alpha=100):
"""Convert a hex color to a special sequence."""
if OS == "Darwin" and iterm_name:
return "\033]P%s%s\033\\" % (iterm_name, color.strip("#"))
if index in [11, 708] and alpha != "100":
return "\033]%s;[%s]%s\033\\" % (index, alpha, color)
return "\033]%s;%s\033\\" % (index, color) | 0.002688 |
def kpoints(self):
"""
Generate gamma center k-points mesh grid for GW calc,
which is requested by GW calculation.
"""
return Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density,
                                            force_gamma=True)
def process_topojson(self, name, layer, input_path):
"""
Process layer using topojson.
"""
output_path = os.path.join(TEMP_DIRECTORY, '%s.topojson' % name)
# Use local topojson binary
topojson_binary = 'node_modules/bin/topojson'
if not os.path.exists(topojson_binary):
# try with global topojson binary
topojson_binary = 'topojson'
topo_cmd = [
topojson_binary,
'-o', output_path
]
if 'id-property' in layer:
topo_cmd.extend([
'--id-property', layer['id-property']
])
if layer.get('all-properties', False):
topo_cmd.append('-p')
elif 'properties' in layer:
topo_cmd.extend([
'-p', ','.join(layer['properties'])
])
topo_cmd.extend([
'--',
input_path
])
sys.stdout.write('* Running TopoJSON\n')
if self.args.verbose:
sys.stdout.write(' %s\n' % ' '.join(topo_cmd))
r = envoy.run(' '.join(topo_cmd))
if r.status_code != 0:
sys.stderr.write(r.std_err)
    return output_path
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
    cleanup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
    return data
def make_embedded_push(value):
"""Returns a closure that pushed the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
"""
push = lambda vm: vm.push(value)
push.tag = EMBEDDED_PUSH_TAG
    return push
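# A tiny hypothetical stack machine showing why embedding pushes as callables
# pays off: the dispatch loop can call every instruction uniformly, with no
# per-instruction check for constants. EMBEDDED_PUSH_TAG is assumed to be a
# module-level constant and is defined here only for this sketch.
EMBEDDED_PUSH_TAG = "embedded-push"

class ToyMachine:
    def __init__(self):
        self.stack = []

    def push(self, value):
        self.stack.append(value)

def add(vm):
    b, a = vm.stack.pop(), vm.stack.pop()
    vm.push(a + b)

vm = ToyMachine()
for instruction in [make_embedded_push(2), make_embedded_push(3), add]:
    instruction(vm)
print(vm.stack)  # [5]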
def cli(ctx, board, scons, project_dir, sayyes):
"""Manage apio projects."""
if scons:
Project().create_sconstruct(project_dir, sayyes)
elif board:
Project().create_ini(board, project_dir, sayyes)
else:
        click.secho(ctx.get_help())
def get_user(self, username, *, mode=OsuMode.osu, event_days=31):
"""Get a user profile.
Parameters
----------
username : str or int
A `str` representing the user's username, or an `int` representing the user's id.
mode : :class:`osuapi.enums.OsuMode`
The osu! game mode for which to look up. Defaults to osu!standard.
event_days : int
The number of days in the past to look for events. Defaults to 31 (the maximum).
"""
return self._make_req(endpoints.USER, dict(
k=self.key,
u=username,
type=_username_type(username),
m=mode.value,
event_days=event_days
        ), JsonList(User))
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'from_') and self.from_ is not None:
_dict['from'] = self.from_
if hasattr(self, 'to') and self.to is not None:
_dict['to'] = self.to
if hasattr(self, 'speaker') and self.speaker is not None:
_dict['speaker'] = self.speaker
if hasattr(self, 'confidence') and self.confidence is not None:
_dict['confidence'] = self.confidence
if hasattr(self, 'final_results') and self.final_results is not None:
_dict['final'] = self.final_results
    return _dict
def security_iter(nodearr):
""" provide a security data iterator by returning a tuple of (Element, SecurityError) which are mutually exclusive """
assert nodearr.Name == 'securityData' and nodearr.IsArray
for i in range(nodearr.NumValues):
node = nodearr.GetValue(i)
err = XmlHelper.get_security_error(node)
result = (None, err) if err else (node, None)
        yield result
def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
"""Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
"""
if decision_point in self._decision_points:
raise RandomnessError(f"Two separate places are attempting to create "
f"the same randomness stream for {decision_point}")
stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
index_map=self._key_mapping, manager=self, for_initialization=for_initialization)
self._decision_points[decision_point] = stream
    return stream
def sync(self):
"""Keep the repository in sync.
This method will synchronize the repository with its 'origin',
fetching newest objects and updating references. It uses low
level commands which allow to keep track of which things
have changed in the repository.
The method also returns a list of hashes related to the new
commits fetched during the process.
:returns: list of new commits
:raises RepositoryError: when an error occurs synchronizing
the repository
"""
pack_name, refs = self._fetch_pack()
if pack_name:
commits = self._read_commits_from_pack(pack_name)
else:
commits = []
logger.debug("Git repository %s (%s) does not have any new object",
self.uri, self.dirpath)
self._update_references(refs)
logger.debug("Git repository %s (%s) is synced",
self.uri, self.dirpath)
    return commits
def init_widget(self):
""" Initialize the widget with the source. """
d = self.declaration
if d.source:
self.set_source(d.source)
else:
        super(RawComponent, self).init_widget()
def create(cls, settings):
"""Create a :class:`BluetoothConnection`:
.. sourcecode:: python
from escpos import BluetoothConnection
from escpos.impl.epson import GenericESCPOS
conn = BluetoothConnection.create('00:01:02:03:04:05')
printer = GenericESCPOS(conn)
printer.init()
printer.text('Hello World!')
:param str settings: Bluetooth settings. You must specify bluetooth
address as six hexadecimal octets, like ``00:01:02:03:04:05``.
You can also specify a port number after address using a forward
slash, like ``00:01:02:03:04:05/2``. If there is no port number,
this method will use SPD (*Service Discovery Protocol*) to find a
suitable port number for the given address at RFCOMM protocol.
:raises BluetoothPortDiscoveryError: If port is not specified and the
algorithm cannot find a RFCOMM port for the given address.
"""
fields = settings.rsplit('/', 1)
address = fields[0]
if len(fields) == 1:
port = find_rfcomm_port(address)
else:
try:
port = int(fields[1])
except ValueError:
raise BluetoothConnectionError('Invalid settings: {!r}'.format(settings))
    return cls(address, port=port)
def union(self, other, left_name="LEFT", right_name="RIGHT"):
"""
*Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
        :param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2")
"""
if not isinstance(left_name, str) or \
not isinstance(right_name, str):
raise TypeError("left_name and right_name must be strings. "
"{} - {} was provided".format(type(left_name), type(right_name)))
if isinstance(other, GMQLDataset):
other_idx = other.__index
else:
raise TypeError("other must be a GMQLDataset. "
"{} was provided".format(type(other)))
if len(left_name) == 0 or len(right_name) == 0:
raise ValueError("left_name and right_name must not be empty")
new_index = self.opmng.union(self.__index, other_idx, left_name, right_name)
new_local_sources, new_remote_sources = self.__combine_sources(self, other)
new_location = self.__combine_locations(self, other)
return GMQLDataset(index=new_index, location=new_location,
local_sources=new_local_sources,
remote_sources=new_remote_sources,
meta_profile=self.meta_profile) | 0.005405 |
def print_headers(head, outfile=None, silent=False):
"""
Print the vcf headers.
    If an output file is provided, the headers are written there; otherwise
    they are printed to stdout.
Args:
head (HeaderParser): A vcf header object
outfile (FileHandle): A file handle
silent (Bool): If nothing should be printed.
"""
for header_line in head.print_header():
if outfile:
outfile.write(header_line+'\n')
else:
if not silent:
print(header_line)
return | 0.008636 |
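# Hedged usage sketch for print_headers: assumes `head` is a HeaderParser-style
# object built elsewhere (a hypothetical variable here); the output path is illustrative.
with open("annotated.vcf", "w") as out_handle:
    print_headers(head, outfile=out_handle)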
def sendToLogbook(self, fileName, logType, location=None):
'''Process log information and push to selected logbooks.'''
import subprocess
success = True
if logType == "MCC":
fileString = ""
if not self.imagePixmap.isNull():
fileString = fileName + "." + self.imageType
logcmd = "xml2elog " + fileName + ".xml " + fileString
process = subprocess.Popen(logcmd, shell=True)
process.wait()
if process.returncode != 0:
success = False
else:
from shutil import copy
path = "/u1/" + location.lower() + "/physics/logbook/data/" # Prod path
# path = "/home/softegr/alverson/log_test/" # Dev path
try:
if not self.imagePixmap.isNull():
copy(fileName + ".png", path)
if self.imageType == "png":
copy(fileName + ".ps", path)
else:
copy(fileName + "." + self.imageType, path)
# Copy .xml file last to ensure images will be picked up by cron job
# print("Copying file " + fileName + " to path " + path)
copy(fileName + ".xml", path)
except IOError as error:
print(error)
success = False
return success | 0.005517 |
def get_as_list(self, tag_name):
"""
Return the value of a tag, making sure that it's a list. Absent
        tags are returned as an empty list; single tags are returned as a
one-element list.
The returned list is a copy, and modifications do not affect the
original object.
"""
val = self.get(tag_name, [])
if isinstance(val, list):
return val[:]
else:
return [val] | 0.00431 |
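# Hedged illustration of the copy semantics documented above, using a throwaway
# dict subclass (not part of the original code base) that reuses the same body.
class TagDict(dict):
    def get_as_list(self, tag_name):
        val = self.get(tag_name, [])
        return val[:] if isinstance(val, list) else [val]

d = TagDict(single="a", many=["a", "b"])
assert d.get_as_list("single") == ["a"]       # scalar -> one-element list
assert d.get_as_list("missing") == []         # absent -> empty list
d.get_as_list("many").append("c")             # returned list is a copy...
assert d["many"] == ["a", "b"]                # ...so the original is untouched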
def count_consonants(text):
"""Count number of occurrences of consonants in a given string"""
count = 0
for i in text:
if i.lower() in config.AVRO_CONSONANTS:
count += 1
return count | 0.004587 |
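# Tiny usage sketch; assumes the config module above exposes AVRO_CONSONANTS and
# that the input string is romanized text as expected by the Avro mapping.
print(count_consonants("bhalobashi"))   # number of characters found in AVRO_CONSONANTS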
def addNoise(self, nlf_coeff, date=None, info='', error=None):
'''
Args:
nlf_coeff (list)
error (float): absolute
info (str): additional information
date (str): "DD Mon YY" e.g. "30 Nov 16"
'''
date = _toDate(date)
d = self.coeffs['noise']
d.insert(_insertDateIndex(date, d), [date, info, nlf_coeff, error]) | 0.004843 |
def steppify(x, y):
"""
Steppify a curve (x, y). This is useful for filling histograms manually.
"""
dx = 0.5 * (x[1:] + x[:-1])
xx = numpy.zeros(2 * len(dx), dtype=float)
yy = numpy.zeros(2 * len(y), dtype=float)
xx[0::2], xx[1::2] = dx, dx
yy[0::2], yy[1::2] = y, y
xx = numpy.concatenate((
[x[0] - (dx[0] - x[0])],
xx,
[x[-1] + (x[-1] - dx[-1])]
))
return xx, yy | 0.002299 |
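# Hedged usage sketch: assumes steppify (above) and numpy are in scope; the bin
# centers and counts below are made-up illustration data for a manual histogram.
import numpy

edges = numpy.linspace(0.0, 1.0, 11)                 # 11 edges -> 10 bins
centers = 0.5 * (edges[1:] + edges[:-1])             # x: bin centers
counts = numpy.array([3, 5, 9, 12, 15, 14, 10, 6, 4, 2], dtype=float)

xx, yy = steppify(centers, counts)                   # doubled points tracing the step outline
assert len(xx) == len(yy) == 2 * len(counts)
# xx/yy can now be passed to e.g. matplotlib's fill_between to fill the histogram.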
def run_blast_commands(ncbicommandline_method, **keywords):
"""Runs blastplus/tblastn search, collects result and pass as a xml temporary file. """
# temporary files for output
blast_out_tmp = tempfile.NamedTemporaryFile(mode="w+",delete=False)
keywords['out'] = blast_out_tmp.name
# unpack query temp file object
query_file_object_tmp = keywords['query']
keywords['query'] = query_file_object_tmp.name
stderr = ''
error_string = ''
try:
        # formatting the blastplus command
blastplusx_cline = ncbicommandline_method(**keywords)
stdout, stderr = blastplusx_cline()
except ApplicationError as e:
error_string = "Runtime error: " + stderr + "\n" + e.cmd
# remove query temp file
os.unlink(query_file_object_tmp.name)
# os.remove(query_file_object_tmp.name)
return blast_out_tmp, error_string | 0.003405 |
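# Hedged usage sketch: assumes Biopython's NcbiblastpCommandline is available and
# that a local "swissprot" BLAST database exists; the query sequence, database
# name and e-value are illustrative. The query is passed as a NamedTemporaryFile
# object because run_blast_commands swaps it for its .name before running.
import tempfile
from Bio.Blast.Applications import NcbiblastpCommandline

query_tmp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
query_tmp.write(">q1\nMSTAVLENPGLGRKLSDFGQETSYIEDN\n")
query_tmp.flush()

out_tmp, error_string = run_blast_commands(
    NcbiblastpCommandline, query=query_tmp, db="swissprot", evalue=0.001)
if error_string:
    print(error_string)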
def get_hash(fName, readSize, dire=pDir()):
"""
    Compute an md5 digest over the first and last readSize KiB of the file;
    return -1 if the file does not exist or is smaller than twice the read size.
"""
if not fileExists(fName, dire):
return -1
readSize = readSize * 1024 # bytes to be read
fName = os.path.join(dire, fName) # name coupled with path
with open(fName, 'rb') as f:
size = os.path.getsize(fName)
if size < readSize * 2:
return -1
data = f.read(readSize)
f.seek(-readSize, os.SEEK_END)
data += f.read(readSize)
return md5(data).hexdigest() | 0.005792 |
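# Hedged usage sketch: assumes get_hash and its helpers (fileExists, pDir) are in
# scope; "movie.mkv" and the 64 KiB read size are illustrative values only.
digest = get_hash("movie.mkv", readSize=64)
if digest == -1:
    print("file missing or smaller than twice the read size")
else:
    print("md5 over the first and last 64 KiB:", digest)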
def get_instance_id():
"""Gets the instance ID of this EC2 instance
:return: String instance ID or None
"""
log = logging.getLogger(mod_logger + '.get_instance_id')
# Exit if not running on AWS
if not is_aws():
log.info('This machine is not running in AWS, exiting...')
return
instance_id_url = metadata_url + 'instance-id'
try:
response = urllib.urlopen(instance_id_url)
except(IOError, OSError) as ex:
msg = 'Unable to query URL to get instance ID: {u}\n{e}'. \
format(u=instance_id_url, e=ex)
log.error(msg)
return
# Check the code
if response.getcode() != 200:
msg = 'There was a problem querying url: {u}, returned code: {c}, unable to get the instance-id'.format(
u=instance_id_url, c=response.getcode())
log.error(msg)
return
instance_id = response.read()
return instance_id | 0.002134 |
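# Hedged usage sketch: only meaningful on an EC2 instance; elsewhere is_aws()
# fails and the function returns None.
instance_id = get_instance_id()
if instance_id:
    print("running on EC2 instance {i}".format(i=instance_id))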
def stats(self, date=None):
'''Return the current statistics for a given queue on a given date.
        The results are returned as a JSON blob::
{
'total' : ...,
'mean' : ...,
'variance' : ...,
'histogram': [
...
]
}
The histogram's data points are at the second resolution for the first
minute, the minute resolution for the first hour, the 15-minute
resolution for the first day, the hour resolution for the first 3
days, and then at the day resolution from there on out. The
`histogram` key is a list of those values.'''
return json.loads(
self.client('stats', self.name, date or repr(time.time()))) | 0.002484 |
def connectionLost(self, reason):
"""
Tells the box receiver to stop receiving boxes.
"""
self._remote.boxReceiver.stopReceivingBoxes(reason)
return basic.NetstringReceiver.connectionLost(self, reason) | 0.008299 |
def describe_events(ApplicationName=None, VersionLabel=None, TemplateName=None, EnvironmentId=None, EnvironmentName=None, PlatformArn=None, RequestId=None, Severity=None, StartTime=None, EndTime=None, MaxRecords=None, NextToken=None):
"""
Returns list of event descriptions matching criteria up to the last 6 weeks.
See also: AWS API Documentation
Examples
The following operation retrieves events for an environment named my-env:
Expected Output:
:example: response = client.describe_events(
ApplicationName='string',
VersionLabel='string',
TemplateName='string',
EnvironmentId='string',
EnvironmentName='string',
PlatformArn='string',
RequestId='string',
Severity='TRACE'|'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
MaxRecords=123,
NextToken='string'
)
:type ApplicationName: string
:param ApplicationName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those associated with this application.
:type VersionLabel: string
:param VersionLabel: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this application version.
:type TemplateName: string
:param TemplateName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that are associated with this environment configuration.
:type EnvironmentId: string
:param EnvironmentId: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.
:type EnvironmentName: string
:param EnvironmentName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.
:type PlatformArn: string
:param PlatformArn: The ARN of the version of the custom platform.
:type RequestId: string
:param RequestId: If specified, AWS Elastic Beanstalk restricts the described events to include only those associated with this request ID.
:type Severity: string
:param Severity: If specified, limits the events returned from this call to include only those with the specified severity or higher.
:type StartTime: datetime
:param StartTime: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur on or after this time.
:type EndTime: datetime
:param EndTime: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur up to, but not including, the EndTime .
:type MaxRecords: integer
:param MaxRecords: Specifies the maximum number of events that can be returned, beginning with the most recent event.
:type NextToken: string
:param NextToken: Pagination token. If specified, the events return the next batch of results.
:rtype: dict
:return: {
'Events': [
{
'EventDate': datetime(2015, 1, 1),
'Message': 'string',
'ApplicationName': 'string',
'VersionLabel': 'string',
'TemplateName': 'string',
'EnvironmentName': 'string',
'PlatformArn': 'string',
'RequestId': 'string',
'Severity': 'TRACE'|'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL'
},
],
'NextToken': 'string'
}
"""
pass | 0.005683 |
def _assert(self, expression: Bool):
"""Auxiliary method to send an assert"""
assert isinstance(expression, Bool)
smtlib = translate_to_smtlib(expression)
self._send('(assert %s)' % smtlib) | 0.00905 |
def link_bus(self, bus_idx):
"""
Return the indices of elements linking the given buses
        :param bus_idx: a bus idx or a list of bus idx to look up
        :return: a list with one ``(idx, key)`` pair per requested bus, where
                 ``idx`` holds the connected element indices and ``key`` the
                 matching field names, or ``(None, None)`` if nothing connects
"""
ret = []
if not self._config['is_series']:
self.log(
'link_bus function is not valid for non-series model <{}>'.
format(self.name))
return []
if isinstance(bus_idx, (int, float, str)):
bus_idx = [bus_idx]
fkey = list(self._ac.keys())
if 'bus' in fkey:
fkey.remove('bus')
nfkey = len(fkey)
fkey_val = [self.__dict__[i] for i in fkey]
for item in bus_idx:
idx = []
key = []
for i in range(self.n):
for j in range(nfkey):
if fkey_val[j][i] == item:
idx.append(self.idx[i])
key.append(fkey[j])
# <= 1 terminal should connect to the same bus
break
if len(idx) == 0:
idx = None
if len(key) == 0:
key = None
ret.append((idx, key))
return ret | 0.001684 |
def _validate(cls, message):
"""Confirm the validitiy of a given dict as an OpenXC message.
Returns:
``True`` if the message contains at least a ``name`` and ``value``.
"""
valid = False
if(('name' in message and 'value' in message) or
('id' in message and 'data' in message)):
valid = True
return valid | 0.005102 |
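# Hedged illustration of the two accepted shapes: a translated-style message
# (name/value) and a raw CAN-style message (id/data); the field values are made up.
translated = {"name": "vehicle_speed", "value": 42.0}
raw = {"id": 0x128, "data": "0x0000000000000000"}
incomplete = {"name": "vehicle_speed"}           # missing 'value'
# _validate(translated) and _validate(raw) return True; _validate(incomplete) returns False.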
def listpid(toggle='basic'):  # TODO: add a way to exclude elements from the list
    '''List process names and pids.'''
    proc = psutil.process_iter()  # evaluate whether it is better to keep one instance of this or generate it here
    if toggle == 'basic':
        host = gethostname()
        host2 = os.getenv('HOME').split(sep='/')[-1]
        for row in proc:
            #~ DPRINT([row.ppid(),row.name(),host],'username,row.name,host')
            # newer psutil reports the login user name rather than the hostname,
            # so host2 (the basename of $HOME) covers that case on Ubuntu
            if row.username() in host or row.username() in host2:
                yield row.name(), row.ppid()
    elif toggle == 'all':
        for row in proc:
            yield row.name(), row.ppid()
    elif toggle == 'windows-basic':
        for row in proc:
            try:
                pname = psutil.Process(row.pid).name()
                pname = pname[:-4]  # remove the trailing '.exe'
                yield pname, row.pid
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                pass | 0.019211 |
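# Hedged usage sketch: listpid is a generator, so iterate it; 'all' yields
# (process name, parent pid) for every process psutil can enumerate.
for name, ppid in listpid(toggle='all'):
    print(name, ppid)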
def upload_create(self, tags, rating, file_=None, source=None,
parent_id=None):
"""Function to create a new upload (Requires login).
Parameters:
            tags (str): The tag string for the post.
rating (str): Can be: `s`, `q`, or `e`. Alternatively, you can
specify `rating:safe`, `rating:questionable`, or
`rating:explicit` in the tag string.
file_ (file_path): The file data encoded as a multipart form.
source (str): The source URL.
parent_id (int): The parent post id.
Raises:
PybooruAPIError: When file_ or source are empty.
"""
        if file_ is not None or source is not None:
            params = {
                'upload[source]': source,
                'upload[rating]': rating,
                'upload[parent_id]': parent_id,
                'upload[tag_string]': tags
            }
            file_ = {'upload[file]': open(file_, 'rb')} if file_ else None
            return self._get('uploads.json', params, 'POST', auth=True,
                             file_=file_)
        else:
            raise PybooruAPIError("'file_' or 'source' is required.") | 0.002532 |
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        s = '_' + s
    id = ''.join(c for c in s if c in charset)
    # did we already generate an id for this file?
    try:
        return id_set[id][s]
    except KeyError:
        # no we did not, so initialize with the id
        if id not in id_set: id_set[id] = { s : id }
        # there is a collision, generate an id which is unique by appending
        # the collision number
        else: id_set[id][s] = id + str(len(id_set[id]))
    return id_set[id][s] | 0.004096 |
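# Hedged illustration of the uniqueness guarantee: id_set is an ordinary dict the
# caller keeps around for the whole .wxs file; the file names are made up.
id_set = {}
assert convert_to_id("read-me.txt", id_set) == "readme.txt"    # '-' is filtered out
assert convert_to_id("read+me.txt", id_set) == "readme.txt1"   # collision -> numeric suffix
assert convert_to_id("read-me.txt", id_set) == "readme.txt"    # same input, same Id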
def contract_from_file(fname):
"""
Loads a Barrister IDL JSON from the given file and returns a Contract class
:Parameters:
fname
Filename containing Barrister IDL JSON to load
"""
    with open(fname) as f:
        j = f.read()
    return Contract(json.loads(j)) | 0.003367 |
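# Hedged usage sketch; "calculator.json" stands in for whatever file the
# Barrister IDL translator produced for your interface definition.
contract = contract_from_file("calculator.json")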
def OSIncludes(self):
"""
Microsoft Windows SDK Include
"""
include = os.path.join(self.si.WindowsSdkDir, 'include')
if self.vc_ver <= 10.0:
return [include, os.path.join(include, 'gl')]
else:
if self.vc_ver >= 14.0:
sdkver = self._sdk_subdir
else:
sdkver = ''
return [os.path.join(include, '%sshared' % sdkver),
os.path.join(include, '%sum' % sdkver),
os.path.join(include, '%swinrt' % sdkver)] | 0.00354 |
def list_roles(self, mount_point=DEFAULT_MOUNT_POINT):
"""List all the roles that are registered with the plugin.
Supported methods:
LIST: /auth/{mount_point}/roles. Produces: 200 application/json
:param mount_point: The "path" the azure auth method was mounted on.
:type mount_point: str | unicode
:return: The "data" key from the JSON response of the request.
:rtype: dict
"""
api_path = '/v1/auth/{mount_point}/roles'.format(mount_point=mount_point)
response = self._adapter.list(
url=api_path
)
return response.json().get('data') | 0.00463 |
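# Hedged usage sketch (kept as comments because it needs a live Vault server):
# assumes an authenticated hvac client with the Azure auth method mounted at the
# default path; the URL and token below are placeholders.
# import hvac
# client = hvac.Client(url="https://vault.example.org:8200", token="...")
# roles = client.auth.azure.list_roles()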