| Column | Type and range |
| --- | --- |
| hexsha | string, length 40 to 40 |
| size | int64, 4 to 1.02M |
| ext | string, 8 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 4 to 209 |
| max_stars_repo_name | string, length 5 to 121 |
| max_stars_repo_head_hexsha | string, length 40 to 40 |
| max_stars_repo_licenses | sequence, length 1 to 10 |
| max_stars_count | int64, 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable |
| max_issues_repo_path | string, length 4 to 209 |
| max_issues_repo_name | string, length 5 to 121 |
| max_issues_repo_head_hexsha | string, length 40 to 40 |
| max_issues_repo_licenses | sequence, length 1 to 10 |
| max_issues_count | int64, 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable |
| max_forks_repo_path | string, length 4 to 209 |
| max_forks_repo_name | string, length 5 to 121 |
| max_forks_repo_head_hexsha | string, length 40 to 40 |
| max_forks_repo_licenses | sequence, length 1 to 10 |
| max_forks_count | int64, 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable |
| content | string, length 4 to 1.02M |
| avg_line_length | float64, 1.07 to 66.1k |
| max_line_length | int64, 4 to 266k |
| alphanum_fraction | float64, 0.01 to 1 |
1d311312627a88c1949f4689d306a5a8078da740 | 33,884 | py | Python | pcdsdevices/interface.py | slacAdpai/pcdsdevices | 7d79821f8b5652a7fd457b9a73d22ef37b2043d3 | [ "BSD-3-Clause-LBNL" ] | null | null | null | pcdsdevices/interface.py | slacAdpai/pcdsdevices | 7d79821f8b5652a7fd457b9a73d22ef37b2043d3 | [ "BSD-3-Clause-LBNL" ] | null | null | null | pcdsdevices/interface.py | slacAdpai/pcdsdevices | 7d79821f8b5652a7fd457b9a73d22ef37b2043d3 | [ "BSD-3-Clause-LBNL" ] | null | null | null | """
Module for defining bell-and-whistles movement features.
"""
import functools
import logging
import numbers
import re
import signal
import time
from contextlib import contextmanager
from pathlib import Path
from threading import Event, Thread
from types import MethodType, SimpleNamespace
from weakref import WeakSet
import yaml
from bluesky.utils import ProgressBar
from ophyd.device import Kind
from ophyd.ophydobj import OphydObject
from ophyd.status import wait as status_wait
from . import utils as util
try:
import fcntl
except ImportError:
fcntl = None
logger = logging.getLogger(__name__)
engineering_mode = True
OphydObject_whitelist = ["name", "connected", "check_value", "log"]
BlueskyInterface_whitelist = ["trigger", "read", "describe", "stage",
"unstage"]
Device_whitelist = ["read_attrs", "configuration_attrs", "summary",
"wait_for_connection", "stop", "get", "configure"]
Signal_whitelist = ["value", "put"]
Positioner_whitelist = ["settle_time", "timeout", "egu", "limits", "move",
"position", "moving"]
class BaseInterface(OphydObject):
"""
Interface layer to attach to any Device for SLAC features.
This class defines an API and some defaults for filtering tab-completion
results for new users to avoid confusion. The API involves setting the
tab_whitelist attribute on any subclass of BaseInterface. When in
non-engineering mode, only elements on the whitelists will be displayed to
the user.
Attributes
----------
tab_whitelist : list
List of string regex to show in autocomplete for non-engineering mode.
"""
tab_whitelist = (OphydObject_whitelist + BlueskyInterface_whitelist +
Device_whitelist + Signal_whitelist +
Positioner_whitelist)
_filtered_dir_cache = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
string_whitelist = []
for parent in cls.mro():
if hasattr(parent, "tab_whitelist"):
string_whitelist.extend(parent.tab_whitelist)
if getattr(parent, "tab_component_names", False):
for cpt_name in parent.component_names:
if getattr(parent, cpt_name).kind != Kind.omitted:
string_whitelist.append(cpt_name)
cls._tab_regex = re.compile("|".join(string_whitelist))
def __dir__(self):
if get_engineering_mode():
return super().__dir__()
elif self._filtered_dir_cache is None:
self._init_filtered_dir_cache()
return self._filtered_dir_cache
def _init_filtered_dir_cache(self):
self._filtered_dir_cache = self._get_filtered_tab_dir()
def _get_filtered_tab_dir(self):
return [elem
for elem in super().__dir__()
if self._tab_regex.fullmatch(elem)]
def __repr__(self):
"""Simplify the ophydobject repr to avoid crazy long represenations."""
prefix = getattr(self, 'prefix', None)
name = getattr(self, 'name', None)
return f"{self.__class__.__name__}({prefix}, name={name})"
def set_engineering_mode(expert):
"""
Switches between expert and user modes for :class:`BaseInterface` features.
Current features:
- Autocomplete filtering
Parameters
----------
expert : bool
Set to `True` to enable expert mode, or :keyword:`False` to
disable it. `True` is the starting value.
"""
global engineering_mode
engineering_mode = bool(expert)
def get_engineering_mode():
"""
Get the last value set by :meth:`set_engineering_mode`.
Returns
-------
expert : bool
The current engineering mode. See :meth:`set_engineering_mode`.
"""
return engineering_mode
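# Hypothetical usage sketch for the two helpers above (added for illustration):
# engineering mode is a module-wide switch, so flipping it changes the filtered
# tab-completion of every BaseInterface object at once.
def _example_engineering_mode_toggle():
    set_engineering_mode(False)   # hide non-whitelisted attributes from dir()
    assert get_engineering_mode() is False
    set_engineering_mode(True)    # restore the full, unfiltered dir()
    assert get_engineering_mode() is True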
class MvInterface(BaseInterface):
"""
Interface layer to attach to a positioner for motion shortcuts.
Defines common shortcuts that the beamline scientists like for moving
things on the command line. There is no need for these in a scripting
environnment, but this is a safe space for implementing move features that
would otherwise be disruptive to running scans and writing higher-level
applications.
"""
tab_whitelist = ["mv", "wm", "camonitor", "wm_update"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mov_ev = Event()
def mv(self, position, timeout=None, wait=False):
"""
Absolute move to a position.
Parameters
----------
position
Desired end position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
will be used.
wait : bool, optional
If `True`, wait for motion completion before returning.
Defaults to :keyword:`False`.
"""
self.move(position, timeout=timeout, wait=wait)
def wm(self):
"""Get the mover's current positon (where motor)."""
return self.position
def __call__(self, position=None, timeout=None, wait=False):
"""
Dispatches to :meth:`mv` or :meth:`wm` based on the arguments.
Calling the object will either move the object or get the current
position, depending on if the position argument is given. See the
docstrings for :meth:`mv` and :meth:`wm`.
"""
if position is None:
return self.wm()
else:
self.mv(position, timeout=timeout, wait=wait)
def camonitor(self):
"""
Shows a live-updating motor position in the terminal.
This will be the value that is returned by the :attr:`position`
attribute.
This method ends cleanly at a ctrl+c or after a call to
:meth:`end_monitor_thread`, which may be useful when this is called in
a background thread.
"""
try:
self._mov_ev.clear()
while not self._mov_ev.is_set():
print("\r {0:4f}".format(self.position), end=" ")
self._mov_ev.wait(0.1)
except KeyboardInterrupt:
pass
finally:
self._mov_ev.clear()
# Legacy alias
def wm_update(self):
return self.camonitor()
wm_update.__doc__ = camonitor.__doc__
def end_monitor_thread(self):
"""
Stop a :meth:`camonitor` or :meth:`wm_update` that is running in
another thread.
"""
self._mov_ev.set()
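# Hypothetical usage sketch (added for illustration; ``motor`` is assumed to be
# any positioner that mixes in MvInterface, it is not defined in this module):
def _example_mv_interface_usage(motor):
    motor.mv(10.0)             # absolute move, returns immediately
    motor.mv(10.0, wait=True)  # block until the move completes
    print(motor.wm())          # "where motor": read back the position
    motor(12.5)                # calling with an argument dispatches to mv()
    print(motor())             # calling with no argument dispatches to wm()
    motor.camonitor()          # live position display; ctrl+c to stop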
class FltMvInterface(MvInterface):
"""
Extension of :class:`MvInterface` for when the position is a float.
This lets us do more with the interface, such as relative moves.
Attributes
----------
presets : :class:`Presets`
Manager for preset positions.
"""
tab_whitelist = ["mvr", "umv", "umvr", "mv_ginput", "tweak",
"presets", "mv_.*", "wm_.*", "umv_.*"]
@property
def presets(self):
if not hasattr(self, "_presets"):
self._presets = Presets(self)
return self._presets
def mvr(self, delta, timeout=None, wait=False):
"""
Relative move from this position.
Parameters
----------
delta : float
Desired change in position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
will be used.
wait : bool, optional
If `True`, wait for motion completion before returning.
Defaults to :keyword:`False`.
"""
self.mv(delta + self.wm(), timeout=timeout, wait=wait)
def umv(self, position, timeout=None):
"""
Move to a position, wait, and update with a progress bar.
Parameters
----------
position : float
Desired end position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
will be used.
"""
status = self.move(position, timeout=timeout, wait=False)
AbsProgressBar([status])
try:
status_wait(status)
except KeyboardInterrupt:
self.stop()
def umvr(self, delta, timeout=None):
"""
Relative move from this position, wait, and update with a progress bar.
Parameters
----------
delta : float
Desired change in position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
will be used.
"""
self.umv(delta + self.wm(), timeout=timeout)
def mv_ginput(self, timeout=None):
"""
Moves to a location the user clicks on.
If there are existing plots, this will be the position on the most
recently active plot. If there are no existing plots, an empty plot
will be created with the motor's limits as the range.
"""
# Importing forces backend selection, so do inside method
import matplotlib.pyplot as plt # NOQA
logger.info(("Select new motor x-position in current plot "
"by mouseclick"))
if not plt.get_fignums():
upper_limit = 0
lower_limit = self.limits[0]
if self.limits[0] == self.limits[1]:
upper_limit = self.limits[0]+100
else:
upper_limit = self.limits[1]
limit_plot = []
for x in range(lower_limit, upper_limit):
limit_plot.append(x)
plt.plot(limit_plot)
pos = plt.ginput(1)[0][0]
self.move(pos, timeout=timeout)
def tweak(self):
"""
Control this motor using the arrow keys.
Use left arrow to step negative and right arrow to step positive.
Use up arrow to increase step size and down arrow to decrease step
size. Press q or ctrl+c to quit.
"""
return tweak_base(self)
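# Hypothetical usage sketch (added for illustration; ``motor`` is assumed to be
# a float-valued positioner built on FltMvInterface):
def _example_flt_mv_interface_usage(motor):
    motor.mvr(0.5)    # relative move: +0.5 from the current position
    motor.umv(10.0)   # absolute move that waits and draws a progress bar
    motor.umvr(-0.5)  # relative move with the same progress-bar behavior
    motor.tweak()     # interactive arrow-key control until 'q' or ctrl+c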
def setup_preset_paths(**paths):
"""
Prepare the :class:`Presets` class.
Sets the paths for saving and loading presets.
Parameters
----------
**paths : str keyword args
A mapping from type of preset to destination path. These will be
directories that contain the yaml files that define the preset
positions.
"""
Presets._paths = {}
for k, v in paths.items():
Presets._paths[k] = Path(v)
for preset in Presets._registry:
preset.sync()
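# Hypothetical usage sketch (added for illustration; the preset type names and
# directories below are assumptions, not defaults shipped with this module):
# each keyword becomes a preset "type" and each value is a directory that will
# hold one yaml file per device.
def _example_setup_preset_paths():
    setup_preset_paths(
        exp='/tmp/presets/exp',
        beamline='/tmp/presets/beamline',
    )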
class Presets:
"""
Manager for device preset positions.
This provides methods for adding new presets, checking which presets are
active, and related utilities.
It will install the :meth:`mv_presetname` and :meth:`wm_presetname` methods
onto the associated device, and the :meth:`add_preset` and
:meth:`add_preset_here` methods onto itself.
Parameters
----------
device : :class:`~ophyd.device.Device`
The device to manage saved preset positions for. It must implement the
:class:`FltMvInterface`.
Attributes
----------
positions : :class:`~types.SimpleNamespace`
A namespace that contains all of the active presets as
:class:`PresetPosition` objects.
"""
_registry = WeakSet()
_paths = {}
def __init__(self, device):
self._device = device
self._methods = []
self._fd = None
self._registry.add(self)
self.name = device.name + '_presets'
self.sync()
def _path(self, preset_type):
"""Utility function to get the preset file :class:`~pathlib.Path`."""
path = self._paths[preset_type] / (self._device.name + '.yml')
logger.debug('select presets path %s', path)
return path
def _read(self, preset_type):
"""Utility function to get a particular preset's datum dictionary."""
logger.debug('read presets for %s', self._device.name)
with self._file_open_rlock(preset_type) as f:
f.seek(0)
return yaml.full_load(f) or {}
def _write(self, preset_type, data):
"""
Utility function to overwrite a particular preset's datum dictionary.
"""
logger.debug('write presets for %s', self._device.name)
with self._file_open_rlock(preset_type) as f:
f.seek(0)
yaml.dump(data, f, default_flow_style=False)
f.truncate()
@contextmanager
def _file_open_rlock(self, preset_type, timeout=1.0):
"""
File locking context manager for this object.
Works like threading.RLock in that you can acquire it multiple times
safely.
Parameters
----------
preset_type : str
The preset type whose file should be opened and locked.
timeout : float, optional
Seconds to wait for the file lock before raising.
Raises
------
BlockingIOError
If we cannot acquire the file lock.
"""
if self._fd is None:
path = self._path(preset_type)
with open(path, 'r+') as fd:
# Set up file lock timeout with a raising handler
# We will need this handler due to PEP 475
def interrupt(signum, frame):
raise InterruptedError()
old_handler = signal.signal(signal.SIGALRM, interrupt)
try:
signal.setitimer(signal.ITIMER_REAL, timeout)
fcntl.flock(fd, fcntl.LOCK_EX)
except InterruptedError:
# Ignore interrupted and proceed to cleanup
pass
finally:
# Clean up file lock timeout
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
# Error now if we still can't get the lock.
# Getting lock twice is safe.
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
logger.debug('acquired lock for %s', path)
self._fd = fd
yield fd
fcntl.flock(fd, fcntl.LOCK_UN)
logger.debug('released lock for %s', path)
self._fd = None
else:
logger.debug('using already open file descriptor')
yield self._fd
def _update(self, preset_type, name, value=None, comment=None,
active=True):
"""
Utility function to update a preset position.
Reads the existing preset's datum, updates the value, the comment, and
the active state, and then writes the datum back to the file, updating
the history accordingly.
"""
logger.debug(('call %s presets._update(%s, %s, value=%s, comment=%s, '
'active=%s)'), self._device.name, preset_type, name,
value, comment, active)
if not isinstance(name, str):
raise TypeError('name must be of type <str>, not type {}'.format(type(name)))
if value is not None and not isinstance(value, numbers.Real):
raise TypeError('value must be a real numeric type, not type {}'.format(type(value)))
try:
path = self._path(preset_type)
if not path.exists():
path.touch()
path.chmod(0o666)
with self._file_open_rlock(preset_type):
data = self._read(preset_type)
if value is None and comment is not None:
value = data[name]['value']
if value is not None:
if name not in data:
data[name] = {}
ts = time.strftime('%d %b %Y %H:%M:%S')
data[name]['value'] = value
history = data[name].get('history', {})
if comment:
comment = ' ' + comment
else:
comment = ''
history[ts] = '{:10.4f}{}'.format(value, comment)
data[name]['history'] = history
if active:
data[name]['active'] = True
else:
data[name]['active'] = False
self._write(preset_type, data)
except BlockingIOError:
self._log_flock_error()
def sync(self):
"""Synchronize the presets with the database."""
logger.debug('call %s presets.sync()', self._device.name)
self._remove_methods()
self._cache = {}
logger.debug('filling %s cache', self.name)
for preset_type in self._paths.keys():
path = self._path(preset_type)
if path.exists():
try:
self._cache[preset_type] = self._read(preset_type)
except BlockingIOError:
self._log_flock_error()
else:
logger.debug('No %s preset file for %s',
preset_type, self._device.name)
self._create_methods()
def _log_flock_error(self):
logger.error(('Unable to acquire file lock for %s. '
'File may be being edited by another user.'), self.name)
logger.debug('', exc_info=True)
def _create_methods(self):
"""
Create the dynamic methods based on the configured paths.
Add methods to this object for adding presets of each type, add
methods to the associated device to move and check each preset, and
add :class:`PresetPosition` instances to :attr:`.positions` for
each preset name.
"""
logger.debug('call %s presets._create_methods()', self._device.name)
for preset_type in self._paths.keys():
add, add_here = self._make_add(preset_type)
self._register_method(self, 'add_' + preset_type, add)
self._register_method(self, 'add_here_' + preset_type, add_here)
for preset_type, data in self._cache.items():
for name, info in data.items():
if info['active']:
mv, umv = self._make_mv_pre(preset_type, name)
wm = self._make_wm_pre(preset_type, name)
self._register_method(self._device, 'mv_' + name, mv)
self._register_method(self._device, 'umv_' + name, umv)
self._register_method(self._device, 'wm_' + name, wm)
setattr(self.positions, name,
PresetPosition(self, preset_type, name))
def _register_method(self, obj, method_name, method):
"""
Utility function for managing dynamic methods.
Adds a method to the :attr:`._methods` list and binds the method to an
object.
"""
logger.debug('register method %s to %s', method_name, obj.name)
self._methods.append((obj, method_name))
setattr(obj, method_name, MethodType(method, obj))
def _make_add(self, preset_type):
"""
Create the functions that add preset positions.
Creates suitable versions of :meth:`.add` and :meth:`.add_here` for a
particular preset type, e.g. ``add_preset_type`` and
``add_here_preset_type``.
"""
def add(self, name, value, comment=None):
"""
Add a preset position of type "{}".
Parameters
----------
name : str
The name of the new preset position.
value : float
The value of the new preset position.
comment : str, optional
A comment to associate with the preset position.
"""
self._update(preset_type, name, value=value,
comment=comment)
self.sync()
def add_here(self, name, comment=None):
"""
Add a preset of the current position of type "{}".
Parameters
----------
name : str
The name of the new preset position.
comment : str, optional
A comment to associate with the preset position.
"""
add(self, name, self._device.wm(), comment=comment)
add.__doc__ = add.__doc__.format(preset_type)
add_here.__doc__ = add_here.__doc__.format(preset_type)
return add, add_here
def _make_mv_pre(self, preset_type, name):
"""
Create the functions that move to preset positions.
Creates suitable versions of :meth:`~MvInterface.mv` and
:meth:`~MvInterface.umv` for a particular preset type and name
e.g. ``mv_sample``.
"""
def mv_pre(self, timeout=None, wait=False):
"""
Move to the {} preset position.
Parameters
----------
timeout : float, optional
If provided, the mover will throw an error if motion takes
longer than timeout to complete. If omitted, the mover's
default timeout will be used.
wait : bool, optional
If `True`, wait for motion completion before
returning. Defaults to :keyword:`False`.
"""
pos = self.presets._cache[preset_type][name]['value']
self.mv(pos, timeout=timeout, wait=wait)
def umv_pre(self, timeout=None):
"""
Update move to the {} preset position.
Parameters
----------
timeout : float, optional
If provided, the mover will throw an error if motion takes
longer than timeout to complete. If omitted, the mover's
default timeout will be used.
"""
pos = self.presets._cache[preset_type][name]['value']
self.umv(pos, timeout=timeout)
mv_pre.__doc__ = mv_pre.__doc__.format(name)
umv_pre.__doc__ = umv_pre.__doc__.format(name)
return mv_pre, umv_pre
def _make_wm_pre(self, preset_type, name):
"""
Create a method to get the offset from a preset position.
Creates a suitable version of :meth:`~MvInterface.wm` for a particular
preset type and name e.g. ``wm_sample``.
"""
def wm_pre(self):
"""
Check the offset from the {} preset position.
Returns
-------
offset : float
How far we are from the preset position. If this is near zero,
we are at the position. If this is positive, the preset position
is in the positive direction from us.
"""
pos = self.presets._cache[preset_type][name]['value']
return pos - self.wm()
wm_pre.__doc__ = wm_pre.__doc__.format(name)
return wm_pre
def _remove_methods(self):
"""Remove all methods created in the last call to _create_methods."""
logger.debug('call %s presets._remove_methods()', self._device.name)
for obj, method_name in self._methods:
try:
delattr(obj, method_name)
except AttributeError:
pass
self._methods = []
self.positions = SimpleNamespace()
class PresetPosition:
"""
Manager for a single preset position.
Parameters
----------
presets : :class:`Presets`
The main :class:`Presets` object that manages this position.
name : str
The name of this preset position.
"""
def __init__(self, presets, preset_type, name):
self._presets = presets
self._preset_type = preset_type
self._name = name
def update_pos(self, pos=None, comment=None):
"""
Change this preset position and save it.
Parameters
----------
pos : float, optional
The position to use for this preset. If omitted, we'll use the
current position.
comment : str, optional
A comment to associate with the preset position.
"""
if pos is None:
pos = self._presets._device.wm()
self._presets._update(self._preset_type, self._name, value=pos,
comment=comment)
self._presets.sync()
def update_comment(self, comment):
"""
Revise the most recent comment in the preset history.
Parameters
----------
comment : str
A comment to associate with the preset position.
"""
self._presets._update(self._preset_type, self._name, comment=comment)
self._presets.sync()
def deactivate(self):
"""
Deactivate a preset from a device.
This can always be undone unless you edit the underlying file.
"""
self._presets._update(self._preset_type, self._name, active=False)
self._presets.sync()
@property
def info(self):
"""All information associated with this preset, returned as a dict."""
return self._presets._cache[self._preset_type][self._name]
@property
def pos(self):
"""The set position of this preset, returned as a float."""
return self.info['value']
@property
def history(self):
"""
The position history associated with this preset, returned as a dict.
"""
return self.info['history']
@property
def path(self):
"""The filepath that defines this preset, returned as a string."""
return str(self._presets._path(self._preset_type))
def __repr__(self):
return str(self.pos)
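# Hypothetical usage sketch (added for illustration; assumes setup_preset_paths
# was called with an ``exp`` type and that ``motor`` uses FltMvInterface):
def _example_presets_workflow(motor):
    motor.presets.add_exp('sample', 42.0, comment='sample in beam')
    motor.presets.add_here_exp('out')            # save the current position
    motor.mv_sample()                            # move to the saved position
    print(motor.wm_sample())                     # offset from the preset
    print(motor.presets.positions.sample.pos)    # saved value as a float
    motor.presets.positions.sample.update_pos()  # overwrite with current pos
    motor.presets.positions.out.deactivate()     # hide it without deleting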
def tweak_base(*args):
"""
Base function to control motors with the arrow keys.
With one motor, this will use the left and right arrows for the axis and up
and down arrows for scaling the step size. With two motors, this will use
left and right for the first axis and up and down for the second axis, with
shift+arrow used for scaling the step size. The q key quits, as does
ctrl+c.
"""
up = util.arrow_up
down = util.arrow_down
left = util.arrow_left
right = util.arrow_right
shift_up = util.shift_arrow_up
shift_down = util.shift_arrow_down
scale = 0.1
def thread_event():
"""Function call camonitor to display motor position."""
thrd = Thread(target=args[0].camonitor,)
thrd.start()
args[0]._mov_ev.set()
def _scale(scale, direction):
"""Function used to change the scale."""
if direction == up or direction == shift_up:
scale = scale*2
print("\r {0:4f}".format(scale), end=" ")
elif direction == down or direction == shift_down:
scale = scale/2
print("\r {0:4f}".format(scale), end=" ")
return scale
def movement(scale, direction):
"""Function used to know when and the direction to move the motor."""
try:
if direction == left:
args[0].umvr(-scale)
thread_event()
elif direction == right:
args[0].umvr(scale)
thread_event()
elif direction == up and len(args) > 1:
args[1].umvr(scale)
print("\r {0:4f}".format(args[1].position), end=" ")
except Exception as exc:
logger.error('Error in tweak move: %s', exc)
logger.debug('', exc_info=True)
# Loop takes in user key input and stops when 'q' is pressed
if len(args) == 1:
logger.info('Started tweak of %s', args[0])
else:
logger.info('Started tweak of %s', [mot.name for mot in args])
is_input = True
while is_input is True:
inp = util.get_input()
if inp in ('q', None):
is_input = False
else:
if len(args) > 1 and inp == down:
movement(-scale, up)
elif len(args) > 1 and inp == up:
movement(scale, inp)
elif inp not in (up, down, left, right, shift_down, shift_up):
print() # Newline
if len(args) == 1:
print(" Left: move x motor backward")
print(" Right: move x motor forward")
print(" Up: scale*2")
print(" Down: scale/2")
else:
print(" Left: move x motor left")
print(" Right: move x motor right")
print(" Down: move y motor down")
print(" Up: move y motor up")
print(" Shift_Up: scale*2")
print(" Shift_Down: scale/2")
print(" Press q to quit."
" Press any other key to display this message.")
print() # Newline
else:
movement(scale, inp)
scale = _scale(scale, inp)
print()
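# Hypothetical usage sketch (added for illustration; ``motor_x`` and ``motor_y``
# are assumed FltMvInterface positioners):
def _example_tweak_base_usage(motor_x, motor_y):
    tweak_base(motor_x)           # one motor: left/right move, up/down rescale
    tweak_base(motor_x, motor_y)  # two motors: arrows move, shift+arrows rescale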
class AbsProgressBar(ProgressBar):
"""Progress bar that displays the absolute position as well."""
def update(self, *args, name=None, current=None, **kwargs):
if None not in (name, current):
super().update(*args, name='{} ({:.3f})'.format(name, current),
current=current, **kwargs)
else:
super().update(*args, name=name, current=current, **kwargs)
class LightpathMixin(OphydObject):
"""
Mix-in class that makes it easier to establish a lightpath interface.
Use this on classes that are not state positioners but would still like to
be used as a top-level device in lightpath.
"""
SUB_STATE = 'state'
_default_sub = SUB_STATE
# Component names whose values are relevant for inserted/removed
lightpath_cpts = []
# Flag to signify that subclass is another mixin, rather than a device
_lightpath_mixin = False
def __init__(self, *args, **kwargs):
self._lightpath_values = {}
self._lightpath_ready = False
self._retry_lightpath = False
super().__init__(*args, **kwargs)
def __init_subclass__(cls, **kwargs):
# Magic to subscribe to the list of components
super().__init_subclass__(**kwargs)
if cls._lightpath_mixin:
# Child of cls will inherit this as False
cls._lightpath_mixin = False
else:
if not cls.lightpath_cpts:
raise NotImplementedError('Did not implement LightpathMixin')
for cpt_name in cls.lightpath_cpts:
cpt = getattr(cls, cpt_name)
cpt.sub_default(cls._update_lightpath)
def _set_lightpath_states(self, lightpath_values):
# Override based on the use case
# update self._inserted, self._removed,
# and optionally self._transmission
raise NotImplementedError('Did not implement LightpathMixin')
def _update_lightpath(self, *args, obj, **kwargs):
try:
# Universally cache values
self._lightpath_values[obj] = kwargs
# Only do the first lightpath state once all cpts have chimed in
if len(self._lightpath_values) >= len(self.lightpath_cpts):
self._retry_lightpath = False
# Pass user function the full set of values
self._set_lightpath_states(self._lightpath_values)
self._lightpath_ready = not self._retry_lightpath
if self._lightpath_ready:
# Tell lightpath to update
self._run_subs(sub_type=self.SUB_STATE)
elif self._retry_lightpath and not self._destroyed:
# Use this when the device wasn't ready to set states
kw = dict(obj=obj)
kw.update(kwargs)
util.schedule_task(self._update_lightpath,
args=args, kwargs=kw, delay=0.2)
except Exception:
# Without this, callbacks fail silently
logger.exception('Error in lightpath update callback.')
@property
def inserted(self):
return self._lightpath_ready and bool(self._inserted)
@property
def removed(self):
return self._lightpath_ready and bool(self._removed)
@property
def transmission(self):
try:
return self._transmission
except AttributeError:
if self.inserted:
return 0
else:
return 1
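# Hypothetical sketch of a LightpathMixin subclass (added for illustration; the
# ophyd Device base, the EpicsSignalRO component and the PV suffix are
# assumptions, not part of this module):
def _example_lightpath_subclass():
    from ophyd import Component as Cpt
    from ophyd import Device, EpicsSignalRO

    class ExampleValve(LightpathMixin, Device):
        state = Cpt(EpicsSignalRO, ':STATE')
        lightpath_cpts = ['state']

        def _set_lightpath_states(self, lightpath_values):
            # Values arrive keyed by the component signal object.
            value = lightpath_values[self.state]['value']
            self._inserted = (value == 0)
            self._removed = (value == 1)

    return ExampleValve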
class LightpathInOutMixin(LightpathMixin):
"""
LightpathMixin for parent device with InOut subdevices
"""
_lightpath_mixin = True
def _set_lightpath_states(self, lightpath_values):
in_check = []
out_check = []
trans_check = []
for obj, kwarg_dct in lightpath_values.items():
if not obj._state_initialized:
# State not initialized yet; check_inserted, etc. would fail
self._retry_lightpath = True
return
in_check.append(obj.check_inserted(kwarg_dct['value']))
out_check.append(obj.check_removed(kwarg_dct['value']))
trans_check.append(obj.check_transmission(kwarg_dct['value']))
self._inserted = any(in_check)
self._removed = all(out_check)
self._transmission = functools.reduce(lambda a, b: a*b, trans_check)
| 33.884 | 79 | 0.578267 |
b7ef48ad6ae2e2e52dc5f4669657354bb2214ea5 | 403 | py | Python | history_actions/loader.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [ "MIT" ] | 1 | 2018-09-11T18:35:42.000Z | 2018-09-11T18:35:42.000Z | history_actions/loader.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [ "MIT" ] | null | null | null | history_actions/loader.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [ "MIT" ] | null | null | null | from django.apps import apps
from django.db.models.signals import post_save
from history_actions import mixins
def subscribe_to_signals():
for app, models in apps.all_models.items():
for _, model in models.items():
if issubclass(model, mixins.PostSaveHistory):
post_save.connect(
mixins.PostSaveHistory.save_signal_callback, sender=model)
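# Hypothetical usage sketch (added for illustration; the model, field and app
# config below are assumptions, not part of this repository):
#
#     # myapp/models.py
#     class Book(mixins.PostSaveHistory, models.Model):
#         title = models.CharField(max_length=100)
#
#     # myapp/apps.py, inside AppConfig.ready()
#     subscribe_to_signals()  # wires post_save for every PostSaveHistory model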
| 28.785714 | 78 | 0.692308 |
06f81c54e0c39f1f522c11571ac1e4ec5ba138e1 | 525 | py | Python | test/test_blender.py | ottopasuuna/blender-render-tool | 99f7b9e3ee1004efb739c7812bd0e976cf386511 | [ "Apache-2.0" ] | 3 | 2020-10-04T22:06:10.000Z | 2020-10-30T13:48:28.000Z | test/test_blender.py | ottopasuuna/blender-render-tool | 99f7b9e3ee1004efb739c7812bd0e976cf386511 | [ "Apache-2.0" ] | 11 | 2018-09-04T02:50:55.000Z | 2020-04-10T19:17:58.000Z | test/test_blender.py | ottopasuuna/blender-render-tool | 99f7b9e3ee1004efb739c7812bd0e976cf386511 | [ "Apache-2.0" ] | 3 | 2018-10-13T20:01:36.000Z | 2021-03-01T20:09:42.000Z | from .context import src
from src.blender import *
def test_build_blender_cmd():
cmd = build_blender('test.blend', 'outdir', frames=range(1, 3))
assert cmd == 'blender -b test.blend -o outdir/####.png -f 1,2'
cmd = build_blender('test.blend', 'outdir', frames=range(1, 6, 2))
assert cmd == 'blender -b test.blend -o outdir/####.png -f 1,3,5'
def test_split_frames_per_host():
fph = split_frames_per_host(range(1, 5), ['localhost', 'server'])
assert fph == {'localhost': [1, 2], 'server': [3, 4]}
| 32.8125 | 70 | 0.64 |
469bef3074f386ec65194b5074d16ddf8c8c7c21 | 4,870 | py | Python | test/functional/rpc_bind.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [ "MIT" ] | null | null | null | test/functional/rpc_bind.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [ "MIT" ] | null | null | null | test/functional/rpc_bind.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [ "MIT" ] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import SurgeTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(SurgeTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| 45.092593 | 162 | 0.622793 |
857761cc8ab950d1c17148ddbcef304b3c6e1889 | 6,660 | py | Python | predict.py | mme384/Image-Classifier-MobileNet-V2 | 28a285c69dc30f45bbcb4c80e60676aec22d1083 | [ "Unlicense" ] | null | null | null | predict.py | mme384/Image-Classifier-MobileNet-V2 | 28a285c69dc30f45bbcb4c80e60676aec22d1083 | [ "Unlicense" ] | null | null | null | predict.py | mme384/Image-Classifier-MobileNet-V2 | 28a285c69dc30f45bbcb4c80e60676aec22d1083 | [ "Unlicense" ] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FILE NAME: predict.py
AUTHOR: Michalis Meyer
DATE CREATED: 22.04.2020
DATE LAST MODIFIED:
PYTHON VERSION: 3.7
SCRIPT PURPOSE: Predict the class of an image
SAMPLE COMMAND LINE: python predict.py --file_path './test_images/hard-leaved_pocket_orchid.jpg' --model_filename 'model_20200422_223607.h5' --top_k 5 --category_names 'label_map.json'
"""
# Import modules
import warnings # Import module to deal with warnings
from argparse import ArgumentParser # Import module to parse arguments
import numpy as np # Import module to use numpy
import matplotlib.pyplot as plt # Import module to use matplotlib
import tensorflow as tf # Import module to use tensorflow
import tensorflow_datasets as tfds # Import module to use tensorflow datasets
import tensorflow_hub as hub # Import module to import model from tensorflow Hub
import json # Import module for label mapping
import os # Import module to deal with path names used in image data generator
from PIL import Image # Process image for prediction
import utility_functions as utf # Import module with custom utility functions
def set_up_workspace():
"""
Setup up the workspace
Parameters: None
Returns: None
"""
# Avoid Error #15: Initializing libiomp5.dylib, but found libiomp5.dylib already initialized.
# https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Magic command for inline plotting
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
def get_input_args():
"""
Retrieve and parse command line arguments.
Command Line Arguments:
- Image file path as --file_path
- Model path and file name as --model_filename
- Top k classes to be returned as --top_k
- Path to json file mapping labels as --category_names
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() - data structure that stores the command line arguments object.
"""
# Create Parse using ArgumentParser
parser = ArgumentParser()
# Image file path as --file_path
# Path to test images
# image_path = './test_images/hard-leaved_pocket_orchid.jpg'
# image_path = './test_images/cautleya_spicata.jpg'
# image_path = './test_images/orange_dahlia.jpg'
# image_path = './test_images/wild_pansy.jpg'
parser.add_argument("--file_path",
type = str,
default = './test_images/hard-leaved_pocket_orchid.jpg',
help = "Image file path.")
# Model file name as --model_filename
parser.add_argument("--model_filename",
type = str,
default = 'model_20200422_223607.h5',
help = "Model path and file name.")
# Top k classes to be returned as --top_k with default value 5
parser.add_argument("--top_k",
type = int,
default = 5,
help = "Number of epochs. Default = 5")
# json file mapping labels as --category_names
parser.add_argument("--category_names",
type = str,
default = 'label_map.json',
help = "json file mapping labels.")
return parser.parse_args()
def load_model(model_filename):
"""
Load the Keras model
Parameters: The model file name for the trained TensorFlow Keras model
Returns: Returns the model
"""
# Reload model
model = tf.keras.models.load_model(model_filename, custom_objects={'KerasLayer': hub.KerasLayer})
# Display model summary
model.summary()
return model
def predict(image_path, model, class_names, top_k):
'''
Predicts class of image based on model
Parameters: image_path Path of the image to be classified
model: Name of the trained model
class_names: Complete list containing class names and their indices
top_k: Top k probabilities and classes to be returned by the function
Returns: top_k_probs_np Numpy array of top k probabilities predicted by the model
top_k_classes List of top k classes predicted by the model
'''
# Open image
image = Image.open(image_path)
# Convert image to numpy array
image_np = np.asarray(image)
# Process image to be ready for prediction
processed_image = utf.process_image(image_np)
# Expand shape (224, 224, 3) to (1, 224, 224, 3) to represent the batch size.
expanded_image = np.expand_dims(processed_image, axis=0)
# Predict class
probs = model.predict(expanded_image)
# Get top k probabilities and their index
top_k_probs, top_k_indices = tf.math.top_k(probs, k=top_k, sorted=True)
# Convert top k probabilities and their index from tf.Tensor to numpy array and squeeze the shape
top_k_probs_np = top_k_probs.numpy().squeeze()
top_k_indices_np = top_k_indices.numpy().squeeze()
# Convert int to str
top_k_indices_np_str = np.char.mod('%d', top_k_indices_np)
# Create top_k_classes list
top_k_classes = []
[top_k_classes.append(class_names[label]) for label in top_k_indices_np_str]
return top_k_probs_np, top_k_classes
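# Hypothetical usage sketch (added for illustration; the file names mirror the
# script defaults above but are assumptions for any concrete run):
def _example_predict_usage():
    model = load_model('model_20200422_223607.h5')
    with open('label_map.json', 'r') as f:
        class_names = json.load(f)
    probs, classes = predict('./test_images/wild_pansy.jpg',
                             model, class_names, top_k=3)
    for p, c in zip(probs, classes):
        print('{}: {:.1%}'.format(c, p))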
def main():
"""
Main function
Parameters: None
Returns: None
"""
# Set up the workspace
set_up_workspace()
# Assigns variable in_args to parse_args()
in_args = get_input_args()
# Load model
model = load_model(in_args.model_filename)
# Load mapping from label to category name
with open(in_args.category_names, 'r') as f:
class_names = json.load(f)
# Load test image, convert to numpy array, process image
org_image = Image.open(in_args.file_path)
test_image = np.asarray(org_image)
test_image = utf.process_image(test_image)
# Predict class and probability of image
probs, top_k_classes = predict(in_args.file_path, model, class_names, in_args.top_k)
# Print classes and probabilities
utf.display_predictions(probs, top_k_classes)
# Plot image, classes and probabilities
utf.show_image(test_image, probs, top_k_classes)
# Show all matplotlib plots made in the script
plt.show()
# Run main function
if __name__ == '__main__':
main()
| 37 | 189 | 0.672973 |
ba650962e9b117b332a162f731e7909d0773b5ea | 2,068 | py | Python | camera_calib/fiducial_detect.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [ "Apache-2.0" ] | 3 | 2020-10-14T10:24:09.000Z | 2021-09-19T20:48:40.000Z | camera_calib/fiducial_detect.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [ "Apache-2.0" ] | 1 | 2021-09-28T02:06:42.000Z | 2021-09-28T02:06:42.000Z | camera_calib/fiducial_detect.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [ "Apache-2.0" ] | 2 | 2021-01-07T20:13:31.000Z | 2021-01-08T18:16:53.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: fiducial_detect.ipynb (unless otherwise specified).
__all__ = ['DotVisionCheckerDLDetector']
# Cell
import copy
import math
import warnings
import torch
from skimage.measure import label, regionprops
from torchvision import transforms
from .utils import *
# Cell
class DotVisionCheckerDLDetector():
def __init__(self, file_model, device=torch.device('cpu')):
self.model = torch.jit.load(file_model.as_posix(), map_location=device).eval()
self.device = device
def format_arr(self, arr):
if arr.min() < 0: warnings.warn('Value less than zero detected')
if arr.max() > 1: warnings.warn('Value greater than 1 detected')
arr = arr.float() # Must be single precision
arr = imresize(arr, 384) # Network trained on grayscale 384 sized images
arr = rescale(arr, (0, 1), (-1, 1)) # Network trained on images between [-1,1]
arr = arr[None, None] # Add batch and channel dimension
arr = arr.to(self.device) # Move to device
return arr
def get_mask(self, arr):
with torch.no_grad():
mask = self.model(self.format_arr(arr)) # Inference
mask = mask.to(arr.device) # Make sure its in the same device as array
mask = mask.argmax(dim=1) # Convert from scores to labels
mask = mask.squeeze(0) # Remove batch dimension
return mask
def __call__(self, arr):
mask = self.get_mask(arr)
# Extract fiducial points from mask
ps_f = arr.new_full((4,2), math.nan)
for idx, p_f in enumerate(ps_f):
regions = regionprops(label(torch2np(mask) == (idx+1))) # regionprops doesn't work on GPU tensors
if len(regions) > 0:
region = regions[arr.new_tensor([r.area for r in regions]).argmax()]
ps_f[idx] = arr.new_tensor(reverse(region.centroid))
ps_f *= (shape(arr)/shape(mask)).mean()
return ps_f
| 39.018868 | 101 | 0.610251 |
37cfb208d246f6f08f6193188ae4bc9348ee45aa | 685 | py | Python | Medium/784. Letter Case Permutation/solution (1).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [ "MIT" ] | 3 | 2020-05-09T12:55:09.000Z | 2022-03-11T18:56:05.000Z | Medium/784. Letter Case Permutation/solution (1).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [ "MIT" ] | null | null | null | Medium/784. Letter Case Permutation/solution (1).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [ "MIT" ] | 1 | 2022-03-11T18:56:16.000Z | 2022-03-11T18:56:16.000Z | # 784. Letter Case Permutation
# Runtime: 98 ms, faster than 16.21% of Python3 online submissions for Letter Case Permutation.
# Memory Usage: 15.5 MB, less than 27.62% of Python3 online submissions for Letter Case Permutation.
class Solution:
# Recursion
def letterCasePermutation(self, s: str) -> list[str]:
ans = []
def select(curr: str, i: int) -> None:
if i == len(s):
ans.append(curr)
elif s[i].isalpha():
select(curr + s[i].lower(), i + 1)
select(curr + s[i].upper(), i + 1)
else:
select(curr + s[i], i + 1)
select("", 0)
return ans
| 29.782609 | 100 | 0.540146 |
511c2c796aede7f3c0c061bf30347288bf2b566e | 2,124 | py | Python | historia/test/test_economy.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [ "MIT" ] | 6 | 2016-04-26T18:39:36.000Z | 2021-09-01T09:13:38.000Z | historia/test/test_economy.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [ "MIT" ] | null | null | null | historia/test/test_economy.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [ "MIT" ] | 4 | 2016-04-10T23:47:23.000Z | 2021-08-15T11:40:28.000Z | from unittest import TestCase
from historia.pops.enums.pop_job import PopJob
from historia.economy import Good
from historia.test.mocks import mock_manager, mock_map, make_mock_country, make_mock_pop
random_hex = mock_map.random_hex()
country = make_mock_country(mock_manager, random_hex)
province = country.provinces[0]
market = province.market
class TestEconomy(TestCase):
def test_money(self):
farmer = make_mock_pop(province, PopJob.farmer)
province.add_pops([farmer])
farmer.money += 10
self.assertEqual(farmer.money, 20)
farmer.money -= 10
self.assertEqual(farmer.money, 10)
def test_idle_fee(self):
farmer = make_mock_pop(province, PopJob.farmer)
province.add_pops([farmer])
farmer.inventory.set(Good.timber, 0)
self.assertEqual(farmer.money, 10)
farmer.perform_logic()
self.assertEqual(farmer.money, 8)
def test_production(self):
farmer = make_mock_pop(province, PopJob.farmer)
province.add_pops([farmer])
grain = farmer.inventory.get_amount(Good.grain)
tools = farmer.inventory.get_amount(Good.tools)
bread = farmer.inventory.get_amount(Good.bread)
timber = farmer.inventory.get_amount(Good.timber)
farmer.perform_logic()
self.assertEqual(farmer.inventory.get_amount(Good.grain), grain + 4)
self.assertEqual(farmer.inventory.get_amount(Good.bread), bread - 1)
self.assertEqual(farmer.inventory.get_amount(Good.timber), timber - 1)
farmer.inventory.add(Good.timber, 1)
farmer.inventory.add(Good.bread, 1)
grain = farmer.inventory.get_amount(Good.grain)
tools = farmer.inventory.get_amount(Good.tools)
bread = farmer.inventory.get_amount(Good.bread)
timber = farmer.inventory.get_amount(Good.timber)
farmer.perform_logic()
self.assertEqual(farmer.inventory.get_amount(Good.grain), grain + 4)
self.assertEqual(farmer.inventory.get_amount(Good.bread), bread - 1)
self.assertEqual(farmer.inventory.get_amount(Good.timber), timber - 1)
| 34.258065 | 88 | 0.700094 |
b3e5b9a0e2df12959ec5190d80968cb5f77a4bfe | 1,429 | py | Python | consumer_avro.py | Codnos/kafka-avro-mongo-webflux | 2c3f7b6519c58f13e3f4a76b3307152f40c6eb12 | [
"Apache-2.0"
] | 1 | 2018-06-04T16:28:40.000Z | 2018-06-04T16:28:40.000Z | consumer_avro.py | Codnos/kafka-avro-mongo-webflux | 2c3f7b6519c58f13e3f4a76b3307152f40c6eb12 | [
"Apache-2.0"
] | 1 | 2022-03-08T21:11:03.000Z | 2022-03-08T21:11:03.000Z | consumer_avro.py | Codnos/kafka-avro-mongo-webflux-reactjs | 2c3f7b6519c58f13e3f4a76b3307152f40c6eb12 | [
"Apache-2.0"
] | null | null | null | import io
import urllib.request
import pandas as pd
import avro.schema
import avro.io
import decimal
def twos_comp(val, bits):
"""compute the 2's complement of int value val"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val # return positive value as is
def decimal_from_bytes(bytes, scale):
original = int.from_bytes(bytes, byteorder='big', signed=False)
bits = len(bytes)*8
decoded = twos_comp(original, bits)
return decimal.Decimal(decoded) / decimal.Decimal(10**scale)
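# Worked example (added for illustration) of how the two helpers above decode
# an Avro decimal logical type: the bytes are a big-endian two's-complement
# integer that is then scaled by 10**scale.
def _example_decimal_decoding():
    assert twos_comp(0b11111111, 8) == -1   # sign bit set -> negative
    assert twos_comp(0b01111111, 8) == 127  # sign bit clear -> unchanged
    # b'\x30\x39' is 12345 unsigned; scale 2 moves the point two places.
    assert decimal_from_bytes(b'\x30\x39', 2) == decimal.Decimal('123.45')
    # b'\xff\x85' has the sign bit set: 65413 - 65536 = -123 -> -1.23.
    assert decimal_from_bytes(b'\xff\x85', 2) == decimal.Decimal('-1.23')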
url = "http://localhost:8080/api/users"
headers = {"Accept": "application/avro"}
request = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(request)
schema = avro.schema.Parse(open("users-domain/src/main/avro/user.avsc", "r").read())
resp = response.read()
bytes_reader = io.BytesIO(resp)
decoder = avro.io.BinaryDecoder(bytes_reader)
reader = avro.io.DatumReader(schema)
user = reader.read(decoder)
print(user)
columns = user['salary_structure']
extended = ['place'] + columns
scale = user['salary_precision']
rows = []
for key, value in user['salaries'].items():
numbers = []
for val in value:
numbers.append(decimal_from_bytes(val, scale))
row = [key] + numbers
rows.append(row)
df = pd.DataFrame(rows, columns=(extended))
print(df)
| 28.58 | 84 | 0.679496 |
c57c0657e06889f184323af25ba681655993b701 | 12,669 | py | Python | trove/tests/scenario/groups/guest_log_group.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [ "Apache-2.0" ] | 244 | 2015-01-01T12:04:44.000Z | 2022-03-25T23:38:39.000Z | trove/tests/scenario/groups/guest_log_group.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [ "Apache-2.0" ] | 6 | 2015-08-18T08:19:10.000Z | 2022-03-05T02:32:36.000Z | trove/tests/scenario/groups/guest_log_group.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [ "Apache-2.0" ] | 178 | 2015-01-02T15:16:58.000Z | 2022-03-23T03:30:20.000Z | # Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.guest_log_group"
class GuestLogRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'guest_log_runners'
_runner_cls = 'GuestLogRunner'
@test(depends_on_groups=[groups.DB_ACTION_INST_DELETE_WAIT],
groups=[GROUP, groups.INST_LOG])
class GuestLogGroup(TestGroup):
"""Test Guest Log functionality."""
def __init__(self):
super(GuestLogGroup, self).__init__(
GuestLogRunnerFactory.instance())
@test
def test_log_list(self):
"""Test that log-list works."""
self.test_runner.run_test_log_list()
@test
def test_admin_log_list(self):
"""Test that log-list works for admin user."""
self.test_runner.run_test_admin_log_list()
@test
def test_log_enable_sys(self):
"""Ensure log-enable on SYS log fails."""
self.test_runner.run_test_log_enable_sys()
@test
def test_log_disable_sys(self):
"""Ensure log-disable on SYS log fails."""
self.test_runner.run_test_log_disable_sys()
@test
def test_log_show_unauth_user(self):
"""Ensure log-show by unauth client on USER log fails."""
self.test_runner.run_test_log_show_unauth_user()
@test
def test_log_list_unauth_user(self):
"""Ensure log-list by unauth client on USER log fails."""
self.test_runner.run_test_log_list_unauth_user()
@test
def test_log_generator_unauth_user(self):
"""Ensure log-generator by unauth client on USER log fails."""
self.test_runner.run_test_log_generator_unauth_user()
@test
def test_log_generator_publish_unauth_user(self):
"""Ensure log-generator by unauth client with publish fails."""
self.test_runner.run_test_log_generator_publish_unauth_user()
@test
def test_log_show_unexposed_user(self):
"""Ensure log-show on unexposed log fails for auth client."""
self.test_runner.run_test_log_show_unexposed_user()
@test
def test_log_enable_unexposed_user(self):
"""Ensure log-enable on unexposed log fails for auth client."""
self.test_runner.run_test_log_enable_unexposed_user()
@test
def test_log_disable_unexposed_user(self):
"""Ensure log-disable on unexposed log fails for auth client."""
self.test_runner.run_test_log_disable_unexposed_user()
@test
def test_log_publish_unexposed_user(self):
"""Ensure log-publish on unexposed log fails for auth client."""
self.test_runner.run_test_log_publish_unexposed_user()
@test
def test_log_discard_unexposed_user(self):
"""Ensure log-discard on unexposed log fails for auth client."""
self.test_runner.run_test_log_discard_unexposed_user()
# USER log tests
@test(runs_after=[test_log_list, test_admin_log_list])
def test_log_show(self):
"""Test that log-show works on USER log."""
self.test_runner.run_test_log_show()
@test(runs_after=[test_log_show])
def test_log_enable_user(self):
"""Test log-enable on USER log."""
self.test_runner.run_test_log_enable_user()
@test(runs_after=[test_log_enable_user])
def test_log_enable_flip_user(self):
"""Test that flipping restart-required log-enable works."""
self.test_runner.run_test_log_enable_flip_user()
@test(runs_after=[test_log_enable_flip_user])
def test_restart_datastore(self):
"""Test restart datastore if required."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore])
def test_wait_for_restart(self):
"""Wait for restart to complete."""
self.test_runner.run_test_wait_for_restart()
@test(runs_after=[test_wait_for_restart])
def test_log_publish_user(self):
"""Test log-publish on USER log."""
self.test_runner.run_test_log_publish_user()
@test(runs_after=[test_log_publish_user])
def test_add_data(self):
"""Add data for second log-publish on USER log."""
self.test_runner.run_test_add_data()
@test(runs_after=[test_add_data])
def test_verify_data(self):
"""Verify data for second log-publish on USER log."""
self.test_runner.run_test_verify_data()
@test(runs_after=[test_verify_data])
def test_log_publish_again_user(self):
"""Test log-publish again on USER log."""
self.test_runner.run_test_log_publish_again_user()
@test(runs_after=[test_log_publish_again_user])
def test_log_generator_user(self):
"""Test log-generator on USER log."""
self.test_runner.run_test_log_generator_user()
@test(runs_after=[test_log_generator_user])
def test_log_generator_publish_user(self):
"""Test log-generator with publish on USER log."""
self.test_runner.run_test_log_generator_publish_user()
@test(runs_after=[test_log_generator_publish_user])
def test_log_generator_swift_client_user(self):
"""Test log-generator on USER log with passed-in Swift client."""
self.test_runner.run_test_log_generator_swift_client_user()
@test(runs_after=[test_log_generator_swift_client_user])
def test_add_data_again(self):
"""Add more data for log-generator row-by-row test on USER log."""
self.test_runner.run_test_add_data_again()
@test(runs_after=[test_add_data_again])
def test_verify_data_again(self):
"""Verify data for log-generator row-by-row test on USER log."""
self.test_runner.run_test_verify_data_again()
@test(runs_after=[test_verify_data_again])
def test_log_generator_user_by_row(self):
"""Test log-generator on USER log row-by-row."""
self.test_runner.run_test_log_generator_user_by_row()
@test(depends_on=[test_log_publish_user],
runs_after=[test_log_generator_user_by_row])
def test_log_save_user(self):
"""Test log-save on USER log."""
self.test_runner.run_test_log_save_user()
@test(depends_on=[test_log_publish_user],
runs_after=[test_log_save_user])
def test_log_save_publish_user(self):
"""Test log-save on USER log with publish."""
self.test_runner.run_test_log_save_publish_user()
@test(runs_after=[test_log_save_publish_user])
def test_log_discard_user(self):
"""Test log-discard on USER log."""
self.test_runner.run_test_log_discard_user()
@test(runs_after=[test_log_discard_user])
def test_log_disable_user(self):
"""Test log-disable on USER log."""
self.test_runner.run_test_log_disable_user()
@test(runs_after=[test_log_disable_user])
def test_restart_datastore_again(self):
"""Test restart datastore again if required."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore_again])
def test_wait_for_restart_again(self):
"""Wait for restart to complete again."""
self.test_runner.run_test_wait_for_restart()
@test(runs_after=[test_wait_for_restart_again])
def test_log_show_after_stop_details(self):
"""Get log-show details before adding data."""
self.test_runner.run_test_log_show_after_stop_details()
@test(runs_after=[test_log_show_after_stop_details])
def test_add_data_again_after_stop(self):
"""Add more data to ensure logging has stopped on USER log."""
self.test_runner.run_test_add_data_again_after_stop()
@test(runs_after=[test_add_data_again_after_stop])
def test_verify_data_again_after_stop(self):
"""Verify data for stopped logging on USER log."""
self.test_runner.run_test_verify_data_again_after_stop()
@test(runs_after=[test_verify_data_again_after_stop])
def test_log_show_after_stop(self):
"""Test that log-show has same values on USER log."""
self.test_runner.run_test_log_show_after_stop()
@test(runs_after=[test_log_show_after_stop])
def test_log_enable_user_after_stop(self):
"""Test log-enable still works on USER log."""
self.test_runner.run_test_log_enable_user_after_stop()
@test(runs_after=[test_log_enable_user_after_stop])
def test_restart_datastore_after_stop_start(self):
"""Test restart datastore after stop/start if required."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore_after_stop_start])
def test_wait_for_restart_after_stop_start(self):
"""Wait for restart to complete again after stop/start."""
self.test_runner.run_test_wait_for_restart()
@test(runs_after=[test_wait_for_restart_after_stop_start])
def test_add_data_again_after_stop_start(self):
"""Add more data to ensure logging works again on USER log."""
self.test_runner.run_test_add_data_again_after_stop_start()
@test(runs_after=[test_add_data_again_after_stop_start])
def test_verify_data_again_after_stop_start(self):
"""Verify data for re-enabled logging on USER log."""
self.test_runner.run_test_verify_data_again_after_stop_start()
@test(runs_after=[test_verify_data_again_after_stop_start])
def test_log_publish_after_stop_start(self):
"""Test log-publish after stop/start on USER log."""
self.test_runner.run_test_log_publish_after_stop_start()
@test(runs_after=[test_log_publish_after_stop_start])
def test_log_disable_user_after_stop_start(self):
"""Test log-disable on USER log after stop/start."""
self.test_runner.run_test_log_disable_user_after_stop_start()
@test(runs_after=[test_log_disable_user_after_stop_start])
def test_restart_datastore_after_final_stop(self):
"""Test restart datastore again if required after final stop."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore_after_final_stop])
def test_wait_for_restart_after_final_stop(self):
"""Wait for restart to complete again after final stop."""
self.test_runner.run_test_wait_for_restart()
# SYS log tests
@test
def test_log_show_sys(self):
"""Test that log-show works for SYS log."""
self.test_runner.run_test_log_show_sys()
@test(runs_after=[test_log_show_sys])
def test_log_publish_sys(self):
"""Test log-publish on SYS log."""
self.test_runner.run_test_log_publish_sys()
@test(runs_after=[test_log_publish_sys])
def test_log_publish_again_sys(self):
"""Test log-publish again on SYS log."""
self.test_runner.run_test_log_publish_again_sys()
@test(depends_on=[test_log_publish_again_sys])
def test_log_generator_sys(self):
"""Test log-generator on SYS log."""
self.test_runner.run_test_log_generator_sys()
@test(runs_after=[test_log_generator_sys])
def test_log_generator_publish_sys(self):
"""Test log-generator with publish on SYS log."""
self.test_runner.run_test_log_generator_publish_sys()
@test(depends_on=[test_log_publish_sys],
runs_after=[test_log_generator_publish_sys])
def test_log_generator_swift_client_sys(self):
"""Test log-generator on SYS log with passed-in Swift client."""
self.test_runner.run_test_log_generator_swift_client_sys()
@test(depends_on=[test_log_publish_sys],
runs_after=[test_log_generator_swift_client_sys])
def test_log_save_sys(self):
"""Test log-save on SYS log."""
self.test_runner.run_test_log_save_sys()
@test(runs_after=[test_log_save_sys])
def test_log_save_publish_sys(self):
"""Test log-save on SYS log with publish."""
self.test_runner.run_test_log_save_publish_sys()
@test(runs_after=[test_log_save_publish_sys])
def test_log_discard_sys(self):
"""Test log-discard on SYS log."""
self.test_runner.run_test_log_discard_sys()
| 38.981538 | 78 | 0.721446 |
9e76f30a099e36500602df7f34f0ee06cf311bfe | 1,502 | py | Python | data_random.py | nuaaflash/NuaaOldBookStore | 259f551a5fa97f2862745cb239c50d8ef87efe49 | ["Apache-2.0"] | 1 | 2020-04-15T17:04:07.000Z | 2020-04-15T17:04:07.000Z | data_random.py | nuaaflash/NuaaOldBookStore | 259f551a5fa97f2862745cb239c50d8ef87efe49 | ["Apache-2.0"] | null | null | null | data_random.py | nuaaflash/NuaaOldBookStore | 259f551a5fa97f2862745cb239c50d8ef87efe49 | ["Apache-2.0"] | null | null | null |
from Bookstore.models import *
from django.utils import timezone
import random
import json
import hashlib
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def get_publisher():
return "Online Bookstore Publisher"
def get_date():
return timezone.now()
def get_isbn(book):
print book['book_name']
mm = hashlib.sha256(book['book_name']).hexdigest()
return mm[:5] + '-' + mm[5:10] + '-' + mm[10:15]
def get_language():
languages = ['Chinese', 'English', 'Japanese', 'Spanish', 'Germany']
return random.choice(languages)
def insert_book():
books_info = json.load(open('book.json'))
for book in books_info:
haha = Book.objects.create(isbn=get_isbn(book), name=book['book_name'],
description=book['book_description'],
publish_date=timezone.now(),
price=random.randint(20, 200),
publisher=get_publisher(),
page_number=random.randint(200, 500),
language=get_language(),
cover_image=book['book_image_name'])
def get_recommend_books(this_book, all_books):
rt = random.sample(all_books, 4)
while this_book in rt:
rt = random.sample(all_books, 4)
return rt
def insert_relations():
all_books = Book.objects.all()
for book in all_books:
recommend_books = get_recommend_books(book, all_books)
for rbook in recommend_books:
book.related_books.add(rbook)
if __name__ == '__main__':
    # Run the population steps defined above.
    insert_book()
    insert_relations()
| 28.339623 | 80 | 0.647137 |
a544c4c4974f55921d03264f3a15e1ef693ddf98 | 8,249 | py | Python | pyk4a/pyk4a.py | kevinkit/pyk4a | 159c6eff46a2aecfea8553efba0fd20950ccc763 | ["MIT"] | 2 | 2020-02-11T13:05:09.000Z | 2020-06-16T14:22:28.000Z | pyk4a/pyk4a.py | kevinkit/pyk4a | 159c6eff46a2aecfea8553efba0fd20950ccc763 | ["MIT"] | null | null | null | pyk4a/pyk4a.py | kevinkit/pyk4a | 159c6eff46a2aecfea8553efba0fd20950ccc763 | ["MIT"] | 1 | 2020-02-11T13:06:36.000Z | 2020-02-11T13:06:36.000Z |
from typing import Tuple, Union, Optional
import k4a_module
from enum import Enum
import numpy as np
from pyk4a.config import Config, ColorControlMode, ColorControlCommand
# k4a_wait_result_t
class Result(Enum):
Success = 0
Failed = 1
Timeout = 2
class K4AException(Exception):
pass
class K4ATimeoutException(K4AException):
pass
class PyK4A:
TIMEOUT_WAIT_INFINITE = -1
def __init__(self, config=Config(), device_id=0):
self._device_id = device_id
self._config = config
self.is_running = False
def __del__(self):
if self.is_running:
self.disconnect()
def connect(self):
self._device_open()
self._start_cameras()
self.is_running = True
def disconnect(self):
self._stop_cameras()
self._device_close()
self.is_running = False
def _device_open(self):
res = k4a_module.device_open(self._device_id)
self._verify_error(res)
def _device_close(self):
res = k4a_module.device_close()
self._verify_error(res)
def _start_cameras(self):
res = k4a_module.device_start_cameras(*self._config.unpack())
self._verify_error(res)
def _stop_cameras(self):
res = k4a_module.device_stop_cameras()
self._verify_error(res)
def get_capture(self, timeout=TIMEOUT_WAIT_INFINITE, color_only=False, transform_depth_to_color=True):
r"""Fetch a capture from the device and return as numpy array(s) or None.
Arguments:
:param timeout: Timeout in ms. Default is infinite.
:param color_only: If true, returns color image only as np.array
:param transform_depth_to_color: If true, transforms the depth image to the color image reference, using the
kinect azure device calibration parameters.
        Returns:
            :return img_color [, img_depth, img_ir] # any image may be None if config synchronized_images_only==False
        Examples::
            - if config synchronized_images_only=True
            >>> k4a.get_capture(color_only=True) # type: np.ndarray
            - if config synchronized_images_only=False, you must check whether each returned image is None
            >>> k4a.get_capture(color_only=True) # type: Optional[np.ndarray]
            >>> k4a.get_capture() # type: Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]]
        """
res = k4a_module.device_get_capture(timeout)
self._verify_error(res)
color = self._get_capture_color()
if color_only:
return color
else:
depth = self._get_capture_depth(transform_depth_to_color)
ir = self._get_capture_ir(False)
return color, depth, ir
def _get_capture_color(self) -> Optional[np.ndarray]:
return k4a_module.device_get_color_image()
def _get_capture_depth(self, transform_depth_to_color: bool) -> Optional[np.ndarray]:
return k4a_module.device_get_depth_image(transform_depth_to_color)
def _get_capture_ir(self, transform_ir_to_color: bool) -> Optional[np.ndarray]:
return k4a_module.device_get_ir_image(transform_ir_to_color)
@property
def sync_jack_status(self) -> Tuple[bool, bool]:
res, jack_in, jack_out = k4a_module.device_get_sync_jack()
self._verify_error(res)
return jack_in == 1, jack_out == 1
def _get_color_control(self, cmd: ColorControlCommand) -> Tuple[int, ColorControlMode]:
res, mode, value = k4a_module.device_get_color_control(cmd)
self._verify_error(res)
return value, ColorControlMode(mode)
def _set_color_control(self, cmd: ColorControlCommand, value: int, mode=ColorControlMode.MANUAL):
res = k4a_module.device_set_color_control(cmd, mode, value)
self._verify_error(res)
@property
def brightness(self) -> int:
return self._get_color_control(ColorControlCommand.BRIGHTNESS)[0]
@property
def contrast(self) -> int:
return self._get_color_control(ColorControlCommand.CONTRAST)[0]
@property
def saturation(self) -> int:
return self._get_color_control(ColorControlCommand.SATURATION)[0]
@property
def sharpness(self) -> int:
return self._get_color_control(ColorControlCommand.SHARPNESS)[0]
@property
def backlight_compensation(self) -> int:
return self._get_color_control(ColorControlCommand.BACKLIGHT_COMPENSATION)[0]
@property
def gain(self) -> int:
return self._get_color_control(ColorControlCommand.GAIN)[0]
@property
def powerline_frequency(self) -> int:
return self._get_color_control(ColorControlCommand.POWERLINE_FREQUENCY)[0]
@property
def exposure(self) -> int:
# sets mode to manual
return self._get_color_control(ColorControlCommand.EXPOSURE_TIME_ABSOLUTE)[0]
@property
def exposure_mode_auto(self) -> bool:
return self._get_color_control(ColorControlCommand.EXPOSURE_TIME_ABSOLUTE)[1] == ColorControlMode.AUTO
@property
def whitebalance(self) -> int:
# sets mode to manual
return self._get_color_control(ColorControlCommand.WHITEBALANCE)[0]
@property
def whitebalance_mode_auto(self) -> bool:
return self._get_color_control(ColorControlCommand.WHITEBALANCE)[1] == ColorControlMode.AUTO
@brightness.setter
def brightness(self, value: int):
self._set_color_control(ColorControlCommand.BRIGHTNESS, value)
@contrast.setter
def contrast(self, value: int):
self._set_color_control(ColorControlCommand.CONTRAST, value)
@saturation.setter
def saturation(self, value: int):
self._set_color_control(ColorControlCommand.SATURATION, value)
@sharpness.setter
def sharpness(self, value: int):
self._set_color_control(ColorControlCommand.SHARPNESS, value)
@backlight_compensation.setter
def backlight_compensation(self, value: int):
self._set_color_control(ColorControlCommand.BACKLIGHT_COMPENSATION, value)
@gain.setter
def gain(self, value: int):
self._set_color_control(ColorControlCommand.GAIN, value)
@powerline_frequency.setter
def powerline_frequency(self, value: int):
self._set_color_control(ColorControlCommand.POWERLINE_FREQUENCY, value)
@exposure.setter
def exposure(self, value: int):
self._set_color_control(ColorControlCommand.EXPOSURE_TIME_ABSOLUTE, value)
@exposure_mode_auto.setter
def exposure_mode_auto(self, mode_auto: bool, value=2500):
mode = ColorControlMode.AUTO if mode_auto else ColorControlMode.MANUAL
self._set_color_control(ColorControlCommand.EXPOSURE_TIME_ABSOLUTE, value=value, mode=mode)
@whitebalance.setter
def whitebalance(self, value: int):
self._set_color_control(ColorControlCommand.WHITEBALANCE, value)
@whitebalance_mode_auto.setter
def whitebalance_mode_auto(self, mode_auto: bool, value=2500):
mode = ColorControlMode.AUTO if mode_auto else ColorControlMode.MANUAL
self._set_color_control(ColorControlCommand.WHITEBALANCE, value=value, mode=mode)
    def _get_color_control_capabilities(self, cmd: ColorControlCommand) -> dict:
(res, supports_auto, min_value, max_value,
step_value, default_value, default_mode) = k4a_module.device_get_color_control_capabilities(cmd)
self._verify_error(res)
return {
"color_control_command": cmd,
"supports_auto": supports_auto == 1,
"min_value": min_value,
"max_value": max_value,
"step_value": step_value,
"default_value": default_value,
"default_mode": default_mode,
}
def reset_color_control_to_default(self):
for cmd in ColorControlCommand:
capability = self._get_color_control_capabilities(cmd)
self._set_color_control(cmd, capability["default_value"], capability["default_mode"])
@staticmethod
def _verify_error(res):
res = Result(res)
if res == Result.Failed:
raise K4AException()
elif res == Result.Timeout:
raise K4ATimeoutException()
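# Illustrative usage sketch (not part of the original module): a minimal
# driver for the PyK4A class defined above, assuming a built ``k4a_module``
# extension and a connected Azure Kinect device.
if __name__ == '__main__':
    k4a = PyK4A(Config())
    k4a.connect()
    try:
        # With color_only=False a (color, depth, ir) tuple is returned; any
        # element may be None when synchronized_images_only is False.
        color, depth, ir = k4a.get_capture(timeout=1000)
        if color is not None:
            print(color.shape)
    finally:
        k4a.disconnect()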
| 34.514644 | 120 | 0.693781 |
613dca9917ec51818666f3676713d8d46aa071bf | 10,350 | py | Python | my_plugins/youcompleteme/third_party/ycmd/ycmd/request_wrap.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | ["MIT"] | null | null | null | my_plugins/youcompleteme/third_party/ycmd/ycmd/request_wrap.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | ["MIT"] | null | null | null | my_plugins/youcompleteme/third_party/ycmd/ycmd/request_wrap.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | ["MIT"] | null | null | null |
# Copyright (C) 2014-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from ycmd.utils import ( ByteOffsetToCodepointOffset,
CodepointOffsetToByteOffset,
HashableDict,
LOGGER,
ToUnicode,
ToBytes,
SplitLines )
from ycmd.identifier_utils import StartOfLongestIdentifierEndingAtIndex
from ycmd.request_validation import EnsureRequestValid
# TODO: Change the custom computed (and other) keys to be actual properties on
# the object.
class RequestWrap:
def __init__( self, request, validate = True ):
if validate:
EnsureRequestValid( request )
self._request = request
    # Maps the keys returned by this object's __getitem__ to a tuple of
    # ( getter_method, setter_method ). Values computed by getter_method (or set
    # by setter_method) are cached in _cached_computed. setter_method may be
    # None for read-only items.
self._computed_key = {
# Unicode string representation of the current line. If the line requested
# is not in the file, returns ''.
'line_value': ( self._CurrentLine, None ),
# The calculated start column, as a codepoint offset into the
# unicode string line_value
'start_codepoint': ( self._GetCompletionStartCodepoint,
self._SetCompletionStartCodepoint ),
# The 'column_num' as a unicode codepoint offset
'column_codepoint': ( lambda: ByteOffsetToCodepointOffset(
self[ 'line_bytes' ],
self[ 'column_num' ] ),
None ),
# Bytes string representation of the current line
'line_bytes': ( lambda: ToBytes( self[ 'line_value' ] ),
None ),
# The calculated start column, as a byte offset into the UTF-8 encoded
# bytes returned by line_bytes
'start_column': ( self._GetCompletionStartColumn,
self._SetCompletionStartColumn ),
# Note: column_num is the byte offset into the UTF-8 encoded bytes
# returned by line_bytes
# unicode string representation of the 'query' after the beginning
# of the identifier to be completed
'query': ( self._Query, None ),
# Unicode string representation of the line value up to the character
# before the start of 'query'
'prefix': ( self._Prefix, None ),
'filetypes': ( self._Filetypes, None ),
'first_filetype': ( self._FirstFiletype, None ),
'force_semantic': ( self._GetForceSemantic, None ),
'lines': ( self._CurrentLines, None ),
'extra_conf_data': ( self._GetExtraConfData, None ),
}
self._cached_computed = {}
def __getitem__( self, key ):
if key in self._cached_computed:
return self._cached_computed[ key ]
if key in self._computed_key:
getter, _ = self._computed_key[ key ]
value = getter()
self._cached_computed[ key ] = value
return value
return self._request[ key ]
def __setitem__( self, key, value ):
if key in self._computed_key:
_, setter = self._computed_key[ key ]
if setter:
setter( value )
return
raise ValueError( f'Key "{ key }" is read-only' )
def __contains__( self, key ):
return key in self._computed_key or key in self._request
def __eq__( self, other ):
if ( self[ 'filepath' ] != other[ 'filepath' ] or
self[ 'filetypes' ] != other[ 'filetypes' ] or
self[ 'line_num' ] != other[ 'line_num' ] or
self[ 'start_column' ] != other[ 'start_column' ] or
self[ 'prefix' ] != other[ 'prefix' ] or
self[ 'force_semantic' ] != other[ 'force_semantic' ] or
self[ 'extra_conf_data' ] != other[ 'extra_conf_data' ] or
len( self[ 'file_data' ] ) != len( other[ 'file_data' ] ) ):
return False
for filename, file_data in self[ 'file_data' ].items():
if filename == self[ 'filepath' ]:
lines = self[ 'lines' ]
other_lines = other[ 'lines' ]
if len( lines ) != len( other_lines ):
return False
line_num = self[ 'line_num' ]
if ( lines[ : line_num - 1 ] != other_lines[ : line_num - 1 ] or
lines[ line_num : ] != other_lines[ line_num : ] ):
return False
elif ( filename not in other[ 'file_data' ] or
file_data != other[ 'file_data' ][ filename ] ):
return False
return True
def get( self, key, default = None ):
try:
return self[ key ]
except KeyError:
return default
def _CurrentLines( self ):
current_file = self[ 'filepath' ]
contents = self[ 'file_data' ][ current_file ][ 'contents' ]
return SplitLines( contents )
def _CurrentLine( self ):
try:
return self[ 'lines' ][ self[ 'line_num' ] - 1 ]
except IndexError:
LOGGER.exception( 'Client returned invalid line number %s '
'for file %s. Assuming empty',
self[ 'line_num' ],
self[ 'filepath' ] )
return ''
def _GetCompletionStartColumn( self ):
return CompletionStartColumn( self[ 'line_value' ],
self[ 'column_num' ],
self[ 'first_filetype' ] )
def _SetCompletionStartColumn( self, column_num ):
self._cached_computed[ 'start_column' ] = column_num
# Note: We must pre-compute (and cache) the codepoint equivalent. This is
# because the value calculated by the getter (_GetCompletionStartCodepoint)
# would be based on self[ 'column_codepoint' ] which would be incorrect; it
# does not know that the user has forced this value to be independent of the
# column.
self._cached_computed[ 'start_codepoint' ] = ByteOffsetToCodepointOffset(
self[ 'line_value' ],
column_num )
# The same applies to the 'prefix' (the bit before the start column) and the
# 'query' (the bit after the start column up to the cursor column). They are
# dependent on the 'start_codepoint' so we must reset them.
self._cached_computed.pop( 'prefix', None )
self._cached_computed.pop( 'query', None )
def _GetCompletionStartCodepoint( self ):
return CompletionStartCodepoint( self[ 'line_value' ],
self[ 'column_num' ],
self[ 'first_filetype' ] )
def _SetCompletionStartCodepoint( self, codepoint_offset ):
self._cached_computed[ 'start_codepoint' ] = codepoint_offset
# Note: We must pre-compute (and cache) the byte equivalent. This is because
# the value calculated by the getter (_GetCompletionStartColumn) would be
# based on self[ 'column_num' ], which would be incorrect; it does not know
# that the user has forced this value to be independent of the column.
self._cached_computed[ 'start_column' ] = CodepointOffsetToByteOffset(
self[ 'line_value' ],
codepoint_offset )
# The same applies to the 'prefix' (the bit before the start column) and the
# 'query' (the bit after the start column up to the cursor column). They are
# dependent on the 'start_codepoint' so we must reset them.
self._cached_computed.pop( 'prefix', None )
self._cached_computed.pop( 'query', None )
def _Query( self ):
return self[ 'line_value' ][
self[ 'start_codepoint' ] - 1 : self[ 'column_codepoint' ] - 1
]
def _Prefix( self ):
return self[ 'line_value' ][ : ( self[ 'start_codepoint' ] - 1 ) ]
def _FirstFiletype( self ):
try:
return self[ 'filetypes' ][ 0 ]
except ( KeyError, IndexError ):
return None
def _Filetypes( self ):
path = self[ 'filepath' ]
return self[ 'file_data' ][ path ][ 'filetypes' ]
def _GetForceSemantic( self ):
return bool( self._request.get( 'force_semantic', False ) )
def _GetExtraConfData( self ):
return HashableDict( self._request.get( 'extra_conf_data', {} ) )
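# Illustrative access sketch (not part of the original module) for the
# computed-key table above; ``request_dict`` is a hypothetical, already
# validated request:
#   wrapped = RequestWrap( request_dict )
#   wrapped[ 'start_column' ]      # computed lazily by its getter, then cached
#   wrapped[ 'start_column' ] = 3  # routed through the setter; the dependent
#                                  # cached 'prefix'/'query' values are reset
#   wrapped[ 'filepath' ]          # non-computed keys fall through to the raw request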
def CompletionStartColumn( line_value, column_num, filetype ):
"""Returns the 1-based byte index where the completion query should start.
So if the user enters:
foo.bar^
with the cursor being at the location of the caret (so the character *AFTER*
'r'), then the starting column would be the index of the letter 'b'.
NOTE: if the line contains multi-byte characters, then the result is not
the 'character' index (see CompletionStartCodepoint for that), and therefore
it is not safe to perform any character-relevant arithmetic on the result
of this method."""
return CodepointOffsetToByteOffset(
ToUnicode( line_value ),
CompletionStartCodepoint( line_value, column_num, filetype ) )
def CompletionStartCodepoint( line_value, column_num, filetype ):
"""Returns the 1-based codepoint index where the completion query should
start. So if the user enters:
ƒøø.∫å®^
with the cursor being at the location of the caret (so the character *AFTER*
'®'), then the starting column would be the index of the character '∫'
(i.e. 5, not its byte index)."""
# NOTE: column_num and other numbers on the wire are byte indices, but we need
# to walk codepoints for identifier checks.
codepoint_column_num = ByteOffsetToCodepointOffset( line_value, column_num )
unicode_line_value = ToUnicode( line_value )
# -1 and then +1 to account for difference between 0-based and 1-based
# indices/columns
codepoint_start_column = StartOfLongestIdentifierEndingAtIndex(
unicode_line_value, codepoint_column_num - 1, filetype ) + 1
return codepoint_start_column
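# Illustrative worked example (not part of the original module) of the
# byte-vs-codepoint distinction documented above. For the ASCII line
# 'foo.bar' with the cursor just after 'r', the wire column_num is 8 and both
# helpers return 5, the index of 'b'. For the docstring's unicode line
# 'ƒøø.∫å®' (7 codepoints, 14 UTF-8 bytes) with the cursor just after '®'
# (column_num == 15), CompletionStartCodepoint returns 5 (the codepoint index
# of '∫') while CompletionStartColumn returns 8 (its byte index).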
| 36.572438 | 80 | 0.644928 |
a5ee6a540775fe729a045b264bb72f276362042b | 2,856 | py | Python | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/UpdateBackupSourceGroupRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/UpdateBackupSourceGroupRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/UpdateBackupSourceGroupRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UpdateBackupSourceGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'UpdateBackupSourceGroup','hbr')
def get_BackupSources(self):
return self.get_query_params().get('BackupSources')
def set_BackupSources(self,BackupSources):
for i in range(len(BackupSources)):
if BackupSources[i].get('BackupSourceId') is not None:
self.add_query_param('BackupSource.' + str(i + 1) + '.BackupSourceId' , BackupSources[i].get('BackupSourceId'))
if BackupSources[i].get('DatabaseName') is not None:
self.add_query_param('BackupSource.' + str(i + 1) + '.DatabaseName' , BackupSources[i].get('DatabaseName'))
if BackupSources[i].get('Description') is not None:
self.add_query_param('BackupSource.' + str(i + 1) + '.Description' , BackupSources[i].get('Description'))
if BackupSources[i].get('ClusterId') is not None:
self.add_query_param('BackupSource.' + str(i + 1) + '.ClusterId' , BackupSources[i].get('ClusterId'))
def get_ImplicitlyCreateBackupSources(self):
return self.get_query_params().get('ImplicitlyCreateBackupSources')
def set_ImplicitlyCreateBackupSources(self,ImplicitlyCreateBackupSources):
self.add_query_param('ImplicitlyCreateBackupSources',ImplicitlyCreateBackupSources)
def get_BackupSourceIds(self):
return self.get_query_params().get('BackupSourceIds')
def set_BackupSourceIds(self,BackupSourceIds):
for i in range(len(BackupSourceIds)):
if BackupSourceIds[i] is not None:
self.add_query_param('BackupSourceId.' + str(i + 1) , BackupSourceIds[i]);
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_BackupSourceGroupId(self):
return self.get_query_params().get('BackupSourceGroupId')
def set_BackupSourceGroupId(self,BackupSourceGroupId):
        self.add_query_param('BackupSourceGroupId',BackupSourceGroupId)
| 43.938462 | 116 | 0.75035 |
284c5390604afef5813e1aa93a93a936bcc25a35 | 102,155 | py | Python | salt/utils/parsers.py | lyft/salt | 2715908423a412f736253d0e5d3cfe185a0179a2 | ["Apache-2.0"] | 3 | 2015-04-16T18:42:35.000Z | 2017-10-30T16:57:49.000Z | salt/utils/parsers.py | lyft/salt | 2715908423a412f736253d0e5d3cfe185a0179a2 | ["Apache-2.0"] | 16 | 2015-11-18T00:44:03.000Z | 2018-10-29T20:48:27.000Z | salt/utils/parsers.py | lyft/salt | 2715908423a412f736253d0e5d3cfe185a0179a2 | ["Apache-2.0"] | 1 | 2017-01-27T21:33:36.000Z | 2017-01-27T21:33:36.000Z |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
salt.utils.parsers
~~~~~~~~~~~~~~~~~~
This is where all the black magic happens on all of salt's CLI tools.
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import getpass
import logging
import optparse
import traceback
from functools import partial
# Import 3rd-party libs
import salt.ext.six as six
# Import salt libs
import salt.config as config
import salt.defaults.exitcodes
import salt.loader as loader
import salt.log.setup as log
import salt.syspaths as syspaths
import salt.utils as utils
import salt.version as version
import salt.utils.args
import salt.utils.xdg
from salt.utils import kinds
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.validate.path import is_writeable
def _sorted(mixins_or_funcs):
return sorted(
mixins_or_funcs, key=lambda mf: getattr(mf, '_mixin_prio_', 1000)
)
class MixInMeta(type):
# This attribute here won't actually do anything. But, if you need to
# specify an order or a dependency within the mix-ins, please define the
# attribute on your own MixIn
_mixin_prio_ = 0
def __new__(mcs, name, bases, attrs):
instance = super(MixInMeta, mcs).__new__(mcs, name, bases, attrs)
if not hasattr(instance, '_mixin_setup'):
raise RuntimeError(
'Don\'t subclass {0} in {1} if you\'re not going to use it '
'as a salt parser mix-in.'.format(mcs.__name__, name)
)
return instance
class OptionParserMeta(MixInMeta):
def __new__(mcs, name, bases, attrs):
instance = super(OptionParserMeta, mcs).__new__(mcs,
name,
bases,
attrs)
if not hasattr(instance, '_mixin_setup_funcs'):
instance._mixin_setup_funcs = []
if not hasattr(instance, '_mixin_process_funcs'):
instance._mixin_process_funcs = []
if not hasattr(instance, '_mixin_after_parsed_funcs'):
instance._mixin_after_parsed_funcs = []
for base in _sorted(bases + (instance,)):
func = getattr(base, '_mixin_setup', None)
if func is not None and func not in instance._mixin_setup_funcs:
instance._mixin_setup_funcs.append(func)
func = getattr(base, '_mixin_after_parsed', None)
if func is not None and func not in \
instance._mixin_after_parsed_funcs:
instance._mixin_after_parsed_funcs.append(func)
# Mark process_<opt> functions with the base priority for sorting
for func in dir(base):
if not func.startswith('process_'):
continue
func = getattr(base, func)
if getattr(func, '_mixin_prio_', None) is not None:
# Function already has the attribute set, don't override it
continue
if six.PY2:
func.__func__._mixin_prio_ = getattr(
base, '_mixin_prio_', 1000
)
else:
func._mixin_prio_ = getattr(
base, '_mixin_prio_', 1000
)
return instance
class OptionParser(optparse.OptionParser, object):
VERSION = version.__saltstack_version__.formatted_version
usage = '%prog'
epilog = ('You can find additional help about %prog issuing "man %prog" '
'or on http://docs.saltstack.org')
description = None
# Private attributes
_mixin_prio_ = 100
def __init__(self, *args, **kwargs):
kwargs.setdefault('version', '%prog {0}'.format(self.VERSION))
kwargs.setdefault('usage', self.usage)
if self.description:
kwargs.setdefault('description', self.description)
if self.epilog:
kwargs.setdefault('epilog', self.epilog)
optparse.OptionParser.__init__(self, *args, **kwargs)
if self.epilog and '%prog' in self.epilog:
self.epilog = self.epilog.replace('%prog', self.get_prog_name())
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
if 'args_stdin' in options.__dict__ and options.args_stdin is True:
# Read additional options and/or arguments from stdin and combine
# them with the options and arguments from the command line.
new_inargs = sys.stdin.readlines()
new_inargs = [arg.rstrip('\r\n') for arg in new_inargs]
new_options, new_args = optparse.OptionParser.parse_args(
self,
new_inargs)
options.__dict__.update(new_options.__dict__)
args.extend(new_args)
if options.versions_report:
self.print_versions_report()
self.options, self.args = options, args
# Let's get some proper sys.stderr logging as soon as possible!!!
# This logging handler will be removed once the proper console or
# logfile logging is setup.
log.setup_temp_logger(
getattr(self.options, 'log_level', 'error')
)
# Gather and run the process_<option> functions in the proper order
process_option_funcs = []
for option_key in options.__dict__:
process_option_func = getattr(
self, 'process_{0}'.format(option_key), None
)
if process_option_func is not None:
process_option_funcs.append(process_option_func)
for process_option_func in _sorted(process_option_funcs):
try:
process_option_func()
except Exception as err:
logging.getLogger(__name__).exception(err)
self.error(
'Error while processing {0}: {1}'.format(
process_option_func, traceback.format_exc(err)
)
)
# Run the functions on self._mixin_after_parsed_funcs
for mixin_after_parsed_func in self._mixin_after_parsed_funcs:
try:
mixin_after_parsed_func(self)
except Exception as err:
logging.getLogger(__name__).exception(err)
self.error(
'Error while processing {0}: {1}'.format(
mixin_after_parsed_func, traceback.format_exc(err)
)
)
if self.config.get('conf_file', None) is not None:
logging.getLogger(__name__).debug(
'Configuration file path: {0}'.format(
self.config['conf_file']
)
)
# Retain the standard behavior of optparse to return options and args
return options, args
def _populate_option_list(self, option_list, add_help=True):
optparse.OptionParser._populate_option_list(
self, option_list, add_help=add_help
)
for mixin_setup_func in self._mixin_setup_funcs:
mixin_setup_func(self)
def _add_version_option(self):
optparse.OptionParser._add_version_option(self)
self.add_option(
'--versions-report', action='store_true',
help='show program\'s dependencies version number and exit'
)
def print_versions_report(self, file=sys.stdout):
print('\n'.join(version.versions_report()), file=file)
self.exit(salt.defaults.exitcodes.EX_OK)
class MergeConfigMixIn(six.with_metaclass(MixInMeta, object)):
'''
This mix-in will simply merge the CLI-passed options, by overriding the
configuration file loaded settings.
This mix-in should run last.
'''
_mixin_prio_ = six.MAXSIZE
def _mixin_setup(self):
if not hasattr(self, 'setup_config') and not hasattr(self, 'config'):
# No configuration was loaded on this parser.
# There's nothing to do here.
return
# Add an additional function that will merge the shell options with
# the config options and if needed override them
self._mixin_after_parsed_funcs.append(self.__merge_config_with_cli)
def __merge_config_with_cli(self, *args):
# Merge parser options
for option in self.option_list:
if option.dest is None:
# --version does not have dest attribute set for example.
# All options defined by us, even if not explicitly(by kwarg),
# will have the dest attribute set
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and value != default:
# Only set the value in the config file IF it's not the default
# value, this makes it possible to tweak settings on the
# configuration files bypassing the shell option flags
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use of
# the updated value by using self.options.<option>
setattr(self.options, option.dest, self.config[option.dest])
# Merge parser group options if any
for group in self.option_groups:
for option in group.option_list:
if option.dest is None:
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and value != default:
# Only set the value in the config file IF it's not the
# default value, this makes it possible to tweak settings
# on the configuration files bypassing the shell option
# flags
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use
# of the updated value by using self.options.<option>
setattr(self.options,
option.dest,
self.config[option.dest])
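# Illustrative precedence sketch (not part of the original module) for the
# merge above, using a hypothetical option '--timeout' whose parser default
# is 5:
#   * config file sets timeout: 60 and -t is not passed  -> the CLI value
#     equals the default, so self.config['timeout'] stays 60 and
#     self.options.timeout is updated from the config file.
#   * config file sets timeout: 60 and '-t 10' is passed -> the non-default
#     CLI value wins and self.config['timeout'] becomes 10.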
class SaltfileMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = -20
def _mixin_setup(self):
self.add_option(
'--saltfile', default=None,
help='Specify the path to a Saltfile. If not passed, one will be '
'searched for in the current working directory'
)
def process_saltfile(self):
if self.options.saltfile is None:
# No one passed a Saltfile as an option, environment variable!?
self.options.saltfile = os.environ.get('SALT_SALTFILE', None)
if self.options.saltfile is None:
# If we're here, no one passed a Saltfile either to the CLI tool or
# as an environment variable.
# Is there a Saltfile in the current directory?
try: # cwd may not exist if it was removed but salt was run from it
saltfile = os.path.join(os.getcwd(), 'Saltfile')
except OSError:
saltfile = ''
if os.path.isfile(saltfile):
self.options.saltfile = saltfile
else:
saltfile = self.options.saltfile
if not self.options.saltfile:
# There's still no valid Saltfile? No need to continue...
return
if not os.path.isfile(self.options.saltfile):
self.error(
                '{0!r} file does not exist.\n'.format(self.options.saltfile)
            )
# Make sure we have an absolute path
self.options.saltfile = os.path.abspath(self.options.saltfile)
# Make sure we let the user know that we will be loading a Saltfile
logging.getLogger(__name__).info(
'Loading Saltfile from {0!r}'.format(self.options.saltfile)
)
saltfile_config = config._read_conf_file(saltfile)
if not saltfile_config:
# No configuration was loaded from the Saltfile
return
if self.get_prog_name() not in saltfile_config:
# There's no configuration specific to the CLI tool. Stop!
return
# We just want our own configuration
cli_config = saltfile_config[self.get_prog_name()]
# If there are any options, who's names match any key from the loaded
# Saltfile, we need to update its default value
for option in self.option_list:
if option.dest is None:
# --version does not have dest attribute set for example.
continue
if option.dest not in cli_config:
# If we don't have anything in Saltfile for this option, let's
# continue processing right now
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if value != default:
# The user passed an argument, we won't override it with the
# one from Saltfile, if any
continue
# We reached this far! Set the Saltfile value on the option
setattr(self.options, option.dest, cli_config[option.dest])
# Let's also search for options referred in any option groups
for group in self.option_groups:
for option in group.option_list:
if option.dest is None:
continue
if option.dest not in cli_config:
# If we don't have anything in Saltfile for this option,
# let's continue processing right now
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if value != default:
# The user passed an argument, we won't override it with
# the one from Saltfile, if any
continue
if option.dest in cli_config:
setattr(self.options,
option.dest,
cli_config[option.dest])
# Any left over value in the saltfile can now be safely added
for key in cli_config:
setattr(self.options, key, cli_config[key])
class HardCrashMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 40
_config_filename_ = None
def _mixin_setup(self):
hc = os.environ.get('SALT_HARD_CRASH', False)
self.add_option(
'--hard-crash', action='store_true', default=hc,
help=('Raise any original exception rather than exiting gracefully'
                  '. Default: %default')
)
class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = -10
_config_filename_ = None
_default_config_dir_ = syspaths.CONFIG_DIR
_default_config_dir_env_var_ = 'SALT_CONFIG_DIR'
def _mixin_setup(self):
config_dir = os.environ.get(self._default_config_dir_env_var_, None)
if not config_dir:
config_dir = self._default_config_dir_
logging.getLogger(__name__).debug('SYSPATHS setup as: {0}'.format(syspaths.CONFIG_DIR))
self.add_option(
'-c', '--config-dir', default=config_dir,
help=('Pass in an alternative configuration directory. Default: '
'%default')
)
def process_config_dir(self):
if not os.path.isdir(self.options.config_dir):
# No logging is configured yet
sys.stderr.write(
'WARNING: CONFIG {0!r} directory does not exist.\n'.format(
self.options.config_dir
)
)
# Make sure we have an absolute path
self.options.config_dir = os.path.abspath(self.options.config_dir)
if hasattr(self, 'setup_config'):
if not hasattr(self, 'config'):
self.config = {}
try:
self.config.update(self.setup_config())
except (IOError, OSError) as exc:
self.error(
'Failed to load configuration: {0}'.format(exc)
)
def get_config_file_path(self, configfile=None):
if configfile is None:
configfile = self._config_filename_
return os.path.join(self.options.config_dir, configfile)
class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
_default_logging_level_ = 'warning'
_default_logging_logfile_ = None
_logfile_config_setting_name_ = 'log_file'
_loglevel_config_setting_name_ = 'log_level'
_logfile_loglevel_config_setting_name_ = 'log_level_logfile'
_skip_console_logging_config_ = False
def _mixin_setup(self):
if self._default_logging_logfile_ is None:
# This is an attribute available for programmers, so, raise a
# RuntimeError to let them know about the proper usage.
raise RuntimeError(
'Please set {0}._default_logging_logfile_'.format(
self.__class__.__name__
)
)
group = self.logging_options_group = optparse.OptionGroup(
self, 'Logging Options',
'Logging options which override any settings defined on the '
'configuration files.'
)
self.add_option_group(group)
if not getattr(self, '_skip_console_logging_config_', False):
group.add_option(
'-l', '--log-level',
choices=list(log.LOG_LEVELS),
help='Console logging log level. One of {0}. '
'Default: \'{1}\'.'.format(
', '.join([repr(l) for l in log.SORTED_LEVEL_NAMES]),
getattr(self, '_default_logging_level_', 'warning')
)
)
group.add_option(
'--log-file',
default=None,
help='Log file path. Default: {0}.'.format(
self._default_logging_logfile_
)
)
group.add_option(
'--log-file-level',
dest=self._logfile_loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
help='Logfile logging log level. One of {0}. '
'Default: \'{1}\'.'.format(
', '.join([repr(l) for l in log.SORTED_LEVEL_NAMES]),
getattr(self, '_default_logging_level_', 'warning')
)
)
def process_log_level(self):
if not self.options.log_level:
cli_log_level = 'cli_{0}_log_level'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_log_level, None) is not None:
self.options.log_level = self.config.get(cli_log_level)
elif self.config.get(self._loglevel_config_setting_name_, None):
self.options.log_level = self.config.get(
self._loglevel_config_setting_name_
)
else:
self.options.log_level = self._default_logging_level_
# Setup extended logging right before the last step
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
# Setup the console as the last _mixin_after_parsed_func to run
self._mixin_after_parsed_funcs.append(self.__setup_console_logger)
def process_log_file(self):
if not self.options.log_file:
cli_setting_name = 'cli_{0}_log_file'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_setting_name, None) is not None:
# There's a configuration setting defining this log file path,
# i.e., `key_log_file` if the cli tool is `salt-key`
self.options.log_file = self.config.get(cli_setting_name)
elif self.config.get(self._logfile_config_setting_name_, None):
# Is the regular log file setting set?
self.options.log_file = self.config.get(
self._logfile_config_setting_name_
)
else:
# Nothing is set on the configuration? Let's use the cli tool
# defined default
self.options.log_file = self._default_logging_logfile_
def process_log_file_level(self):
if not self.options.log_file_level:
cli_setting_name = 'cli_{0}_log_file_level'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_setting_name, None) is not None:
# There's a configuration setting defining this log file
# logging level, i.e., `key_log_file_level` if the cli tool is
# `salt-key`
self.options.log_file_level = self.config.get(cli_setting_name)
elif self.config.get(
self._logfile_loglevel_config_setting_name_, None):
# Is the regular log file level setting set?
self.options.log_file_level = self.config.get(
self._logfile_loglevel_config_setting_name_
)
else:
# Nothing is set on the configuration? Let's use the cli tool
# defined default
                self.options.log_file_level = self._default_logging_level_
def setup_logfile_logger(self):
if self._logfile_loglevel_config_setting_name_ in self.config and not \
self.config.get(self._logfile_loglevel_config_setting_name_):
# Remove it from config so it inherits from log_level
self.config.pop(self._logfile_loglevel_config_setting_name_)
loglevel = self.config.get(
self._logfile_loglevel_config_setting_name_,
self.config.get(
# From the config setting
self._loglevel_config_setting_name_,
# From the console setting
self.config['log_level']
)
)
cli_log_path = 'cli_{0}_log_file'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_path in self.config and not self.config.get(cli_log_path):
# Remove it from config so it inherits from log_level_logfile
self.config.pop(cli_log_path)
if self._logfile_config_setting_name_ in self.config and not \
self.config.get(self._logfile_config_setting_name_):
# Remove it from config so it inherits from log_file
self.config.pop(self._logfile_config_setting_name_)
logfile = self.config.get(
# First from the config cli setting
cli_log_path,
self.config.get(
# From the config setting
self._logfile_config_setting_name_,
# From the default setting
self._default_logging_logfile_
)
)
cli_log_file_fmt = 'cli_{0}_log_file_fmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_file_fmt in self.config and not \
self.config.get(cli_log_file_fmt):
# Remove it from config so it inherits from log_fmt_logfile
self.config.pop(cli_log_file_fmt)
if self.config.get('log_fmt_logfile', None) is None:
# Remove it from config so it inherits from log_fmt_console
self.config.pop('log_fmt_logfile', None)
log_file_fmt = self.config.get(
cli_log_file_fmt,
self.config.get(
'cli_{0}_log_fmt'.format(
self.get_prog_name().replace('-', '_')
),
self.config.get(
'log_fmt_logfile',
self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
)
)
)
cli_log_file_datefmt = 'cli_{0}_log_file_datefmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_file_datefmt in self.config and not \
self.config.get(cli_log_file_datefmt):
# Remove it from config so it inherits from log_datefmt_logfile
self.config.pop(cli_log_file_datefmt)
if self.config.get('log_datefmt_logfile', None) is None:
# Remove it from config so it inherits from log_datefmt_console
self.config.pop('log_datefmt_logfile', None)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
log_file_datefmt = self.config.get(
cli_log_file_datefmt,
self.config.get(
'cli_{0}_log_datefmt'.format(
self.get_prog_name().replace('-', '_')
),
self.config.get(
'log_datefmt_logfile',
self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
)
)
)
if not is_writeable(logfile, check_parent=True):
# Since we're not be able to write to the log file or its parent
# directory (if the log file does not exit), are we the same user
# as the one defined in the configuration file?
current_user = salt.utils.get_user()
if self.config['user'] != current_user:
# Yep, not the same user!
# Is the current user in ACL?
if current_user in self.config.get('client_acl', {}):
# Yep, the user is in ACL!
# Let's write the logfile to its home directory instead.
xdg_dir = salt.utils.xdg.xdg_config_dir()
user_salt_dir = (xdg_dir if os.path.isdir(xdg_dir) else
os.path.expanduser('~/.salt'))
if not os.path.isdir(user_salt_dir):
os.makedirs(user_salt_dir, 0o750)
logfile_basename = os.path.basename(
self._default_logging_logfile_
)
logging.getLogger(__name__).debug(
'The user {0!r} is not allowed to write to {1!r}. '
'The log file will be stored in '
'\'~/.salt/{2!r}.log\''.format(
current_user,
logfile,
logfile_basename
)
)
logfile = os.path.join(
user_salt_dir, '{0}.log'.format(logfile_basename)
)
# If we haven't changed the logfile path and it's not writeable,
# salt will fail once we try to setup the logfile logging.
log.setup_logfile_logger(
logfile,
loglevel,
log_format=log_file_fmt,
date_format=log_file_datefmt
)
for name, level in six.iteritems(self.config['log_granular_levels']):
log.set_logger_level(name, level)
def __setup_extended_logging(self, *args):
log.setup_extended_logging(self.config)
def __setup_console_logger(self, *args):
# If daemon is set force console logger to quiet
if getattr(self.options, 'daemon', False) is True:
return
# Since we're not going to be a daemon, setup the console logger
cli_log_fmt = 'cli_{0}_log_fmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_fmt in self.config and not self.config.get(cli_log_fmt):
# Remove it from config so it inherits from log_fmt_console
self.config.pop(cli_log_fmt)
logfmt = self.config.get(
cli_log_fmt, self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
)
cli_log_datefmt = 'cli_{0}_log_datefmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_datefmt in self.config and not \
self.config.get(cli_log_datefmt):
# Remove it from config so it inherits from log_datefmt_console
self.config.pop(cli_log_datefmt)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
datefmt = self.config.get(
cli_log_datefmt,
self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
)
log.setup_console_logger(
self.config['log_level'], log_format=logfmt, date_format=datefmt
)
for name, level in six.iteritems(self.config['log_granular_levels']):
log.set_logger_level(name, level)
class RunUserMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
def _mixin_setup(self):
self.add_option(
'-u', '--user',
help='Specify user to run {0}'.format(self.get_prog_name())
)
class DaemonMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
self.add_option(
'-d', '--daemon',
default=False,
action='store_true',
help='Run the {0} as a daemon'.format(self.get_prog_name())
)
def daemonize_if_required(self):
if self.options.daemon:
# Late import so logging works correctly
import salt.utils
salt.utils.daemonize()
class PidfileMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 40
def _mixin_setup(self):
self.add_option(
'--pid-file', dest='pidfile',
default=os.path.join(
syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name())
),
help=('Specify the location of the pidfile. Default: %default')
)
def set_pidfile(self):
from salt.utils.process import set_pidfile
set_pidfile(self.config['pidfile'], self.config['user'])
class TargetOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
selected_target_option = None
def _mixin_setup(self):
group = self.target_options_group = optparse.OptionGroup(
self, 'Target Options', 'Target Selection Options'
)
self.add_option_group(group)
group.add_option(
'-E', '--pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, use pcre regular expressions')
)
group.add_option(
'-L', '--list',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, take a comma or space delimited list of '
'servers.')
)
group.add_option(
'-G', '--grain',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
                  'for the target is the grain key followed by a glob '
'expression:\n"os:Arch*"')
)
group.add_option(
'-P', '--grain-pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
'for the target is the grain key followed by a pcre '
'regular expression:\n"os:Arch.*"')
)
group.add_option(
'-N', '--nodegroup',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use one of the predefined nodegroups to identify a '
'list of targets.')
)
group.add_option(
'-R', '--range',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a range expression to identify targets. '
'Range expressions look like %cluster')
)
group = self.additional_target_options_group = optparse.OptionGroup(
self,
'Additional Target Options',
'Additional Options for Minion Targeting'
)
self.add_option_group(group)
group.add_option(
'--delimiter',
default=DEFAULT_TARGET_DELIM,
help=('Change the default delimiter for matching in multi-level '
'data structures. default=\'%default\'')
)
self._create_process_functions()
def _create_process_functions(self):
for option in self.target_options_group.option_list:
def process(opt):
if getattr(self.options, opt.dest):
self.selected_target_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.target_options_group.option_list if
getattr(self.options, option.dest) is True
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join(
[option.get_opt_string()
for option in group_options_selected]))
)
self.config['selected_target_option'] = self.selected_target_option
class ExtendedTargetOptionsMixIn(TargetOptionsMixIn):
def _mixin_setup(self):
TargetOptionsMixIn._mixin_setup(self)
group = self.target_options_group
group.add_option(
'-C', '--compound',
default=False,
action='store_true',
help=('The compound target option allows for multiple target '
'types to be evaluated, allowing for greater granularity in '
'target matching. The compound target is space delimited, '
'targets other than globs are preceded with an identifier '
'matching the specific targets argument type: salt '
'\'G@os:RedHat and webser* or E@database.*\'')
)
group.add_option(
'-I', '--pillar',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a pillar value to identify targets, the syntax '
'for the target is the pillar key followed by a glob '
'expression:\n"role:production*"')
)
group.add_option(
'-J', '--pillar-pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a pillar value to identify targets, the syntax '
'for the target is the pillar key followed by a pcre '
'regular expression:\n"role:prod.*"')
)
group.add_option(
'-S', '--ipcidr',
default=False,
action='store_true',
help=('Match based on Subnet (CIDR notation) or IP address.')
)
self._create_process_functions()
class TimeoutMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
if not hasattr(self, 'default_timeout'):
raise RuntimeError(
'You need to define the \'default_timeout\' attribute '
'on {0}'.format(self.__class__.__name__)
)
self.add_option(
'-t', '--timeout',
type=int,
default=self.default_timeout,
help=('Change the timeout, if applicable, for the running '
'command (in seconds); default=%default')
)
class ArgsStdinMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
self.add_option(
'--args-stdin',
default=False,
dest='args_stdin',
action='store_true',
help=('Read additional options and/or arguments from stdin. '
'Each entry is newline separated.')
)
class ProxyIdMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio = 40
def _mixin_setup(self):
self.add_option(
'--proxyid',
default=None,
dest='proxyid',
help=('Id for this proxy')
)
class OutputOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 40
_include_text_out_ = False
selected_output_option = None
def _mixin_setup(self):
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format'
)
self.add_option_group(group)
outputters = loader.outputters(
config.minion_config(None)
)
group.add_option(
'--out', '--output',
dest='output',
help=(
'Print the output from the {0!r} command using the '
'specified outputter. The builtins are {1}.'.format(
self.get_prog_name(),
', '.join([repr(k) for k in outputters])
)
)
)
group.add_option(
'--out-indent', '--output-indent',
dest='output_indent',
default=None,
type=int,
help=('Print the output indented by the provided value in spaces. '
'Negative values disables indentation. Only applicable in '
'outputters that support indentation.')
)
group.add_option(
'--out-file', '--output-file',
dest='output_file',
default=None,
help='Write the output to the specified file'
)
group.add_option(
'--out-file-append', '--output-file-append',
action='store_true',
dest='output_file_append',
default=False,
help='Append the output to the specified file'
)
group.add_option(
'--no-color', '--no-colour',
default=False,
action='store_true',
help='Disable all colored output'
)
group.add_option(
'--force-color', '--force-colour',
default=False,
action='store_true',
help='Force colored output'
)
group.add_option(
'--state-output', '--state_output',
default='full',
help=('Override the configured state_output value for minion '
'output. One of full, terse, mixed, changes or filter. '
'Default: full.')
)
for option in self.output_options_group.option_list:
def process(opt):
default = self.defaults.get(opt.dest)
if getattr(self.options, opt.dest, default) is False:
return
self.selected_output_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
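    # The loop above follows the parser's usual convention of auto-generating
    # a ``process_<dest>()`` callback (via ``partial(process, option)``) for
    # every option in the group, unless one is defined explicitly, as
    # ``process_output()`` and ``process_output_file()`` are below. The
    # surrounding parser machinery (outside this excerpt) is what later
    # invokes these ``process_*`` callbacks once the command line is parsed.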
def process_output(self):
self.selected_output_option = self.options.output
def process_output_file(self):
if self.options.output_file is not None and self.options.output_file_append is False:
if os.path.isfile(self.options.output_file):
try:
with utils.fopen(self.options.output_file, 'w') as ofh:
                        # Make this a zero-length file instead of removing
                        # it; this way we keep the file permissions.
ofh.write('')
except (IOError, OSError) as exc:
self.error(
'{0}: Access denied: {1}'.format(
self.options.output_file,
exc
)
)
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.output_options_group.option_list if (
getattr(self.options, option.dest) and
(option.dest.endswith('_out') or option.dest == 'output'))
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join([
option.get_opt_string() for
option in group_options_selected
]))
)
self.config['selected_output_option'] = self.selected_output_option
class ExecutionOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
group = self.execution_group = optparse.OptionGroup(
self,
'Execution Options',
# Include description here as a string
)
group.add_option(
'-L', '--location',
default=None,
help='Specify which region to connect to.'
)
group.add_option(
'-a', '--action',
default=None,
help='Perform an action that may be specific to this cloud '
'provider. This argument requires one or more instance '
'names to be specified.'
)
group.add_option(
'-f', '--function',
nargs=2,
default=None,
metavar='<FUNC-NAME> <PROVIDER>',
            help='Perform a function that may be specific to this cloud '
                 'provider and that does not apply to an instance. This '
                 'argument requires a provider to be specified (e.g. nova).'
)
group.add_option(
'-p', '--profile',
default=None,
help='Create an instance using the specified profile.'
)
group.add_option(
'-m', '--map',
default=None,
            help='Specify a cloud map file to use for deployment. This option '
                 'may be used alone, or in conjunction with -Q, -F, -S or -d. '
                 'The map can also be filtered by a list of VM names.'
)
group.add_option(
'-H', '--hard',
default=False,
action='store_true',
help='Delete all VMs that are not defined in the map file. '
'CAUTION!!! This operation can irrevocably destroy VMs! It '
'must be explicitly enabled in the cloud config file.'
)
group.add_option(
'-d', '--destroy',
default=False,
action='store_true',
help='Destroy the specified instance(s).'
)
group.add_option(
'--no-deploy',
default=True,
dest='deploy',
action='store_false',
help='Don\'t run a deploy script after instance creation.'
)
group.add_option(
'-P', '--parallel',
default=False,
action='store_true',
help='Build all of the specified instances in parallel.'
)
group.add_option(
'-u', '--update-bootstrap',
default=False,
action='store_true',
help='Update salt-bootstrap to the latest develop version on '
'GitHub.'
)
group.add_option(
'-y', '--assume-yes',
default=False,
action='store_true',
help='Default yes in answer to all confirmation questions.'
)
group.add_option(
'-k', '--keep-tmp',
default=False,
action='store_true',
help='Do not remove files from /tmp/ after deploy.sh finishes.'
)
group.add_option(
'--show-deploy-args',
default=False,
action='store_true',
help='Include the options used to deploy the minion in the data '
'returned.'
)
group.add_option(
'--script-args',
default=None,
help='Script arguments to be fed to the bootstrap script when '
'deploying the VM'
)
self.add_option_group(group)
def process_function(self):
if self.options.function:
self.function_name, self.function_provider = self.options.function
if self.function_provider.startswith('-') or \
'=' in self.function_provider:
self.error(
'--function expects two arguments: <function-name> '
'<provider>'
)
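    # Illustrative invocation (example only, not taken from the source):
    #   salt-cloud -f <function-name> <provider>
    # The function name must come first and the provider second;
    # process_function() above raises a usage error when the second
    # argument looks like another option ('-...') or a kwarg ('key=value').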
class CloudQueriesMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
selected_query_option = None
def _mixin_setup(self):
group = self.cloud_queries_group = optparse.OptionGroup(
self,
'Query Options',
# Include description here as a string
)
group.add_option(
'-Q', '--query',
default=False,
action='store_true',
help=('Execute a query and return some information about the '
'nodes running on configured cloud providers')
)
group.add_option(
'-F', '--full-query',
default=False,
action='store_true',
help=('Execute a query and return all information about the '
'nodes running on configured cloud providers')
)
group.add_option(
'-S', '--select-query',
default=False,
action='store_true',
help=('Execute a query and return select information about '
'the nodes running on configured cloud providers')
)
group.add_option(
'--list-providers',
default=False,
action='store_true',
help='Display a list of configured providers.'
)
group.add_option(
'--list-profiles',
default=None,
action='store',
help='Display a list of configured profiles. Pass in a cloud '
'provider to view the provider\'s associated profiles, '
'such as digital_ocean, or pass in "all" to list all the '
'configured profiles.'
)
self.add_option_group(group)
self._create_process_functions()
def _create_process_functions(self):
for option in self.cloud_queries_group.option_list:
def process(opt):
if getattr(self.options, opt.dest):
query = 'list_nodes'
if opt.dest == 'full_query':
query += '_full'
elif opt.dest == 'select_query':
query += '_select'
elif opt.dest == 'list_providers':
query = 'list_providers'
if self.args:
self.error(
'\'--list-providers\' does not accept any '
'arguments'
)
elif opt.dest == 'list_profiles':
query = 'list_profiles'
option_dict = vars(self.options)
if option_dict.get('list_profiles') == '--list-providers':
self.error(
'\'--list-profiles\' does not accept '
'\'--list-providers\' as an argument'
)
self.selected_query_option = query
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
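    # The generated process_* callbacks above map the query flags onto the
    # cloud query names used downstream: -Q -> 'list_nodes',
    # -F -> 'list_nodes_full', -S -> 'list_nodes_select',
    # --list-providers -> 'list_providers' and
    # --list-profiles -> 'list_profiles'; whichever flag was passed ends up
    # in self.selected_query_option (and then in the config, below).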
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.cloud_queries_group.option_list if
getattr(self.options, option.dest) is not False and
getattr(self.options, option.dest) is not None
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join([
option.get_opt_string() for option in
group_options_selected
]))
)
self.config['selected_query_option'] = self.selected_query_option
class CloudProvidersListsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
group = self.providers_listings_group = optparse.OptionGroup(
self,
'Cloud Providers Listings',
# Include description here as a string
)
group.add_option(
'--list-locations',
default=None,
            help=('Display a list of locations available in configured cloud '
                  'providers. Pass the cloud provider for which available '
                  'locations are desired, e.g. "linode", or pass "all" to '
                  'list locations for all configured cloud providers.')
)
group.add_option(
'--list-images',
default=None,
            help=('Display a list of images available in configured cloud '
                  'providers. Pass the cloud provider for which available '
                  'images are desired, e.g. "linode", or pass "all" to list '
                  'images for all configured cloud providers.')
)
group.add_option(
'--list-sizes',
default=None,
            help=('Display a list of sizes available in configured cloud '
                  'providers. Pass the cloud provider for which available '
                  'sizes are desired, e.g. "AWS", or pass "all" to list '
                  'sizes for all configured cloud providers.')
)
self.add_option_group(group)
def _mixin_after_parsed(self):
list_options_selected = [
option for option in self.providers_listings_group.option_list if
getattr(self.options, option.dest) is not None
]
if len(list_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format(
'/'.join([
option.get_opt_string() for option in
list_options_selected
])
)
)
class ProfilingPMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 130
def _mixin_setup(self):
group = self.profiling_group = optparse.OptionGroup(
self,
'Profiling support',
# Include description here as a string
)
group.add_option(
'--profiling-path',
dest='profiling_path',
default='/tmp/stats',
            help=('Folder that will hold all generated profiling stats '
                  '(default: /tmp/stats)')
)
group.add_option(
'--enable-profiling',
dest='profiling_enabled',
default=False,
action='store_true',
help=('Enable generating profiling stats'
' in /tmp/stats (--profiling-path)')
)
self.add_option_group(group)
class CloudCredentialsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
group = self.cloud_credentials_group = optparse.OptionGroup(
self,
'Cloud Credentials',
# Include description here as a string
)
group.add_option(
'--set-password',
default=None,
nargs=2,
metavar='<USERNAME> <PROVIDER>',
help=('Configure password for a cloud provider and save it to the keyring.'
' PROVIDER can be specified with or without a driver, for example:'
' "--set-password bob rackspace"'
' or more specific'
' "--set-password bob rackspace:openstack"'
' DEPRECATED!')
)
self.add_option_group(group)
def process_set_password(self):
if self.options.set_password:
raise RuntimeError(
'This functionality is not supported; '
'please see the keyring module at http://docs.saltstack.com/en/latest/topics/sdb/'
)
class MasterOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
PidfileMixin,
SaltfileMixIn)):
description = 'The Salt master, used to control the Salt minions.'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
def setup_config(self):
return config.master_config(self.get_config_file_path())
class MinionOptionParser(six.with_metaclass(OptionParserMeta, MasterOptionParser)): # pylint: disable=no-init
description = (
'The Salt minion, receives commands from a remote Salt master.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'minion')
def setup_config(self):
return config.minion_config(self.get_config_file_path(),
cache_minion_id=True)
class ProxyMinionOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
PidfileMixin,
SaltfileMixIn,
ProxyIdMixIn)): # pylint: disable=no-init
description = (
'The Salt proxy minion, connects to and controls devices not able to run a minion. Receives commands from a remote Salt master.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'proxy'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'proxy')
def setup_config(self):
return config.minion_config(self.get_config_file_path(),
cache_minion_id=False)
class SyndicOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
PidfileMixin,
SaltfileMixIn)):
description = (
'A seamless master of masters. Scale Salt to thousands of hosts or '
'across many different networks.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
def setup_config(self):
return config.syndic_config(
self.get_config_file_path(),
self.get_config_file_path('minion'))
class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
ExtendedTargetOptionsMixIn,
OutputOptionsMixIn,
LogLevelMixIn,
HardCrashMixin,
SaltfileMixIn,
ArgsStdinMixIn)):
default_timeout = 5
usage = '%prog [options] \'<target>\' <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_log_file'
try:
os.getcwd()
except OSError:
sys.exit("Cannot access current working directory. Exiting!")
def _mixin_setup(self):
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help=('Return the data from minions as a group after they '
'all return.')
)
self.add_option(
'-p', '--progress',
default=False,
action='store_true',
help=('Display a progress graph. [Requires `progressbar` python package.]')
)
self.add_option(
'--failhard',
default=False,
action='store_true',
help=('Stop batch execution upon first "bad" return')
)
self.add_option(
'--async',
default=False,
dest='async',
action='store_true',
help=('Run the salt command but don\'t wait for a reply')
)
self.add_option(
'--subset',
default=0,
type=int,
            help=('Execute the routine on a random subset of the targeted '
                  'minions. The minions will be verified to have the '
                  'named function before executing.')
)
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help=('Turn on command verbosity, display jid and active job '
'queries')
)
self.add_option(
'--hide-timeout',
dest='show_timeout',
default=True,
action='store_false',
            help=('Hide minions that time out')
)
self.add_option(
'--show-jid',
default=False,
action='store_true',
help=('Display jid without the additional output of --verbose')
)
self.add_option(
'-b', '--batch',
'--batch-size',
default='',
dest='batch',
help=('Execute the salt job in batch mode, pass either the number '
'of minions to batch at a time, or the percentage of '
'minions to have running')
)
self.add_option(
'-a', '--auth', '--eauth', '--external-auth',
default='',
dest='eauth',
help=('Specify an external authentication system to use.')
)
self.add_option(
'-T', '--make-token',
default=False,
dest='mktoken',
action='store_true',
help=('Generate and save an authentication token for re-use. The '
'token is generated and made available for the period '
'defined in the Salt Master.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set an alternative return method. By default salt will '
'send the return data from the command back to the master, '
'but the return data can be redirected into any number of '
'systems, databases or applications.')
)
self.add_option(
'--return_config',
default='',
metavar='RETURNER_CONF',
            help=('Choose an alternative returner configuration to use for '
                  'the returner selected with --return, allowing the same '
                  'returner to be configured in more than one way.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--args-separator',
dest='args_separator',
default=',',
help=('Set the special argument used as a delimiter between '
'command arguments of compound commands. This is useful '
'when one wants to pass commas as arguments to '
'some of the commands in a compound command.')
)
self.add_option(
'--summary',
dest='cli_summary',
default=False,
action='store_true',
help=('Display summary information about a salt command')
)
self.add_option(
'--username',
dest='username',
nargs=1,
help=('Username for external authentication')
)
self.add_option(
'--password',
dest='password',
nargs=1,
help=('Password for external authentication')
)
self.add_option(
'--metadata',
default='',
metavar='METADATA',
help=('Pass metadata into Salt, used to search jobs.')
)
self.add_option(
'--output-diff',
dest='state_output_diff',
action='store_true',
default=False,
help=('Report only those states that have changed')
)
def _mixin_after_parsed(self):
if len(self.args) <= 1 and not self.options.doc:
try:
self.print_help()
except Exception:
# We get an argument that Python's optparser just can't deal
# with. Perhaps stdout was redirected, or a file glob was
# passed in. Regardless, we're in an unknown state here.
sys.stdout.write('Invalid options passed. Please try -h for '
'help.') # Try to warn if we can.
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
if self.options.doc:
# Include the target
if not self.args:
self.args.insert(0, '*')
if len(self.args) < 2:
# Include the function
self.args.insert(1, 'sys.doc')
if self.args[1] != 'sys.doc':
self.args.insert(1, 'sys.doc')
if len(self.args) > 3:
                self.error('You can only get documentation for one method at a time.')
if self.options.list:
try:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].replace(' ', '').split(',')
else:
self.config['tgt'] = self.args[0].split()
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
else:
try:
self.config['tgt'] = self.args[0]
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
# Detect compound command and set up the data for it
if self.args:
try:
if ',' in self.args[1]:
self.config['fun'] = self.args[1].split(',')
self.config['arg'] = [[]]
cmd_index = 0
if (self.args[2:].count(self.options.args_separator) ==
len(self.config['fun']) - 1):
# new style parsing: standalone argument separator
for arg in self.args[2:]:
if arg == self.options.args_separator:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
else:
# old style parsing: argument separator can be inside args
for arg in self.args[2:]:
if self.options.args_separator in arg:
sub_args = arg.split(self.options.args_separator)
for sub_arg_index, sub_arg in enumerate(sub_args):
if sub_arg:
self.config['arg'][cmd_index].append(sub_arg)
if sub_arg_index != len(sub_args) - 1:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
if len(self.config['fun']) != len(self.config['arg']):
self.exit(42, 'Cannot execute compound command without '
'defining all arguments.\n')
else:
self.config['fun'] = self.args[1]
self.config['arg'] = self.args[2:]
# parse the args and kwargs before sending to the publish interface
self.config['arg'] = \
salt.utils.args.parse_input(self.config['arg'])
except IndexError:
self.exit(42, '\nIncomplete options passed.\n\n')
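        # Worked example (hypothetical invocation, not from the source): a
        # compound command such as
        #   salt '*' cmd.run,test.ping,test.echo 'cat /proc/cpuinfo' , , foo
        # is detected by the comma in args[1]. The two standalone ','
        # arguments equal len(fun) - 1, so the new-style parsing applies and
        # produces fun = ['cmd.run', 'test.ping', 'test.echo'] with
        # arg = [['cat /proc/cpuinfo'], [], ['foo']].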
def setup_config(self):
return config.client_config(self.get_config_file_path())
class SaltCPOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
OutputOptionsMixIn,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
TargetOptionsMixIn,
LogLevelMixIn,
HardCrashMixin,
SaltfileMixIn)):
description = (
        'salt-cp is NOT intended to broadcast large files; it is intended to '
        'handle text files.\nsalt-cp can be used to distribute configuration '
        'files.'
)
default_timeout = 5
usage = '%prog [options] \'<target>\' SOURCE DEST'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_cp_log_file'
def _mixin_after_parsed(self):
# salt-cp needs arguments
if len(self.args) <= 1:
self.print_help()
self.exit(salt.defaults.exitcodes.EX_USAGE)
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
self.config['src'] = self.args[1:-1]
self.config['dest'] = self.args[-1]
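        # Example (hypothetical): ``salt-cp '*' /etc/hosts /etc/motd /tmp``
        # parses to tgt='*', src=['/etc/hosts', '/etc/motd'] and dest='/tmp',
        # since everything between the target and the last argument is
        # treated as a source file.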
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltKeyOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
OutputOptionsMixIn,
RunUserMixin,
HardCrashMixin,
SaltfileMixIn)):
description = 'Salt key is used to manage Salt authentication keys'
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_skip_console_logging_config_ = True
_logfile_config_setting_name_ = 'key_logfile'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'key')
def _mixin_setup(self):
actions_group = optparse.OptionGroup(self, 'Actions')
actions_group.add_option(
'-l', '--list',
default='',
metavar='ARG',
help=('List the public keys. The args '
'"pre", "un", and "unaccepted" will list '
'unaccepted/unsigned keys. '
'"acc" or "accepted" will list accepted/signed keys. '
'"rej" or "rejected" will list rejected keys. '
'"den" or "denied" will list denied keys. '
'Finally, "all" will list all keys.')
)
actions_group.add_option(
'-L', '--list-all',
default=False,
action='store_true',
help='List all public keys. (Deprecated: use "--list all")'
)
actions_group.add_option(
'-a', '--accept',
default='',
help='Accept the specified public key (use --include-all to '
'match rejected keys in addition to pending keys). Globs are '
'supported.'
)
actions_group.add_option(
'-A', '--accept-all',
default=False,
action='store_true',
help='Accept all pending keys'
)
actions_group.add_option(
'-r', '--reject',
default='',
help='Reject the specified public key (use --include-all to '
'match accepted keys in addition to pending keys). Globs are '
'supported.'
)
actions_group.add_option(
'-R', '--reject-all',
default=False,
action='store_true',
help='Reject all pending keys'
)
actions_group.add_option(
'--include-all',
default=False,
action='store_true',
help='Include non-pending keys when accepting/rejecting'
)
actions_group.add_option(
'-p', '--print',
default='',
help='Print the specified public key'
)
actions_group.add_option(
'-P', '--print-all',
default=False,
action='store_true',
help='Print all public keys'
)
actions_group.add_option(
'-d', '--delete',
default='',
help='Delete the specified key. Globs are supported.'
)
actions_group.add_option(
'-D', '--delete-all',
default=False,
action='store_true',
help='Delete all keys'
)
actions_group.add_option(
'-f', '--finger',
default='',
help='Print the specified key\'s fingerprint'
)
actions_group.add_option(
'-F', '--finger-all',
default=False,
action='store_true',
help='Print all keys\' fingerprints'
)
self.add_option_group(actions_group)
self.add_option(
'-q', '--quiet',
default=False,
action='store_true',
help='Suppress output'
)
self.add_option(
'-y', '--yes',
default=False,
action='store_true',
help='Answer Yes to all questions presented, defaults to False'
)
self.add_option(
'--rotate-aes-key',
default=True,
            help=('Setting this to False prevents the master from refreshing '
                  'the key session when keys are deleted or rejected; this '
                  'lowers the security of the key deletion/rejection operation. '
                  'Default is True.')
)
key_options_group = optparse.OptionGroup(
self, 'Key Generation Options'
)
self.add_option_group(key_options_group)
key_options_group.add_option(
'--gen-keys',
default='',
help='Set a name to generate a keypair for use with salt'
)
key_options_group.add_option(
'--gen-keys-dir',
default='.',
            help=('Set the directory to save the generated keypair; only '
                  'works with the "--gen-keys" option; default=.')
)
key_options_group.add_option(
'--keysize',
default=2048,
type=int,
            help=('Set the keysize for the generated key; only works with '
                  'the "--gen-keys" option. The key size must be 2048 or '
                  'higher, otherwise it will be rounded up to 2048; '
                  'default=%default')
)
key_options_group.add_option(
'--gen-signature',
default=False,
action='store_true',
            help=('Create a signature file of the master\'s public-key named '
                  'master_pubkey_signature. The signature can be sent to a '
                  'minion in the master\'s auth-reply and enables the minion '
                  'to verify the master\'s public-key cryptographically. '
                  'This requires a new signing-key-pair which can be auto-created '
                  'with the --auto-create parameter.')
)
key_options_group.add_option(
'--priv',
default='',
type=str,
help=('The private-key file to create a signature with')
)
key_options_group.add_option(
'--signature-path',
default='',
type=str,
help=('The path where the signature file should be written')
)
key_options_group.add_option(
'--pub',
default='',
type=str,
help=('The public-key file to create a signature for')
)
key_options_group.add_option(
'--auto-create',
default=False,
action='store_true',
help=('Auto-create a signing key-pair if it does not yet exist')
)
def process_config_dir(self):
if self.options.gen_keys:
# We're generating keys, override the default behavior of this
# function if we don't have any access to the configuration
# directory.
if not os.access(self.options.config_dir, os.R_OK):
if not os.path.isdir(self.options.gen_keys_dir):
                    # This would be done at a later stage, but we need it now
                    # so no errors are thrown
os.makedirs(self.options.gen_keys_dir)
self.options.config_dir = self.options.gen_keys_dir
super(SaltKeyOptionParser, self).process_config_dir()
# Don't change its mixin priority!
process_config_dir._mixin_prio_ = ConfigDirMixIn._mixin_prio_
def setup_config(self):
keys_config = config.master_config(self.get_config_file_path())
if self.options.gen_keys:
# Since we're generating the keys, some defaults can be assumed
# or tweaked
keys_config['key_logfile'] = os.devnull
keys_config['pki_dir'] = self.options.gen_keys_dir
return keys_config
def process_list(self):
# Filter accepted list arguments as soon as possible
if not self.options.list:
return
if not self.options.list.startswith(('acc', 'pre', 'un', 'rej', 'den', 'all')):
self.error(
'{0!r} is not a valid argument to \'--list\''.format(
self.options.list
)
)
def process_keysize(self):
if self.options.keysize < 2048:
self.error('The minimum value for keysize is 2048')
elif self.options.keysize > 32768:
self.error('The maximum value for keysize is 32768')
def process_gen_keys_dir(self):
# Schedule __create_keys_dir() to run if there's a value for
# --create-keys-dir
self._mixin_after_parsed_funcs.append(self.__create_keys_dir)
def _mixin_after_parsed(self):
# It was decided to always set this to info, since it really all is
# info or error.
self.config['loglevel'] = 'info'
def __create_keys_dir(self, *args):
if not os.path.isdir(self.config['gen_keys_dir']):
os.makedirs(self.config['gen_keys_dir'])
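    # Typical key-generation flow (illustrative invocation, not from the
    # source): ``salt-key --gen-keys=newminion --gen-keys-dir=/tmp/keys``
    # writes the key pair under /tmp/keys; process_config_dir() above also
    # falls back to that directory when the regular config dir is not
    # readable, and setup_config() points pki_dir at it.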
class SaltCallOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
OutputOptionsMixIn,
HardCrashMixin,
SaltfileMixIn,
ArgsStdinMixIn,
ProfilingPMixIn)):
description = ('Salt call is used to execute module functions locally '
'on a minion')
usage = '%prog [options] <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_level_ = 'info'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'minion')
def _mixin_setup(self):
self.add_option(
'-g', '--grains',
dest='grains_run',
default=False,
action='store_true',
help='Return the information generated by the salt grains'
)
self.add_option(
'-m', '--module-dirs',
default=[],
action='append',
help=('Specify an additional directory to pull modules from. '
'Multiple directories can be provided by passing '
'`-m/--module-dirs` multiple times.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--master',
default='',
dest='master',
            help=('Specify the master to use. The minion must be '
                  'authenticated with the master. If this option is omitted, '
                  'the master options from the minion config will be used. '
                  'If multiple masters are set up, the first listed master '
                  'that responds will be used.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set salt-call to pass the return data to one or many '
'returner interfaces.')
)
self.add_option(
'--local',
default=False,
action='store_true',
help='Run salt-call locally, as if there was no master running.'
)
self.add_option(
'--file-root',
default=None,
help='Set this directory as the base file root.'
)
self.add_option(
'--pillar-root',
default=None,
help='Set this directory as the base pillar root.'
)
self.add_option(
'--retcode-passthrough',
default=False,
action='store_true',
help=('Exit with the salt call retcode and not the salt binary '
'retcode')
)
self.add_option(
'--metadata',
default=False,
dest='metadata',
action='store_true',
help=('Print out the execution metadata as well as the return. '
'This will print out the outputter data, the return code, '
'etc.')
)
self.add_option(
'--id',
default='',
dest='id',
help=('Specify the minion id to use. If this option is omitted, '
'the id option from the minion config will be used.')
)
self.add_option(
'--skip-grains',
default=False,
action='store_true',
help=('Do not load grains.')
)
self.add_option(
'--refresh-grains-cache',
default=False,
action='store_true',
help=('Force a refresh of the grains cache')
)
self.add_option(
'-t', '--timeout',
default=60,
dest='auth_timeout',
type=int,
help=('Change the timeout, if applicable, for the running '
'command; default=60')
)
self.add_option(
'--output-diff',
dest='state_output_diff',
action='store_true',
default=False,
help=('Report only those states that have changed')
)
def _mixin_after_parsed(self):
if not self.args and not self.options.grains_run and not self.options.doc:
self.print_help()
self.exit(salt.defaults.exitcodes.EX_USAGE)
elif len(self.args) >= 1:
if self.options.grains_run:
self.error('-g/--grains does not accept any arguments')
if self.options.doc and len(self.args) > 1:
                self.error('You can only get documentation for one method at a time')
self.config['fun'] = self.args[0]
self.config['arg'] = self.args[1:]
def setup_config(self):
opts = config.minion_config(self.get_config_file_path(),
cache_minion_id=True)
if opts.get('transport') == 'raet':
if not self._find_raet_minion(opts): # must create caller minion
opts['__role'] = kinds.APPL_KIND_NAMES[kinds.applKinds.caller]
return opts
def _find_raet_minion(self, opts):
'''
Returns true if local RAET Minion is available
'''
yardname = 'manor'
dirpath = opts['sock_dir']
role = opts.get('id')
if not role:
emsg = ("Missing role required to setup RAET SaltCaller.")
logging.getLogger(__name__).error(emsg + "\n")
raise ValueError(emsg)
kind = opts.get('__role') # application kind 'master', 'minion', etc
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for RAET SaltCaller.".format(kind))
logging.getLogger(__name__).error(emsg + "\n")
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
kinds.APPL_KIND_NAMES[kinds.applKinds.caller], ]:
lanename = "{0}_{1}".format(role, kind)
else:
emsg = ("Unsupported application kind '{0}' for RAET SaltCaller.".format(kind))
logging.getLogger(__name__).error(emsg + '\n')
raise ValueError(emsg)
if kind == kinds.APPL_KIND_NAMES[kinds.applKinds.minion]: # minion check
from raet.lane.yarding import Yard
ha, dirpath = Yard.computeHa(dirpath, lanename, yardname)
if (os.path.exists(ha) and
not os.path.isfile(ha) and
not os.path.isdir(ha)): # minion manor yard
return True
return False
def process_module_dirs(self):
for module_dir in self.options.module_dirs:
# Provide some backwards compatibility with previous comma
# delimited format
if ',' in module_dir:
self.config.setdefault('module_dirs', []).extend(
os.path.abspath(x) for x in module_dir.split(','))
continue
self.config.setdefault('module_dirs',
[]).append(os.path.abspath(module_dir))
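    # Example (hypothetical): ``salt-call -m /srv/custom -m /opt/extra foo.bar``
    # appends both directories to 'module_dirs'; the legacy comma form
    # ``-m /srv/custom,/opt/extra`` is split and handled the same way by
    # process_module_dirs() above.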
class SaltRunOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
LogLevelMixIn,
HardCrashMixin,
SaltfileMixIn,
OutputOptionsMixIn,
ArgsStdinMixIn,
ProfilingPMixIn)):
default_timeout = 1
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_run_log_file'
def _mixin_setup(self):
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Display documentation for runners, pass a runner or '
'runner.function to see documentation on only that runner '
'or function.')
)
self.add_option(
'--async',
default=False,
action='store_true',
help=('Start the runner operation and immediately return control.')
)
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format'
)
self.add_option_group(group)
group.add_option(
'--quiet',
default=False,
action='store_true',
help='Do not display the results of the run'
)
def _mixin_after_parsed(self):
if self.options.doc and len(self.args) > 1:
            self.error('You can only get documentation for one method at a time')
if len(self.args) > 0:
self.config['fun'] = self.args[0]
else:
self.config['fun'] = ''
if len(self.args) > 1:
self.config['arg'] = self.args[1:]
else:
self.config['arg'] = []
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
TargetOptionsMixIn,
OutputOptionsMixIn,
SaltfileMixIn,
HardCrashMixin)):
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'ssh')
_loglevel_config_setting_name_ = 'cli_salt_run_log_file'
def _mixin_setup(self):
self.add_option(
'-r', '--raw', '--raw-shell',
dest='raw_shell',
default=False,
action='store_true',
            help=('Don\'t execute a salt routine on the targets; execute a '
                  'raw shell command instead.')
)
self.add_option(
'--roster',
dest='roster',
default='flat',
            help=('Define which roster system to use; this defines whether a '
                  'database backend, scanner, or custom roster system is '
                  'used. Default is the flat file roster.')
)
self.add_option(
'--roster-file',
dest='roster_file',
default='',
            help=('Define an alternative location for the default roster '
                  'file. The default roster file is called "roster" '
                  'and is found in the same directory as the master config '
                  'file.')
)
self.add_option(
'--refresh', '--refresh-cache',
dest='refresh_cache',
default=False,
action='store_true',
help=('Force a refresh of the master side data cache of the '
'target\'s data. This is needed if a target\'s grains have '
'been changed and the auto refresh timeframe has not been '
'reached.')
)
self.add_option(
'--max-procs',
dest='ssh_max_procs',
default=25,
type=int,
            help='Set the number of concurrent minions to communicate with. '
                 'This value defines how many processes are opened up at a '
                 'time to manage connections; the more running processes, the '
                 'faster communication should be. Default is %default.'
)
self.add_option(
'--extra-filerefs',
dest='extra_filerefs',
default=None,
help='Pass in extra files to include in the state tarball'
)
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help=('Turn on command verbosity, display jid')
)
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help=('Return the data from minions as a group after they '
'all return.')
)
self.add_option(
'-w', '--wipe',
default=False,
action='store_true',
dest='wipe_ssh',
help='Remove the deployment of the salt files when done executing.',
)
self.add_option(
'-W', '--rand-thin-dir',
default=False,
action='store_true',
help=('Select a random temp dir to deploy on the remote system. '
'The dir will be cleaned after the execution.'))
auth_group = optparse.OptionGroup(
self, 'Authentication Options',
'Parameters affecting authentication'
)
auth_group.add_option(
'--priv',
dest='ssh_priv',
help='Ssh private key file'
)
auth_group.add_option(
'-i',
'--ignore-host-keys',
dest='ignore_host_keys',
default=False,
action='store_true',
            help='Ignore SSH host keys; by default host keys are honored '
                 'and connections will ask for approval.'
)
auth_group.add_option(
'--user',
dest='ssh_user',
default='root',
help='Set the default user to attempt to use when '
'authenticating'
)
auth_group.add_option(
'--passwd',
dest='ssh_passwd',
default='',
help='Set the default password to attempt to use when '
'authenticating'
)
auth_group.add_option(
'--askpass',
dest='ssh_askpass',
default=False,
action='store_true',
help='Interactively ask for the SSH password with no echo - avoids '
'password in process args and stored in history'
)
auth_group.add_option(
'--key-deploy',
dest='ssh_key_deploy',
default=False,
action='store_true',
help='Set this flag to attempt to deploy the authorized ssh key '
'with all minions. This combined with --passwd can make '
'initial deployment of keys very fast and easy'
)
auth_group.add_option(
'--identities-only',
dest='ssh_identities_only',
default=False,
action='store_true',
            help='Use only the authentication identity files configured in the '
                 'ssh_config files. See the IdentitiesOnly flag in man ssh_config.'
)
self.add_option_group(auth_group)
scan_group = optparse.OptionGroup(
self, 'Scan Roster Options',
'Parameters affecting scan roster'
)
scan_group.add_option(
'--scan-ports',
default='22',
dest='ssh_scan_ports',
help='Comma-separated list of ports to scan in the scan roster.',
)
scan_group.add_option(
'--scan-timeout',
default=0.01,
dest='ssh_scan_timeout',
help='Scanning socket timeout for the scan roster.',
)
self.add_option_group(scan_group)
def _mixin_after_parsed(self):
if not self.args:
self.print_help()
self.exit(salt.defaults.exitcodes.EX_USAGE)
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
self.config['argv'] = self.args[1:]
if not self.config['argv'] or not self.config['tgt']:
self.print_help()
self.exit(salt.defaults.exitcodes.EX_USAGE)
if self.options.ssh_askpass:
self.options.ssh_passwd = getpass.getpass('Password: ')
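        # Example (hypothetical): ``salt-ssh 'web*' test.ping`` yields
        # tgt='web*' and argv=['test.ping']; with -r/--raw-shell the argv is
        # instead treated as a raw shell command to run on the targets.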
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltCloudParser(six.with_metaclass(OptionParserMeta,
OptionParser,
LogLevelMixIn,
MergeConfigMixIn,
OutputOptionsMixIn,
ConfigDirMixIn,
CloudQueriesMixIn,
ExecutionOptionsMixIn,
CloudProvidersListsMixIn,
CloudCredentialsMixIn,
HardCrashMixin,
SaltfileMixIn)):
# ConfigDirMixIn attributes
_config_filename_ = 'cloud'
# LogLevelMixIn attributes
_default_logging_level_ = 'info'
_logfile_config_setting_name_ = 'log_file'
_loglevel_config_setting_name_ = 'log_level_logfile'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'cloud')
def print_versions_report(self, file=sys.stdout):
print('\n'.join(version.versions_report(include_salt_cloud=True)),
file=file)
self.exit(salt.defaults.exitcodes.EX_OK)
def parse_args(self, args=None, values=None):
try:
# Late import in order not to break setup
from salt.cloud import libcloudfuncs
libcloudfuncs.check_libcloud_version()
except ImportError as exc:
self.error(exc)
return super(SaltCloudParser, self).parse_args(args, values)
def _mixin_after_parsed(self):
if 'DUMP_SALT_CLOUD_CONFIG' in os.environ:
import pprint
            print('Salt cloud configuration dump (INCLUDES SENSITIVE DATA):')
pprint.pprint(self.config)
self.exit(salt.defaults.exitcodes.EX_OK)
if self.args:
self.config['names'] = self.args
def setup_config(self):
try:
return config.cloud_config(self.get_config_file_path())
except salt.exceptions.SaltCloudConfigError as exc:
self.error(exc)
class SPMParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
LogLevelMixIn,
MergeConfigMixIn)):
'''
The cli parser object used to fire up the salt spm system.
'''
description = 'SPM is used to manage 3rd party formulas and other Salt components'
usage = '%prog [options] <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'spm'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'spm')
def _mixin_setup(self):
self.add_option(
'-y', '--assume-yes',
default=False,
action='store_true',
help='Default yes in answer to all confirmation questions.'
)
self.add_option(
'-f', '--force',
default=False,
action='store_true',
            help='Force the requested action, overwriting existing files if necessary.'
)
def _mixin_after_parsed(self):
# spm needs arguments
if len(self.args) <= 1:
if not self.args or self.args[0] not in ('update_repo',):
self.print_help()
self.exit(salt.defaults.exitcodes.EX_USAGE)
def setup_config(self):
return salt.config.spm_config(self.get_config_file_path())
| 38.074916 | 137 | 0.533836 |
b65e80fe93ed9ea6f4e16ff45ab677dd4047b40a | 24050 | py | Python | mne/tests/test_event.py | stevemats/mne-python | 47051833f21bb372d60afc3adbf4305648ac7f69 | ["BSD-3-Clause"] | 1 | 2021-12-21T16:16:40.000Z | 2021-12-21T16:16:40.000Z | mne/tests/test_event.py | stevemats/mne-python | 47051833f21bb372d60afc3adbf4305648ac7f69 | ["BSD-3-Clause"] | 2 | 2018-10-29T09:09:34.000Z | 2019-08-02T16:24:09.000Z | mne/tests/test_event.py | stevemats/mne-python | 47051833f21bb372d60afc3adbf4305648ac7f69 | ["BSD-3-Clause"] | 1 | 2021-07-22T17:57:33.000Z | 2021-07-22T17:57:33.000Z | # -*- coding: utf-8 -*-
# Author: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD-3-Clause
import os.path as op
import os
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
from mne import (read_events, write_events, make_fixed_length_events,
find_events, pick_events, find_stim_steps, pick_channels,
read_evokeds, Epochs, create_info, compute_raw_covariance,
Annotations)
from mne.io import read_raw_fif, RawArray
from mne.event import (define_target_events, merge_events, AcqParserFIF,
shift_time_events)
from mne.datasets import testing
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-eve.fif')
fname_raw = op.join(base_dir, 'test_raw.fif')
fname_gz = op.join(base_dir, 'test-eve.fif.gz')
fname_1 = op.join(base_dir, 'test-1-eve.fif')
fname_txt = op.join(base_dir, 'test-eve.eve')
fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
fname_c_annot = op.join(base_dir, 'test_raw-annot.fif')
# for testing Elekta averager
elekta_base_dir = op.join(testing.data_path(download=False), 'misc')
fname_raw_elekta = op.join(elekta_base_dir, 'test_elekta_3ch_raw.fif')
fname_ave_elekta = op.join(elekta_base_dir, 'test_elekta-ave.fif')
# using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve:
fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
raw_fname = op.join(base_dir, 'test_raw.fif')
def test_fix_stim():
"""Test fixing stim STI016 for Neuromag."""
raw = read_raw_fif(raw_fname, preload=True)
# 32768 (016) + 3 (002+001) bits gets incorrectly coded during acquisition
raw._data[raw.ch_names.index('STI 014'), :3] = [0, -32765, 0]
with pytest.warns(RuntimeWarning, match='STI016'):
events = find_events(raw, 'STI 014')
assert_array_equal(events[0], [raw.first_samp + 1, 0, 32765])
events = find_events(raw, 'STI 014', uint_cast=True)
assert_array_equal(events[0], [raw.first_samp + 1, 0, 32771])
def test_add_events():
"""Test adding events to a Raw file."""
# need preload
raw = read_raw_fif(raw_fname)
events = np.array([[raw.first_samp, 0, 1]])
pytest.raises(RuntimeError, raw.add_events, events, 'STI 014')
raw = read_raw_fif(raw_fname, preload=True)
orig_events = find_events(raw, 'STI 014')
# add some events
events = np.array([raw.first_samp, 0, 1])
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad shape
events[0] = raw.first_samp + raw.n_times + 1
events = events[np.newaxis, :]
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp - 1
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp + 1 # can't actually be first_samp
pytest.raises(ValueError, raw.add_events, events, 'STI FOO')
raw.add_events(events, 'STI 014')
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, np.concatenate((events, orig_events)))
raw.add_events(events, 'STI 014', replace=True)
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, events)
def test_merge_events():
"""Test event merging."""
events_orig = [[1, 0, 1], [3, 0, 2], [10, 0, 3], [20, 0, 4]]
events_replacement = \
[[1, 0, 12],
[3, 0, 12],
[10, 0, 34],
[20, 0, 34]]
events_no_replacement = \
[[1, 0, 1],
[1, 0, 12],
[1, 0, 1234],
[3, 0, 2],
[3, 0, 12],
[3, 0, 1234],
[10, 0, 3],
[10, 0, 34],
[10, 0, 1234],
[20, 0, 4],
[20, 0, 34],
[20, 0, 1234]]
for replace_events, events_good in [(True, events_replacement),
(False, events_no_replacement)]:
events = merge_events(events_orig, [1, 2], 12, replace_events)
events = merge_events(events, [3, 4], 34, replace_events)
events = merge_events(events, [1, 2, 3, 4], 1234, replace_events)
assert_array_equal(events, events_good)
def test_io_events(tmp_path):
"""Test IO for events."""
# Test binary fif IO
events = read_events(fname) # Use as the gold standard
fname_temp = tmp_path / 'events-eve.fif'
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test binary fif.gz IO
events2 = read_events(fname_gz) # Use as the gold standard
assert_array_almost_equal(events, events2)
fname_temp = str(fname_temp) + '.gz'
write_events(fname_temp, events2)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test new format text file IO
fname_temp = tmp_path / 'events.eve'
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
with pytest.warns(RuntimeWarning, match='first row of'):
events2 = read_events(fname_txt_mpr, mask=0, mask_type='not_and')
assert_array_almost_equal(events, events2)
# Test old format text file IO
events2 = read_events(fname_old_txt)
assert_array_almost_equal(events, events2)
write_events(fname_temp, events, overwrite=True)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test event selection
fname_temp = tmp_path / 'events-eve.fif'
a = read_events(fname_temp, include=1)
b = read_events(fname_temp, include=[1])
c = read_events(fname_temp, exclude=[2, 3, 4, 5, 32])
d = read_events(fname_temp, include=1, exclude=[2, 3])
assert_array_equal(a, b)
assert_array_equal(a, c)
assert_array_equal(a, d)
# test reading file with mask=None
events2 = events.copy()
events2[:, -1] = range(events2.shape[0])
write_events(fname_temp, events2, overwrite=True)
events3 = read_events(fname_temp, mask=None)
assert_array_almost_equal(events2, events3)
# Test binary file IO for 1 event
events = read_events(fname_1) # Use as the new gold standard
write_events(fname_temp, events, overwrite=True)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test text file IO for 1 event
fname_temp = tmp_path / 'events.eve'
write_events(fname_temp, events, overwrite=True)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# test warnings on bad filenames
fname2 = tmp_path / 'test-bad-name.fif'
with pytest.warns(RuntimeWarning, match='-eve.fif'):
write_events(fname2, events)
with pytest.warns(RuntimeWarning, match='-eve.fif'):
read_events(fname2)
# No event_id
with pytest.raises(RuntimeError, match='No event_id'):
read_events(fname, return_event_id=True)
def test_io_c_annot():
"""Test I/O of MNE-C -annot.fif files."""
raw = read_raw_fif(fname_raw)
sfreq, first_samp = raw.info['sfreq'], raw.first_samp
events = read_events(fname_c_annot)
events_2, event_id = read_events(fname_c_annot, return_event_id=True)
assert_array_equal(events_2, events)
expected = np.arange(2, 5) * sfreq + first_samp
assert_allclose(events[:, 0], expected, atol=3) # clicking accuracy (samp)
expected = {'Two sec': 1001, 'Three and four sec': 1002}
assert event_id == expected
def test_find_events():
"""Test find events in raw file."""
events = read_events(fname)
raw = read_raw_fif(raw_fname, preload=True)
# let's test the defaulting behavior while we're at it
extra_ends = ['', '_1']
orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
if 'MNE_STIM_CHANNEL_1' in os.environ:
del os.environ['MNE_STIM_CHANNEL_1']
events2 = find_events(raw)
assert_array_almost_equal(events, events2)
# now test with mask
events11 = find_events(raw, mask=3, mask_type='not_and')
with pytest.warns(RuntimeWarning, match='events masked'):
events22 = read_events(fname, mask=3, mask_type='not_and')
assert_array_equal(events11, events22)
# Reset some data for ease of comparison
raw._first_samps[0] = 0
with raw.info._unlock():
raw.info['sfreq'] = 1000
stim_channel = 'STI 014'
stim_channel_idx = pick_channels(raw.info['ch_names'],
include=[stim_channel])
# test digital masking
raw._data[stim_channel_idx, :5] = np.arange(5)
raw._data[stim_channel_idx, 5:] = 0
# 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
# testing mask_type. default = 'not_and'
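    # (illustration: with mask=1, 'not_and' keeps value & ~mask, so the
    # trigger values 1, 2, 3, 4 above become 0, 2, 2, 4 and onsets are found
    # at samples 2 and 4; 'and' keeps value & mask, giving 1, 0, 1, 0)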
assert_array_equal(find_events(raw, shortest_event=1, mask=1,
mask_type='not_and'),
[[2, 0, 2], [4, 2, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=2,
mask_type='not_and'),
[[1, 0, 1], [3, 0, 1], [4, 1, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=3,
mask_type='not_and'),
[[4, 0, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=4,
mask_type='not_and'),
[[1, 0, 1], [2, 1, 2], [3, 2, 3]])
# testing with mask_type = 'and'
assert_array_equal(find_events(raw, shortest_event=1, mask=1,
mask_type='and'),
[[1, 0, 1], [3, 0, 1]])
assert_array_equal(find_events(raw, shortest_event=1, mask=2,
mask_type='and'),
[[2, 0, 2]])
assert_array_equal(find_events(raw, shortest_event=1, mask=3,
mask_type='and'),
[[1, 0, 1], [2, 1, 2], [3, 2, 3]])
assert_array_equal(find_events(raw, shortest_event=1, mask=4,
mask_type='and'),
[[4, 0, 4]])
# test empty events channel
raw._data[stim_channel_idx, :] = 0
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, :4] = 1
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, -1:] = 9
assert_array_equal(find_events(raw), [[14399, 0, 9]])
# Test that we can handle consecutive events with no gap
raw._data[stim_channel_idx, 10:20] = 5
raw._data[stim_channel_idx, 20:30] = 6
raw._data[stim_channel_idx, 30:32] = 5
raw._data[stim_channel_idx, 40] = 6
assert_array_equal(find_events(raw, consecutive=False),
[[10, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=True),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw),
[[10, 0, 5],
[20, 5, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=False),
[[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=True),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
pytest.raises(ValueError, find_events, raw, output='step',
consecutive=True)
assert_array_equal(find_events(raw, output='step', consecutive=True,
shortest_event=1),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[32, 5, 0],
[40, 0, 6],
[41, 6, 0],
[14399, 0, 9],
[14400, 9, 0]])
assert_array_equal(find_events(raw, output='offset'),
[[19, 6, 5],
[31, 0, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
[[10, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=False,
min_duration=0.002),
[[31, 0, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=True,
min_duration=0.002),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
[[10, 0, 5],
[20, 5, 6]])
# test find_stim_steps merge parameter
raw._data[stim_channel_idx, :] = 0
raw._data[stim_channel_idx, 0] = 1
raw._data[stim_channel_idx, 10] = 4
raw._data[stim_channel_idx, 11:20] = 5
assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
stim_channel=stim_channel),
[[0, 0, 1],
[1, 1, 0],
[10, 0, 4],
[11, 4, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=-1,
stim_channel=stim_channel),
[[1, 1, 0],
[10, 0, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=1,
stim_channel=stim_channel),
[[1, 1, 0],
[11, 0, 5],
[20, 5, 0]])
# put back the env vars we trampled on
for s, o in zip(extra_ends, orig_envs):
if o is not None:
os.environ['MNE_STIM_CHANNEL%s' % s] = o
# Test with list of stim channels
raw._data[stim_channel_idx, 1:101] = np.zeros(100)
raw._data[stim_channel_idx, 10:11] = 1
raw._data[stim_channel_idx, 30:31] = 3
stim_channel2 = 'STI 015'
stim_channel2_idx = pick_channels(raw.info['ch_names'],
include=[stim_channel2])
raw._data[stim_channel2_idx, :] = 0
raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
events1 = find_events(raw, stim_channel='STI 014')
events2 = events1.copy()
events2[:, 0] -= 5
events = find_events(raw, stim_channel=['STI 014', stim_channel2])
assert_array_equal(events[::2], events2)
assert_array_equal(events[1::2], events1)
# test initial_event argument
info = create_info(['MYSTI'], 1000, 'stim')
data = np.zeros((1, 1000))
raw = RawArray(data, info)
data[0, :10] = 100
data[0, 30:40] = 200
assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
[[0, 0, 100], [30, 0, 200]])
# test error message for raw without stim channels
raw = read_raw_fif(raw_fname, preload=True)
raw.pick_types(meg=True, stim=False)
# raw does not have annotations
with pytest.raises(ValueError, match="'stim_channel'"):
find_events(raw)
# if raw has annotations, we show a different error message
raw.set_annotations(Annotations(0, 2, "test"))
with pytest.raises(ValueError, match="mne.events_from_annotations"):
find_events(raw)
def test_pick_events():
"""Test pick events in a events ndarray."""
events = np.array([[1, 0, 1],
[2, 1, 0],
[3, 0, 4],
[4, 4, 2],
[5, 2, 0]])
assert_array_equal(pick_events(events, include=[1, 4], exclude=4),
[[1, 0, 1],
[3, 0, 4]])
assert_array_equal(pick_events(events, exclude=[0, 2]),
[[1, 0, 1],
[3, 0, 4]])
assert_array_equal(pick_events(events, include=[1, 2], step=True),
[[1, 0, 1],
[2, 1, 0],
[4, 4, 2],
[5, 2, 0]])
def test_make_fixed_length_events():
"""Test making events of a fixed length."""
raw = read_raw_fif(raw_fname)
events = make_fixed_length_events(raw, id=1)
assert events.shape[1] == 3
events_zero = make_fixed_length_events(raw, 1, first_samp=False)
assert_equal(events_zero[0, 0], 0)
assert_array_equal(events_zero[:, 0], events[:, 0] - raw.first_samp)
# With limits
tmin, tmax = raw.times[[0, -1]]
duration = tmax - tmin
events = make_fixed_length_events(raw, 1, tmin, tmax, duration)
assert_equal(events.shape[0], 1)
# With bad limits (no resulting events)
pytest.raises(ValueError, make_fixed_length_events, raw, 1,
tmin, tmax - 1e-3, duration)
# not raw, bad id or duration
pytest.raises(TypeError, make_fixed_length_events, raw, 2.3)
pytest.raises(TypeError, make_fixed_length_events, 'not raw', 2)
pytest.raises(TypeError, make_fixed_length_events, raw, 23, tmin, tmax,
'abc')
# Let's try some ugly sample rate/sample count combos
data = np.random.RandomState(0).randn(1, 27768)
# This breaks unless np.round() is used in make_fixed_length_events
info = create_info(1, 155.4499969482422)
raw = RawArray(data, info)
events = make_fixed_length_events(raw, 1, duration=raw.times[-1])
assert events[0, 0] == 0
assert len(events) == 1
# Without use_rounding=True this breaks
raw = RawArray(data[:, :21216], info)
events = make_fixed_length_events(raw, 1, duration=raw.times[-1])
assert events[0, 0] == 0
assert len(events) == 1
# Make sure it gets used properly by compute_raw_covariance
cov = compute_raw_covariance(raw, tstep=None)
expected = np.cov(data[:, :21216])
assert_allclose(cov['data'], expected, atol=1e-12)
# overlaps
events = make_fixed_length_events(raw, 1, duration=1)
assert len(events) == 136
events_ol = make_fixed_length_events(raw, 1, duration=1, overlap=0.5)
assert len(events_ol) == 271
events_ol_2 = make_fixed_length_events(raw, 1, duration=1, overlap=0.9)
assert len(events_ol_2) == 1355
assert_array_equal(events_ol_2[:, 0], np.unique(events_ol_2[:, 0]))
with pytest.raises(ValueError, match='overlap must be'):
make_fixed_length_events(raw, 1, duration=1, overlap=1.1)
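    # (rough check: the number of events is floor((tmax - duration) /
    # (duration - overlap)) + 1; with tmax ~= 136.5 s and duration 1 s this
    # gives 136, 271 and 1355 for overlap 0, 0.5 and 0.9, as asserted above)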
def test_define_events():
"""Test defining response events."""
events = read_events(fname)
raw = read_raw_fif(raw_fname)
events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
.2, 0.7, 42, 99)
n_target = events[events[:, 2] == 5].shape[0]
n_miss = events_[events_[:, 2] == 99].shape[0]
n_target_ = events_[events_[:, 2] == 42].shape[0]
assert (n_target_ == (n_target - n_miss))
events = np.array([[0, 0, 1],
[375, 0, 2],
[500, 0, 1],
[875, 0, 3],
[1000, 0, 1],
[1375, 0, 3],
[1100, 0, 1],
[1475, 0, 2],
[1500, 0, 1],
[1875, 0, 2]])
true_lag_nofill = [1500., 1500., 1500.]
true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]
n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)
assert_array_equal(true_lag_fill, lag_fill)
assert_array_equal(true_lag_nofill, lag_nofill)
@testing.requires_testing_data
def test_acqparser():
"""Test AcqParserFIF."""
# no acquisition parameters
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': ''})
# invalid acquisition parameters
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': 'baaa'})
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': 'ERFVersion\n1'})
# test oldish file
raw = read_raw_fif(raw_fname, preload=False)
acqp = AcqParserFIF(raw.info)
# test __repr__()
assert (repr(acqp))
# old file should trigger compat mode
assert (acqp.compat)
# count events and categories
assert_equal(len(acqp.categories), 6)
assert_equal(len(acqp._categories), 17)
assert_equal(len(acqp.events), 6)
assert_equal(len(acqp._events), 17)
# get category
assert (acqp['Surprise visual'])
# test TRIUX file
raw = read_raw_fif(fname_raw_elekta, preload=False)
acqp = raw.acqparser
assert (acqp is raw.acqparser) # same one, not regenerated
# test __repr__()
assert (repr(acqp))
# this file should not be in compatibility mode
assert (not acqp.compat)
# nonexistent category
pytest.raises(KeyError, acqp.__getitem__, 'does not exist')
pytest.raises(KeyError, acqp.get_condition, raw, 'foo')
# category not a string
pytest.raises(TypeError, acqp.__getitem__, 0)
# number of events / categories
assert_equal(len(acqp), 7)
assert_equal(len(acqp.categories), 7)
assert_equal(len(acqp._categories), 32)
assert_equal(len(acqp.events), 6)
assert_equal(len(acqp._events), 32)
# get category
assert (acqp['Test event 5'])
@testing.requires_testing_data
def test_acqparser_averaging():
"""Test averaging with AcqParserFIF vs. Elekta software."""
raw = read_raw_fif(fname_raw_elekta, preload=True)
acqp = AcqParserFIF(raw.info)
for cat in acqp.categories:
# XXX datasets match only when baseline is applied to both,
# not sure where relative dc shift comes from
cond = acqp.get_condition(raw, cat)
eps = Epochs(raw, baseline=(-.05, 0), **cond)
ev = eps.average()
ev_ref = read_evokeds(fname_ave_elekta, cat['comment'],
baseline=(-.05, 0), proj=False)
ev_mag = ev.copy()
ev_mag.pick_channels(['MEG0111'])
ev_grad = ev.copy()
ev_grad.pick_channels(['MEG2643', 'MEG1622'])
ev_ref_mag = ev_ref.copy()
ev_ref_mag.pick_channels(['MEG0111'])
ev_ref_grad = ev_ref.copy()
ev_ref_grad.pick_channels(['MEG2643', 'MEG1622'])
assert_allclose(ev_mag.data, ev_ref_mag.data,
rtol=0, atol=1e-15) # tol = 1 fT
# Elekta put these in a different order
assert ev_grad.ch_names[::-1] == ev_ref_grad.ch_names
assert_allclose(ev_grad.data[::-1], ev_ref_grad.data,
rtol=0, atol=1e-13) # tol = 1 fT/cm
def test_shift_time_events():
"""Test events latency shift by a given amount."""
events = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
EXPECTED = [1, 2, 3]
new_events = shift_time_events(events, ids=None, tshift=1, sfreq=1)
assert all(new_events[:, 0] == EXPECTED)
events = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
EXPECTED = [0, 2, 3]
new_events = shift_time_events(events, ids=[1, 2], tshift=1, sfreq=1)
assert all(new_events[:, 0] == EXPECTED)
| 40.488215 | 79 | 0.588358 |
06e2f7c7a167269cfb3d4b12db86e379b7fbdd6c | 7,105 | py | Python | sdl2/surface.py | cmitu/py-sdl2 | d178fe2a3c86e1cdfdf3cc77920b98b572d86a2d | [
"CC0-1.0"
] | 222 | 2017-08-19T00:51:59.000Z | 2022-02-05T19:39:33.000Z | sdl2/surface.py | cmitu/py-sdl2 | d178fe2a3c86e1cdfdf3cc77920b98b572d86a2d | [
"CC0-1.0"
] | 103 | 2017-08-20T17:13:05.000Z | 2022-02-05T20:20:01.000Z | sdl2/surface.py | cmitu/py-sdl2 | d178fe2a3c86e1cdfdf3cc77920b98b572d86a2d | [
"CC0-1.0"
] | 54 | 2017-08-20T17:13:00.000Z | 2022-01-14T23:51:13.000Z | from ctypes import CFUNCTYPE, Structure, POINTER, c_int, c_void_p
from .dll import _bind
from .stdinc import Uint8, Uint32, SDL_bool
from .blendmode import SDL_BlendMode
from .rect import SDL_Rect
from .pixels import SDL_PixelFormat, SDL_Palette
from .rwops import SDL_RWops, SDL_RWFromFile
__all__ = [
# Structs & Opaque Types
"SDL_BlitMap", "SDL_Surface",
# Defines
"SDL_SWSURFACE", "SDL_PREALLOC", "SDL_RLEACCEL", "SDL_DONTFREE",
"SDL_SIMD_ALIGNED",
# Macro Functions
"SDL_MUSTLOCK",
# Functions
"SDL_CreateRGBSurface", "SDL_CreateRGBSurfaceFrom", "SDL_FreeSurface",
"SDL_SetSurfacePalette", "SDL_LockSurface", "SDL_UnlockSurface",
"SDL_LoadBMP_RW", "SDL_LoadBMP", "SDL_SaveBMP_RW", "SDL_SaveBMP",
"SDL_SetSurfaceRLE", "SDL_HasSurfaceRLE",
"SDL_HasColorKey", "SDL_SetColorKey", "SDL_GetColorKey",
"SDL_SetSurfaceColorMod", "SDL_GetSurfaceColorMod",
"SDL_SetSurfaceAlphaMod", "SDL_GetSurfaceAlphaMod",
"SDL_SetSurfaceBlendMode", "SDL_GetSurfaceBlendMode",
"SDL_SetClipRect", "SDL_GetClipRect", "SDL_ConvertSurface",
"SDL_ConvertSurfaceFormat", "SDL_ConvertPixels", "SDL_FillRect",
"SDL_FillRects", "SDL_UpperBlit", "SDL_BlitSurface", "SDL_LowerBlit",
"SDL_SoftStretch", "SDL_SoftStretchLinear",
"SDL_UpperBlitScaled", "SDL_BlitScaled",
"SDL_LowerBlitScaled", "SDL_CreateRGBSurfaceWithFormat",
"SDL_CreateRGBSurfaceWithFormatFrom", "SDL_DuplicateSurface",
"SDL_SetYUVConversionMode", "SDL_GetYUVConversionMode",
"SDL_GetYUVConversionModeForResolution",
# Callback Functions
"SDL_Blit"
]
SDL_SWSURFACE = 0
SDL_PREALLOC = 0x00000001
SDL_RLEACCEL = 0x00000002
SDL_DONTFREE = 0x00000004
SDL_SIMD_ALIGNED = 0x00000008
SDL_MUSTLOCK = lambda s: ((s.flags & SDL_RLEACCEL) != 0)
SDL_YUV_CONVERSION_MODE = c_int
SDL_YUV_CONVERSION_JPEG = 0
SDL_YUV_CONVERSION_BT601 = 1
SDL_YUV_CONVERSION_BT709 = 2
SDL_YUV_CONVERSION_AUTOMATIC = 3
class SDL_BlitMap(c_void_p):
pass
class SDL_Surface(Structure):
_fields_ = [("flags", Uint32),
("format", POINTER(SDL_PixelFormat)),
("w", c_int), ("h", c_int),
("pitch", c_int),
("pixels", c_void_p),
("userdata", c_void_p),
("locked", c_int),
("list_blitmap", c_void_p),
("clip_rect", SDL_Rect),
("map", POINTER(SDL_BlitMap)),
("refcount", c_int)
]
SDL_Blit = CFUNCTYPE(c_int, POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect))
SDL_CreateRGBSurface = _bind("SDL_CreateRGBSurface", [Uint32, c_int, c_int, c_int, Uint32, Uint32, Uint32, Uint32], POINTER(SDL_Surface))
SDL_CreateRGBSurfaceFrom = _bind("SDL_CreateRGBSurfaceFrom", [c_void_p, c_int, c_int, c_int, c_int, Uint32, Uint32, Uint32, Uint32], POINTER(SDL_Surface))
SDL_CreateRGBSurfaceWithFormat = _bind("SDL_CreateRGBSurfaceWithFormat", [Uint32, c_int, c_int, c_int, Uint32], POINTER(SDL_Surface))
SDL_CreateRGBSurfaceWithFormatFrom = _bind("SDL_CreateRGBSurfaceWithFormatFrom", [c_void_p, c_int, c_int, c_int, c_int, Uint32], POINTER(SDL_Surface))
SDL_FreeSurface = _bind("SDL_FreeSurface", [POINTER(SDL_Surface)])
SDL_SetSurfacePalette = _bind("SDL_SetSurfacePalette", [POINTER(SDL_Surface), POINTER(SDL_Palette)], c_int)
SDL_LockSurface = _bind("SDL_LockSurface", [POINTER(SDL_Surface)], c_int)
SDL_UnlockSurface = _bind("SDL_UnlockSurface", [POINTER(SDL_Surface)])
SDL_DuplicateSurface = _bind("SDL_DuplicateSurface", [POINTER(SDL_Surface)], POINTER(SDL_Surface), added='2.0.6')
SDL_LoadBMP_RW = _bind("SDL_LoadBMP_RW", [POINTER(SDL_RWops), c_int], POINTER(SDL_Surface))
SDL_LoadBMP = lambda fname: SDL_LoadBMP_RW(SDL_RWFromFile(fname, b"rb"), 1)
SDL_SaveBMP_RW = _bind("SDL_SaveBMP_RW", [POINTER(SDL_Surface), POINTER(SDL_RWops), c_int], c_int)
SDL_SaveBMP = lambda surface, fname: SDL_SaveBMP_RW(surface, SDL_RWFromFile(fname, b"wb"), 1)
SDL_SetSurfaceRLE = _bind("SDL_SetSurfaceRLE", [POINTER(SDL_Surface), c_int], c_int)
SDL_HasSurfaceRLE = _bind("SDL_HasSurfaceRLE", [POINTER(SDL_Surface)], SDL_bool, added='2.0.14')
SDL_HasColorKey = _bind("SDL_HasColorKey", [POINTER(SDL_Surface)], SDL_bool, added='2.0.9')
SDL_SetColorKey = _bind("SDL_SetColorKey", [POINTER(SDL_Surface), c_int, Uint32], c_int)
SDL_GetColorKey = _bind("SDL_GetColorKey", [POINTER(SDL_Surface), POINTER(Uint32)], c_int)
SDL_SetSurfaceColorMod = _bind("SDL_SetSurfaceColorMod", [POINTER(SDL_Surface), Uint8, Uint8, Uint8], c_int)
SDL_GetSurfaceColorMod = _bind("SDL_GetSurfaceColorMod", [POINTER(SDL_Surface), POINTER(Uint8), POINTER(Uint8), POINTER(Uint8)], c_int)
SDL_SetSurfaceAlphaMod = _bind("SDL_SetSurfaceAlphaMod", [POINTER(SDL_Surface), Uint8], c_int)
SDL_GetSurfaceAlphaMod = _bind("SDL_GetSurfaceAlphaMod", [POINTER(SDL_Surface), POINTER(Uint8)], c_int)
SDL_SetSurfaceBlendMode = _bind("SDL_SetSurfaceBlendMode", [POINTER(SDL_Surface), SDL_BlendMode], c_int)
SDL_GetSurfaceBlendMode = _bind("SDL_GetSurfaceBlendMode", [POINTER(SDL_Surface), POINTER(SDL_BlendMode)], c_int)
SDL_SetClipRect = _bind("SDL_SetClipRect", [POINTER(SDL_Surface), POINTER(SDL_Rect)], SDL_bool)
SDL_GetClipRect = _bind("SDL_GetClipRect", [POINTER(SDL_Surface), POINTER(SDL_Rect)])
SDL_ConvertSurface = _bind("SDL_ConvertSurface", [POINTER(SDL_Surface), POINTER(SDL_PixelFormat), Uint32], POINTER(SDL_Surface))
SDL_ConvertSurfaceFormat = _bind("SDL_ConvertSurfaceFormat", [POINTER(SDL_Surface), Uint32, Uint32], POINTER(SDL_Surface))
SDL_ConvertPixels = _bind("SDL_ConvertPixels", [c_int, c_int, Uint32, c_void_p, c_int, Uint32, c_void_p, c_int], c_int)
SDL_FillRect = _bind("SDL_FillRect", [POINTER(SDL_Surface), POINTER(SDL_Rect), Uint32], c_int)
SDL_FillRects = _bind("SDL_FillRects", [POINTER(SDL_Surface), POINTER(SDL_Rect), c_int, Uint32], c_int)
SDL_UpperBlit = _bind("SDL_UpperBlit", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int)
SDL_BlitSurface = SDL_UpperBlit
SDL_LowerBlit = _bind("SDL_LowerBlit", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int)
SDL_SoftStretch = _bind("SDL_SoftStretch", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int)
SDL_SoftStretchLinear = _bind("SDL_SoftStretchLinear", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int, added='2.0.16')
SDL_UpperBlitScaled = _bind("SDL_UpperBlitScaled", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int)
SDL_BlitScaled = SDL_UpperBlitScaled
SDL_LowerBlitScaled = _bind("SDL_LowerBlitScaled", [POINTER(SDL_Surface), POINTER(SDL_Rect), POINTER(SDL_Surface), POINTER(SDL_Rect)], c_int)
SDL_SetYUVConversionMode = _bind("SDL_SetYUVConversionMode", [SDL_YUV_CONVERSION_MODE], None, added='2.0.8')
SDL_GetYUVConversionMode = _bind("SDL_GetYUVConversionMode", None, SDL_YUV_CONVERSION_MODE, added='2.0.8')
SDL_GetYUVConversionModeForResolution = _bind("SDL_GetYUVConversionModeForResolution", [c_int, c_int], SDL_YUV_CONVERSION_MODE, added='2.0.8')
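# --- Hedged usage sketch (added for illustration; not part of the original bindings) ---
# A minimal example of the surface API bound above: allocate a plain 32-bit
# software surface, fill it with one color and release it again. The helper name
# is hypothetical, and it assumes the SDL2 shared library could be loaded when
# this module was imported.
def _surface_usage_example(width=64, height=48):
    surface = SDL_CreateRGBSurface(0, width, height, 32, 0, 0, 0, 0)
    if not surface:  # NULL pointer -> SDL could not allocate the surface
        return False
    if SDL_MUSTLOCK(surface.contents):
        SDL_LockSurface(surface)
    SDL_FillRect(surface, None, 0xFF00FF00)  # None = fill the whole surface
    if SDL_MUSTLOCK(surface.contents):
        SDL_UnlockSurface(surface)
    SDL_FreeSurface(surface)
    return True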
| 57.764228 | 161 | 0.760028 |
7fc81868643d58ce070b66c53c9024cc6415dfd0 | 1,296 | py | Python | maize/util/chain_utils.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 14 | 2021-07-21T19:45:05.000Z | 2022-02-09T04:29:51.000Z | maize/util/chain_utils.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 9 | 2021-07-24T09:30:46.000Z | 2021-12-05T19:51:29.000Z | maize/util/chain_utils.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 5 | 2021-10-04T17:33:47.000Z | 2022-03-15T08:37:51.000Z | from typing import List
from clvm.casts import int_from_bytes
from maize.types.blockchain_format.coin import Coin
from maize.types.blockchain_format.program import SerializedProgram
from maize.types.blockchain_format.sized_bytes import bytes32
from maize.types.condition_opcodes import ConditionOpcode
from maize.util.condition_tools import (
conditions_dict_for_solution,
created_outputs_for_conditions_dict,
)
def additions_for_solution(
coin_name: bytes32, puzzle_reveal: SerializedProgram, solution: SerializedProgram, max_cost: int
) -> List[Coin]:
"""
Checks the conditions created by CoinSpend and returns the list of all coins created
"""
err, dic, cost = conditions_dict_for_solution(puzzle_reveal, solution, max_cost)
if err or dic is None:
return []
return created_outputs_for_conditions_dict(dic, coin_name)
def fee_for_solution(puzzle_reveal: SerializedProgram, solution: SerializedProgram, max_cost: int) -> int:
err, dic, cost = conditions_dict_for_solution(puzzle_reveal, solution, max_cost)
if err or dic is None:
return 0
total = 0
for cvp in dic.get(ConditionOpcode.RESERVE_FEE, []):
amount_bin = cvp.vars[0]
amount = int_from_bytes(amount_bin)
total += amount
return total
| 34.105263 | 106 | 0.760031 |
8e8c2de39def63382d0cac2b61d8f9e82f5e29d9 | 658 | py | Python | builder/settings.py | mscroggs/defelement.com | 0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1 | [
"CC-BY-4.0",
"MIT"
] | 9 | 2020-12-30T17:24:46.000Z | 2022-02-16T22:10:47.000Z | builder/settings.py | mscroggs/defelement.com | 0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1 | [
"CC-BY-4.0",
"MIT"
] | 85 | 2021-01-09T09:44:45.000Z | 2022-03-26T07:34:00.000Z | builder/settings.py | mscroggs/defelement.com | 0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2022-03-28T08:15:38.000Z | 2022-03-28T08:15:38.000Z | import os as _os
dir_path = _os.path.join(_os.path.dirname(_os.path.realpath(__file__)), "..")
element_path = _os.path.join(dir_path, "elements")
template_path = _os.path.join(dir_path, "templates")
files_path = _os.path.join(dir_path, "files")
pages_path = _os.path.join(dir_path, "pages")
data_path = _os.path.join(dir_path, "data")
img_path = _os.path.join(dir_path, "img")
html_path = _os.path.join(dir_path, "_html")
htmlelement_path = _os.path.join(html_path, "elements")
htmlimg_path = _os.path.join(html_path, "img")
htmlindices_path = _os.path.join(html_path, "lists")
htmlfamilies_path = _os.path.join(html_path, "families")
github_token = None
| 36.555556 | 77 | 0.74924 |
eaa35e1933dc2ea319add0347a60d1282bea483e | 467 | py | Python | my_codes/input/eos.py | iwamura-lab/my_codes | 70140fe81b70d7ea4969c442771db40054cc109e | [
"MIT"
] | null | null | null | my_codes/input/eos.py | iwamura-lab/my_codes | 70140fe81b70d7ea4969c442771db40054cc109e | [
"MIT"
] | null | null | null | my_codes/input/eos.py | iwamura-lab/my_codes | 70140fe81b70d7ea4969c442771db40054cc109e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import shutil
if __name__ == "__main__":
f = open("./original/POSCAR", "r")
content = [column for column in f]
f.close()
for i in range(99):
content[1] = str(7.00 + (i + 1) / 10**4) + "\n"
fpath = "7_00" + str(i + 1).zfill(2)
shutil.copytree("./original", "./" + fpath)
f = open(fpath + "/POSCAR", "w")
for column in content:
print(column, file=f, end=" ")
f.close()
| 27.470588 | 55 | 0.505353 |
c598d36796c4526459241dfe9f292a7c2038c5ea | 315 | py | Python | destacame/buses/migrations/0002_rename_buss_buss2.py | osocaramelosofer/destacame | 7b816be1d785bcd6525265d708c584545490816e | [
"MIT"
] | null | null | null | destacame/buses/migrations/0002_rename_buss_buss2.py | osocaramelosofer/destacame | 7b816be1d785bcd6525265d708c584545490816e | [
"MIT"
] | null | null | null | destacame/buses/migrations/0002_rename_buss_buss2.py | osocaramelosofer/destacame | 7b816be1d785bcd6525265d708c584545490816e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-12 19:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('buses', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Buss',
new_name='Buss2',
),
]
| 17.5 | 48 | 0.574603 |
0b45c27f40b81a62f5615b7981e0e0d98e568468 | 9,103 | py | Python | library/rabbitmq_queue.py | joe-pll/rabbitmq-ansible-modules | e56ff0caa650c7417d32d39b7e6e961e8ce5e124 | [
"Apache-2.0"
] | null | null | null | library/rabbitmq_queue.py | joe-pll/rabbitmq-ansible-modules | e56ff0caa650c7417d32d39b7e6e961e8ce5e124 | [
"Apache-2.0"
] | null | null | null | library/rabbitmq_queue.py | joe-pll/rabbitmq-ansible-modules | e56ff0caa650c7417d32d39b7e6e961e8ce5e124 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# (c) 2017, Giuseppe Pellegrino <[email protected]>
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
short_description: Manage RabbitMQ queues
description:
  - Create, delete, update a RabbitMQ queue.
requirements: [ "requests >= 1.0.0" ]
author: '"Giuseppe Pellegrino @joe-pll"'
options:
arguments:
description:
- Extra arguments for the queue.
- This argument is a key/value dictionary
auto_delete:
description:
- If yes, the queue will delete itself after at least one consumer has connected, and then all consumers have disconnected.
required: false
choices: [yes, no]
default: no
auto_expires:
description:
- How long a queue can be unused for before it is automatically deleted (milliseconds).
required: false
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they are rejected or expire.
required: false
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered. If this is not set, the message's original routing key will be used.
required: false
durable:
description:
- Durable queues are persisted to disk and thus survive broker restarts.
required: false
default: yes
choices: [yes, no]
login_host:
description:
- The RabbitMQ REST API endpoint host
required: false
default: localhost
login_port:
description:
- The RabbitMQ REST API endpoint port
required: false
default: 15672
login_user:
description:
- The user to authenticate with in RabbitMQ
default: guest
required: false
login_password:
description:
- The password of the user that authenticate in RabbitMQ
default: guest
required: false
max_length:
description:
- How many (ready) messages a queue can contain before it starts to drop them from its head.
required: false
max_length_bytes:
description:
- Total body size for ready messages a queue can contain before it starts to drop them from its head.
required: false
maximum_priority:
description:
- Maximum number of priority levels for the queue to support; if not set, the queue will not support message priorities.
required: false
message_ttl:
description:
- How long a message published to a queue can live before it is discarded (milliseconds).
required: false
name:
description:
- The name of the queue to create or update
required: true
default: null
aliases: [queue]
queue_mode:
description:
      - The mode under which the queue operates
default: default
choices: [default, lazy]
ssl_enabled:
description:
- Whether or not RabbitMQ is listening on HTTPS
default: false
required: false
ssl_verify:
description:
      - Whether or not the SSL certificate should be verified
state:
description:
      - The state of the queue
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Ensure that the queue 'test' exists.
- rabbitmq_queue:
login_host: rabbitmq.example.com
login_user: myuser
login_password: mypassword
name: test
state: present
vhost: /vhost
message_ttl: 50000
# Ensure that the queue 'test' exists with custom arguments
- rabbitmq_queue:
login_host: rabbitmq.example.com
login_user: myuser
login_password: mypassword
name: test
state: present
vhost: /vhost
maximum_priority: 2
arguments:
x-message-ttl: 50000
# Ensure that the queue 'test' is not present
- rabbitmq_queue:
login_host: rabbitmq.example.com
login_user: myuser
login_password: mypassword
name: test
vhost: '/vhost'
state: absent
'''
import urllib
from ansible.module_utils.rabbitmq_common import RabbitMQ
ARGUMENTS_TRANSLATOR = {
"auto_expires": "x-expires",
"dead_letter_exchange": "x-dead-letter-exchange",
"dead_letter_routing_key": "x-dead-letter-routing-key",
"max_length": "x-max-length",
"max_length_bytes": "x-max-length-bytes",
"maximum_priority": "x-max-priority",
"message_ttl": "x-message-ttl",
"queue_mode": "x-queue-mode",
}
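# --- Hedged illustration (added for clarity; not part of the original module) ---
# The translator above maps the module's flat options onto RabbitMQ queue
# x-arguments. The hypothetical helper below shows the effect for a made-up set
# of parameters; the real translation happens inside RabbitMQQueue.exec_module().
def _translate_arguments_example():
    module_params = {"message_ttl": 50000, "queue_mode": "lazy", "auto_expires": None}
    arguments = {}
    for arg_key, x_arg in ARGUMENTS_TRANSLATOR.items():
        if module_params.get(arg_key) is not None:
            arguments[x_arg] = module_params[arg_key]
    return arguments  # {'x-message-ttl': 50000, 'x-queue-mode': 'lazy'}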
class RabbitMQQueue(RabbitMQ):
def __init__(self):
self.arguments_spec = dict(
arguments=dict(type='dict', default=dict(), required=False),
auto_delete=dict(type='bool', default=False, required=False),
auto_expires=dict(type='int', default=None, required=False),
dead_letter_exchange=dict(type='str', default=None, required=False),
dead_letter_routing_key=dict(type='str', default=None, required=False),
durable=dict(type='bool', default=True, required=False),
max_length=dict(type='int', default=None, required=False),
max_length_bytes=dict(type='int', default=None, required=False),
maximum_priority=dict(type='int', default=None, required=False),
message_ttl=dict(type='int', default=None, required=False),
name=dict(type='str', required=True, aliases=['queue']),
queue_mode=dict(type='str', default='default', choices=['default', 'lazy']),
vhost=dict(type='str', default='/', required=False),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
super(RabbitMQQueue, self).__init__(
derived_args_spec=self.arguments_spec,
supports_check_mode=True)
def _build_path(self):
safe_vhost = urllib.quote(self.vhost, '')
path = '/api/queues/{vhost}'.format(vhost=safe_vhost)
return path
def _list(self, path):
request = self.rabbitmq_request('get', path=path)
self.error_handling(request.status_code)
return request.json()
def list_queues(self):
return self._list(self._build_path())
def list_vhosts(self):
return self._list('/api/vhosts')
def add_queue(self):
path = '/api/queues/{vhost}/{queue}'.format(
vhost=urllib.quote(self.vhost, ''),
queue=urllib.quote(self.name, ''))
queue = dict(
durable=self.durable,
auto_delete=self.auto_delete,
arguments=self.arguments,
)
request = self.rabbitmq_request('put', path=path, payload=queue)
self.error_handling(request.status_code)
def delete_queue(self):
path = '/api/queues/{vhost}/{queue}'.format(
vhost=urllib.quote(self.vhost, ''),
queue=urllib.quote(self.name, ''))
request = self.rabbitmq_request('delete', path=path)
self.error_handling(request.status_code)
def exec_module(self, **params):
for key in self.arguments_spec.keys():
setattr(self, key, self.module.params[key])
existing_vhosts_names = [v['name'] for v in self.list_vhosts()]
if self.vhost not in existing_vhosts_names:
self.fail('Vhost `{vhost}` does not exist '
'in Rabbitmq.'.format(vhost=self.vhost))
existing_queues = self.list_queues()
existing_queues = {q['name']: q for q in existing_queues}
if self.state == 'present':
for arg_key, arg_value in ARGUMENTS_TRANSLATOR.items():
if getattr(self, arg_key) is not None:
self.arguments[arg_value] = getattr(self, arg_key)
if self.name not in existing_queues.keys():
self.changed = True
if not self.check_mode:
self.add_queue()
else:
opts_changed = False
existing_queue = existing_queues[self.name]
for arg_k, arg_v in self.arguments.items():
if (arg_k not in existing_queue['arguments'] or
arg_v != existing_queue['arguments'][arg_k]):
opts_changed = True
break
if (existing_queue['durable'] != self.durable or
existing_queue['auto_delete'] != self.auto_delete):
opts_changed = True
if opts_changed:
self.fail('A queue in RabbitMQ can not be updated. '
'Delete the queue and re-create a new one.')
self.ok = True
else:
if self.name in existing_queues.keys():
self.changed = True
if not self.check_mode:
self.delete_queue()
else:
self.ok = True
def main():
"""Call the RabbitMQQueue module."""
RabbitMQQueue()
if __name__ == "__main__":
main()
| 34.093633 | 149 | 0.621004 |
190b88e1f0ff3a466215e94730c9412eb07aa5e9 | 8,052 | py | Python | fusions/robotics/sensor_fusion.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | 7 | 2022-03-03T12:10:46.000Z | 2022-03-08T18:14:40.000Z | fusions/robotics/sensor_fusion.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | fusions/robotics/sensor_fusion.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | 1 | 2022-03-07T08:18:42.000Z | 2022-03-07T08:18:42.000Z | import torch
import torch.nn as nn
from .models_utils import (
duplicate,
gaussian_parameters,
product_of_experts,
sample_gaussian,
)
class SensorFusion(nn.Module):
"""
Regular SensorFusionNetwork Architecture
Number of parameters:
Inputs:
image: batch_size x 3 x 128 x 128
force: batch_size x 6 x 32
proprio: batch_size x 8
action: batch_size x action_dim
"""
def __init__(
self, device, z_dim=128, action_dim=4, encoder=False, deterministic=False
):
super().__init__()
self.z_dim = z_dim
self.encoder_bool = encoder
self.device = device
self.deterministic = deterministic
# zero centered, 1 std normal distribution
self.z_prior_m = torch.nn.Parameter(
torch.zeros(1, self.z_dim), requires_grad=False
)
self.z_prior_v = torch.nn.Parameter(
torch.ones(1, self.z_dim), requires_grad=False
)
self.z_prior = (self.z_prior_m, self.z_prior_v)
# -----------------------
# action fusion network
# -----------------------
self.st_fusion_fc1 = nn.Sequential(
nn.Linear(32 + self.z_dim, 128), nn.LeakyReLU(0.1, inplace=True)
)
self.st_fusion_fc2 = nn.Sequential(
nn.Linear(128, self.z_dim), nn.LeakyReLU(0.1, inplace=True)
)
if deterministic:
# -----------------------
# modality fusion network
# -----------------------
# 4 Total modalities each (2 * z_dim)
self.fusion_fc1 = nn.Sequential(
nn.Linear(4 * 2 * self.z_dim, 128), nn.LeakyReLU(0.1, inplace=True)
)
self.fusion_fc2 = nn.Sequential(
nn.Linear(self.z_dim, self.z_dim), nn.LeakyReLU(0.1, inplace=True)
)
# -----------------------
# weight initialization
# -----------------------
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward_encoder(self, img_encoded, frc_encoded, proprio_encoded, depth_encoded, action_encoded):
# Get encoded outputs
img_out, img_out_convs = img_encoded
depth_out, depth_out_convs = depth_encoded
frc_out = frc_encoded
proprio_out = proprio_encoded
# batch size
batch_dim = img_out.size()[0]
if self.deterministic:
# multimodal embedding
mm_f1 = torch.cat([img_out, frc_out, proprio_out, depth_out], 1).squeeze()
mm_f2 = self.fusion_fc1(mm_f1)
z = self.fusion_fc2(mm_f2)
else:
# Encoder priors
mu_prior, var_prior = self.z_prior
# Duplicate prior parameters for each data point in the batch
mu_prior_resized = duplicate(mu_prior, batch_dim).unsqueeze(2)
var_prior_resized = duplicate(var_prior, batch_dim).unsqueeze(2)
# Modality Mean and Variances
mu_z_img, var_z_img = gaussian_parameters(img_out, dim=1)
mu_z_frc, var_z_frc = gaussian_parameters(frc_out, dim=1)
mu_z_proprio, var_z_proprio = gaussian_parameters(proprio_out, dim=1)
mu_z_depth, var_z_depth = gaussian_parameters(depth_out, dim=1)
# Tile distribution parameters using concatonation
m_vect = torch.cat(
[mu_z_img, mu_z_frc, mu_z_proprio, mu_z_depth, mu_prior_resized], dim=2
)
var_vect = torch.cat(
[var_z_img, var_z_frc, var_z_proprio, var_z_depth, var_prior_resized],
dim=2,
)
# Fuse modalities mean / variances using product of experts
mu_z, var_z = product_of_experts(m_vect, var_vect)
# Sample Gaussian to get latent
z = sample_gaussian(mu_z, var_z, self.device)
if self.encoder_bool or action_encoded is None:
if self.deterministic:
return img_out, frc_out, proprio_out, depth_out, z
else:
return img_out_convs, img_out, frc_out, proprio_out, depth_out, z
else:
# action embedding
act_feat = action_encoded
# state-action feature
mm_act_f1 = torch.cat([z, act_feat], 1)
mm_act_f2 = self.st_fusion_fc1(mm_act_f1)
mm_act_feat = self.st_fusion_fc2(mm_act_f2)
if self.deterministic:
return img_out_convs, mm_act_feat, z
else:
return img_out_convs, mm_act_feat, z, mu_z, var_z, mu_prior, var_prior
def weight_parameters(self):
return [param for name, param in self.named_parameters() if "weight" in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if "bias" in name]
class SensorFusionSelfSupervised(SensorFusion):
"""
Regular SensorFusionNetwork Architecture
Inputs:
image: batch_size x 3 x 128 x 128
force: batch_size x 6 x 32
proprio: batch_size x 8
action: batch_size x action_dim
"""
def __init__(
self, device, z_dim=128, encoder=False, deterministic=False
):
        super().__init__(device, z_dim, encoder=encoder, deterministic=deterministic)  # keywords keep the flags from shifting into action_dim
self.deterministic = deterministic
def forward(self, input, training=False):
img_encoded, frc_encoded, proprio_encoded, depth_encoded, action_encoded = input
if self.encoder_bool:
# returning latent space representation if model is set in encoder mode
z = self.forward_encoder(img_encoded, frc_encoded, proprio_encoded, depth_encoded, action_encoded)
return z
elif action_encoded is None:
z = self.forward_encoder(img_encoded, frc_encoded, proprio_encoded, depth_encoded, None)
pair_out = self.pair_fc(z)
return pair_out
else:
if self.deterministic:
img_out_convs, mm_act_feat, z = self.forward_encoder(
img_encoded, frc_encoded, proprio_encoded, depth_encoded, action_encoded
)
else:
img_out_convs, mm_act_feat, z, mu_z, var_z, mu_prior, var_prior = self.forward_encoder(
img_encoded,
frc_encoded,
proprio_encoded,
depth_encoded,
action_encoded,
)
# ---------------- Training Objectives ----------------
# tile state-action features and append to conv map
batch_dim = mm_act_feat.size(0) # batch size
tiled_feat = mm_act_feat.view(batch_dim, self.z_dim, 1, 1).expand(-1, -1, 2, 2)
if self.deterministic:
return z, mm_act_feat, tiled_feat, img_out_convs
else:
return z, mm_act_feat, tiled_feat, img_out_convs, mu_z, var_z, mu_prior, var_prior
class roboticsConcat(nn.Module):
def __init__(self,name=None):
super(roboticsConcat,self).__init__()
self.name=name
def forward(self,x,training=False):
#print(x[0][0].size())
#print(x[1].size())
#print(x[2].size())
#print(x[3][0].size())
#print(x[4].size())
if self.name=="noconcat":
return [x[0][0].squeeze(),x[1].squeeze(),x[2].squeeze(),x[3][0].squeeze(),x[4]]
if self.name=="image":
return torch.cat([x[0][0].squeeze(),x[1][0].squeeze(),x[2]],1)
if self.name=="simple":
return torch.cat([x[0].squeeze(),x[1]],1)
return torch.cat([x[0][0].squeeze(),x[1].squeeze(),x[2].squeeze(),x[3][0].squeeze(),x[4]],1)
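# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the tensor shapes the deterministic encoder path above expects: each
# modality is assumed to be pre-encoded to a (batch, 2 * z_dim) feature, with the
# image and depth entries additionally carrying their conv maps (unused here).
# The helper name and the random features are illustrative only.
def _sensor_fusion_example(batch=4, z_dim=128):
    device = torch.device("cpu")
    fusion = SensorFusion(device, z_dim=z_dim, encoder=True, deterministic=True)
    feat = lambda: torch.randn(batch, 2 * z_dim)
    img_encoded, depth_encoded = (feat(), None), (feat(), None)  # (features, conv maps)
    frc_encoded, proprio_encoded = feat(), feat()
    *_, z = fusion.forward_encoder(
        img_encoded, frc_encoded, proprio_encoded, depth_encoded, None)
    assert z.shape == (batch, z_dim)  # fused multimodal latent
    return z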
| 36.434389 | 110 | 0.576503 |
3bf99daee5f757406887aa6187264073f6f75a6b | 666 | py | Python | manage.py | Code-Forever-com/Django-CMS | 6a61d4b796f1177e3a342d975ebaa1d1589fc40a | [
"MIT"
] | null | null | null | manage.py | Code-Forever-com/Django-CMS | 6a61d4b796f1177e3a342d975ebaa1d1589fc40a | [
"MIT"
] | null | null | null | manage.py | Code-Forever-com/Django-CMS | 6a61d4b796f1177e3a342d975ebaa1d1589fc40a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_cms.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.956522 | 74 | 0.68018 |
04d05d89ebcc070461b0c57b86a32d4a1dae759c | 228 | py | Python | Codewars/Unique_in_order - (6 kyu).py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | Codewars/Unique_in_order - (6 kyu).py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | Codewars/Unique_in_order - (6 kyu).py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | def unique_in_order(iterable):
result = []
for x in iterable:
if len(result) < 1 or not x == result[len(result) - 1]:
result.append(x)
return result
print(unique_in_order('AAAABBBCCDAABBB')) | 25.333333 | 63 | 0.614035 |
10f4d5b924f334ba6c61210173b806f155073185 | 598 | py | Python | venv/Lib/site-packages/google/api_core/version.py | alexandermarquesm/DiscordBot | 7b0829f885c728538bee5e9df53248213885c199 | [
"MIT"
] | 3 | 2020-12-30T06:37:10.000Z | 2021-03-05T11:56:04.000Z | venv/Lib/site-packages/google/api_core/version.py | alexandermarquesm/DiscordBot | 7b0829f885c728538bee5e9df53248213885c199 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/google/api_core/version.py | alexandermarquesm/DiscordBot | 7b0829f885c728538bee5e9df53248213885c199 | [
"MIT"
] | 1 | 2020-12-21T03:53:03.000Z | 2020-12-21T03:53:03.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.24.1"
| 37.375 | 74 | 0.757525 |
ef9be003cfe7d14435f8f0d3cba8e1e87e0e3059 | 5,853 | py | Python | tests/simulation/test_simulator.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | null | null | null | tests/simulation/test_simulator.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | null | null | null | tests/simulation/test_simulator.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import pytest
from hathor.simulator import FakeConnection, Simulator
from tests import unittest
class SimulatorTestCase(unittest.TestCase):
__test__ = False
seed_config: Optional[int] = None
def setUp(self):
super().setUp()
self.simulator = Simulator(self.seed_config)
self.simulator.start()
print('-'*30)
print('Simulation seed config:', self.simulator.seed)
print('-'*30)
def tearDown(self):
self.simulator.stop()
super().tearDown()
def create_peer(self, enable_sync_v1=None, enable_sync_v2=None):
if enable_sync_v1 is None:
assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on '
'the test class or pass `enable_sync_v1` by argument')
enable_sync_v1 = self._enable_sync_v1
if enable_sync_v2 is None:
assert hasattr(self, '_enable_sync_v2'), ('`_enable_sync_v2` has no default by design, either set one on '
'the test class or pass `enable_sync_v2` by argument')
enable_sync_v2 = self._enable_sync_v2
assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version'
return self.simulator.create_peer(
peer_id=self.get_random_peer_id_from_pool(),
enable_sync_v1=enable_sync_v1,
enable_sync_v2=enable_sync_v2,
)
class BaseRandomSimulatorTestCase(SimulatorTestCase):
def test_one_node(self):
manager1 = self.create_peer()
miner1 = self.simulator.create_miner(manager1, hashpower=100e6)
miner1.start()
self.simulator.run(10)
gen_tx1 = self.simulator.create_tx_generator(manager1, rate=2 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(60 * 60)
def test_two_nodes(self):
manager1 = self.create_peer()
manager2 = self.create_peer()
miner1 = self.simulator.create_miner(manager1, hashpower=10e6)
miner1.start()
self.simulator.run(10)
gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(60)
conn12 = FakeConnection(manager1, manager2, latency=0.150)
self.simulator.add_connection(conn12)
self.simulator.run(60)
miner2 = self.simulator.create_miner(manager2, hashpower=100e6)
miner2.start()
self.simulator.run(120)
gen_tx2 = self.simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx2.start()
self.simulator.run(10 * 60)
miner1.stop()
miner2.stop()
gen_tx1.stop()
gen_tx2.stop()
self.simulator.run(5 * 60)
self.assertTrue(conn12.is_connected)
self.assertTipsEqual(manager1, manager2)
def test_many_miners_since_beginning(self):
nodes = []
miners = []
for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]:
manager = self.create_peer()
for node in nodes:
conn = FakeConnection(manager, node, latency=0.085)
self.simulator.add_connection(conn)
nodes.append(manager)
miner = self.simulator.create_miner(manager, hashpower=hashpower)
miner.start()
miners.append(miner)
self.simulator.run(600)
for miner in miners:
miner.stop()
self.simulator.run(15)
for node in nodes[1:]:
self.assertTipsEqual(nodes[0], node)
@pytest.mark.flaky(max_runs=5, min_passes=1)
def test_new_syncing_peer(self):
nodes = []
miners = []
tx_generators = []
manager = self.create_peer()
nodes.append(manager)
miner = self.simulator.create_miner(manager, hashpower=10e6)
miner.start()
miners.append(miner)
self.simulator.run(600)
for hashpower in [10e6, 8e6, 5e6]:
manager = self.create_peer()
for node in nodes:
conn = FakeConnection(manager, node, latency=0.085)
self.simulator.add_connection(conn)
nodes.append(manager)
miner = self.simulator.create_miner(manager, hashpower=hashpower)
miner.start()
miners.append(miner)
for i, rate in enumerate([5, 4, 3]):
tx_gen = self.simulator.create_tx_generator(nodes[i], rate=rate * 1 / 60., hashpower=1e6,
ignore_no_funds=True)
tx_gen.start()
tx_generators.append(tx_gen)
self.simulator.run(600)
self.log.debug('adding late node')
late_manager = self.create_peer()
for node in nodes:
conn = FakeConnection(late_manager, node, latency=0.300)
self.simulator.add_connection(conn)
self.simulator.run(600)
for tx_gen in tx_generators:
tx_gen.stop()
for miner in miners:
miner.stop()
self.simulator.run_until_complete(600)
for idx, node in enumerate(nodes):
self.log.debug(f'checking node {idx}')
self.assertConsensusValid(node)
self.assertConsensusEqual(node, late_manager)
class SyncV1RandomSimulatorTestCase(unittest.SyncV1Params, BaseRandomSimulatorTestCase):
__test__ = True
class SyncV2RandomSimulatorTestCase(unittest.SyncV2Params, BaseRandomSimulatorTestCase):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeRandomSimulatorTestCase(unittest.SyncBridgeParams, SyncV2RandomSimulatorTestCase):
__test__ = True
| 32.337017 | 118 | 0.623441 |
45562e4e36572bc587246facac1cdc9285bb7c68 | 3,912 | py | Python | src/aioredis_cluster/command_info/__init__.py | VadimPushtaev/aioredis-cluster | 754797c1c6f864cf1436ff3bd82aa3f61a750724 | [
"MIT"
] | 18 | 2020-12-11T16:56:55.000Z | 2021-12-14T21:18:46.000Z | src/aioredis_cluster/command_info/__init__.py | VadimPushtaev/aioredis-cluster | 754797c1c6f864cf1436ff3bd82aa3f61a750724 | [
"MIT"
] | 3 | 2021-02-21T13:10:07.000Z | 2021-11-15T11:06:36.000Z | src/aioredis_cluster/command_info/__init__.py | VadimPushtaev/aioredis-cluster | 754797c1c6f864cf1436ff3bd82aa3f61a750724 | [
"MIT"
] | 3 | 2021-11-09T14:37:59.000Z | 2022-03-18T07:25:33.000Z | from typing import AnyStr, FrozenSet, List, NoReturn, Sequence
import attr
from aioredis_cluster.util import ensure_str
from .commands import COMMANDS
__all__ = [
"COMMANDS",
"CommandsRegistry",
"CommandInfo",
"CommandInfoError",
"UnknownCommandError",
"InvalidCommandError",
"extract_keys",
"create_registry",
]
class CommandInfoError(Exception):
pass
class UnknownCommandError(CommandInfoError):
def __init__(self, command: str) -> None:
super().__init__(command)
self.command = command
class InvalidCommandError(CommandInfoError):
pass
def _raise_wrong_num_of_arguments(cmd) -> NoReturn:
raise InvalidCommandError(f"Wrong number of arguments for {cmd.name!r} command")
@attr.s(slots=True, frozen=True)
class CommandInfo:
name: str = attr.ib()
arity: int = attr.ib()
flags: FrozenSet[str] = attr.ib()
first_key_arg: int = attr.ib()
last_key_arg: int = attr.ib()
key_args_step: int = attr.ib()
def is_readonly(self) -> bool:
return "readonly" in self.flags
class CommandsRegistry:
def __init__(self, commands: Sequence[CommandInfo]) -> None:
self._commands = {cmd.name: cmd for cmd in commands}
def get_info(self, cmd: AnyStr) -> CommandInfo:
cmd_name = ensure_str(cmd).upper()
try:
info = self._commands[cmd_name]
except KeyError:
raise UnknownCommandError(cmd_name) from None
return info
def size(self) -> int:
return len(self._commands)
def _extract_keys_general(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]:
keys: List[bytes] = []
if info.first_key_arg <= 0:
return []
if info.last_key_arg == -1:
last_key_arg = len(exec_command) - 1
else:
last_key_arg = info.last_key_arg
num_of_args = last_key_arg - info.first_key_arg + 1
if info.key_args_step > 1 and num_of_args % info.key_args_step != 0:
_raise_wrong_num_of_arguments(info)
for key_idx in range(info.first_key_arg, last_key_arg + 1, info.key_args_step):
keys.append(exec_command[key_idx])
return keys
def _extract_keys_eval(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]:
abs_arity = abs(info.arity)
num_of_keys = int(exec_command[abs_arity - 1])
keys = exec_command[abs_arity : abs_arity + num_of_keys]
if len(keys) != num_of_keys:
_raise_wrong_num_of_arguments(info)
return list(keys)
def extract_keys(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]:
if len(exec_command) < 1:
raise ValueError("Execute command is empty")
cmd_name = ensure_str(exec_command[0]).upper()
if info.name != cmd_name:
raise ValueError(f"Incorrect info command: {info.name} != {cmd_name}")
if info.arity > 0 and len(exec_command) > info.arity or len(exec_command) < abs(info.arity):
_raise_wrong_num_of_arguments(info)
# special parsing for command
if info.name in {"EVAL", "EVALSHA"}:
keys = _extract_keys_eval(info, exec_command)
else:
keys = _extract_keys_general(info, exec_command)
return keys
def create_registry(raw_commands: Sequence[List]) -> CommandsRegistry:
cmds = []
for raw_cmd in raw_commands:
first_key_arg = raw_cmd[3]
last_key_arg = raw_cmd[4]
key_args_step = raw_cmd[5]
if first_key_arg >= 1 and (key_args_step == 0 or last_key_arg == 0):
raise ValueError("Incorrect command")
cmd = CommandInfo(
name=raw_cmd[0].upper(),
arity=raw_cmd[1],
flags=frozenset(raw_cmd[2]),
first_key_arg=first_key_arg,
last_key_arg=last_key_arg,
key_args_step=key_args_step,
)
cmds.append(cmd)
return CommandsRegistry(cmds)
default_registry = create_registry(COMMANDS)
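# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Builds a tiny registry from two raw COMMAND-style entries (the same layout the
# COMMANDS table uses: name, arity, flags, first key, last key, key step) and
# pulls the key names out of concrete invocations, including the special-cased
# EVAL parsing. The helper name and the sample commands are illustrative only.
def _extract_keys_example():
    registry = create_registry([
        ["get", 2, ["readonly", "fast"], 1, 1, 1],
        ["eval", -3, ["movablekeys"], 0, 0, 0],
    ])
    get_info = registry.get_info(b"GET")
    assert get_info.is_readonly()
    assert extract_keys(get_info, [b"GET", b"mykey"]) == [b"mykey"]
    eval_info = registry.get_info("eval")  # lookup is case-insensitive
    keys = extract_keys(eval_info, [b"EVAL", b"return 1", b"2", b"k1", b"k2"])
    assert keys == [b"k1", b"k2"]
    return keys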
| 26.97931 | 96 | 0.667434 |
828f30c40eba787d10efc63cf2f74b3c9b7c8ca0 | 21,344 | py | Python | tensorflow/python/framework/dtypes.py | yolman230/tensorflow | 8180678e1b71f9e4326b9d84987d78232000bac2 | [
"Apache-2.0"
] | 56 | 2018-06-21T13:47:23.000Z | 2020-05-13T09:31:47.000Z | tensorflow/python/framework/dtypes.py | kaist-ina/tensorflow | 169124c0c9630b719e7f0e55722c38c7ecd6c5ac | [
"Apache-2.0"
] | 2 | 2021-08-25T16:14:13.000Z | 2022-02-10T04:19:15.000Z | tensorflow/python/framework/dtypes.py | kaist-ina/tensorflow | 169124c0c9630b719e7f0e55722c38c7ecd6c5ac | [
"Apache-2.0"
] | 15 | 2018-09-06T14:18:32.000Z | 2020-05-14T06:35:30.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import builtins
from tensorflow.core.framework import types_pb2
# We need to import pywrap_tensorflow prior to the bfloat wrapper to avoid
# protobuf errors where a file is defined twice on MacOS.
# pylint: disable=invalid-import-order,g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python import _pywrap_bfloat16
from tensorflow.python import _dtypes
from tensorflow.python.util.tf_export import tf_export
_np_bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
# pylint: disable=slots-on-old-class
@tf_export("dtypes.DType", "DType")
class DType(_dtypes.DType):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.uint32`: 32-bit unsigned integer.
* `tf.uint64`: 64-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
* `tf.resource`: Handle to a mutable resource.
* `tf.variant`: Values of arbitrary types.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
"""
__slots__ = ()
@property
def _is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
@property
def _as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype).min
except: # bare except as possible raises by finfo not documented
try:
return np.iinfo(self.as_numpy_dtype).min
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("-0x1.FEp127"))
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
# there is no simple way to get the max value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype).max
except: # bare except as possible raises by finfo not documented
try:
return np.iinfo(self.as_numpy_dtype).max
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("0x1.FEp127"))
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
"""Return intensity limits, i.e.
(min, max) tuple, of the dtype.
Args:
clip_negative : bool, optional If True, clip the negative range (i.e.
return 0 for min intensity) even if the image dtype allows negative
values. Returns
min, max : tuple Lower and upper intensity limits.
"""
min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin
if clip_negative:
min = 0 # pylint: disable=redefined-builtin
return min, max
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (other.as_datatype_enum,
other.base_dtype.as_datatype_enum)
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
if type(other) != DType: # pylint: disable=unidiomatic-typecheck
try:
other = as_dtype(other)
except TypeError:
return False
return self._type_enum == other._type_enum # pylint: disable=protected-access
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
# "If a class that overrides __eq__() needs to retain the implementation
# of __hash__() from a parent class, the interpreter must be told this
# explicitly by setting __hash__ = <ParentClass>.__hash__."
# TODO(slebedev): Remove once __eq__ and __ne__ are implemented in C++.
__hash__ = _dtypes.DType.__hash__
def __reduce__(self):
return as_dtype, (self.name,)
# pylint: enable=slots-on-old-class
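# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A few self-contained checks of the DType behaviour documented above, assuming
# a normal TensorFlow installation where these objects are re-exported as
# tf.float32, tf.as_dtype, and so on. The helper name is illustrative only.
def _dtype_usage_example():
  import tensorflow as tf
  assert tf.as_dtype("float32") == tf.float32        # string name -> DType
  assert tf.as_dtype(np.int64) == tf.int64           # numpy type -> DType
  assert tf.complex64.real_dtype == tf.float32       # real part of a complex dtype
  assert tf.int8.min == -128 and tf.int8.max == 127  # representable range
  assert tf.float32.is_compatible_with(tf.float32)
  return tf.as_dtype("float32")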
# Define data type range of numpy dtype
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
tf_export("dtypes.resource", "resource").export_constant(__name__, "resource")
variant = DType(types_pb2.DT_VARIANT)
tf_export("dtypes.variant", "variant").export_constant(__name__, "variant")
float16 = DType(types_pb2.DT_HALF)
tf_export("dtypes.float16", "float16").export_constant(__name__, "float16")
half = float16
tf_export("dtypes.half", "half").export_constant(__name__, "half")
float32 = DType(types_pb2.DT_FLOAT)
tf_export("dtypes.float32", "float32").export_constant(__name__, "float32")
float64 = DType(types_pb2.DT_DOUBLE)
tf_export("dtypes.float64", "float64").export_constant(__name__, "float64")
double = float64
tf_export("dtypes.double", "double").export_constant(__name__, "double")
int32 = DType(types_pb2.DT_INT32)
tf_export("dtypes.int32", "int32").export_constant(__name__, "int32")
uint8 = DType(types_pb2.DT_UINT8)
tf_export("dtypes.uint8", "uint8").export_constant(__name__, "uint8")
uint16 = DType(types_pb2.DT_UINT16)
tf_export("dtypes.uint16", "uint16").export_constant(__name__, "uint16")
uint32 = DType(types_pb2.DT_UINT32)
tf_export("dtypes.uint32", "uint32").export_constant(__name__, "uint32")
uint64 = DType(types_pb2.DT_UINT64)
tf_export("dtypes.uint64", "uint64").export_constant(__name__, "uint64")
int16 = DType(types_pb2.DT_INT16)
tf_export("dtypes.int16", "int16").export_constant(__name__, "int16")
int8 = DType(types_pb2.DT_INT8)
tf_export("dtypes.int8", "int8").export_constant(__name__, "int8")
string = DType(types_pb2.DT_STRING)
tf_export("dtypes.string", "string").export_constant(__name__, "string")
complex64 = DType(types_pb2.DT_COMPLEX64)
tf_export("dtypes.complex64",
"complex64").export_constant(__name__, "complex64")
complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("dtypes.complex128",
"complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("dtypes.int64", "int64").export_constant(__name__, "int64")
bool = DType(types_pb2.DT_BOOL) # pylint: disable=redefined-builtin
tf_export("dtypes.bool", "bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("dtypes.qint8", "qint8").export_constant(__name__, "qint8")
quint8 = DType(types_pb2.DT_QUINT8)
tf_export("dtypes.quint8", "quint8").export_constant(__name__, "quint8")
qint16 = DType(types_pb2.DT_QINT16)
tf_export("dtypes.qint16", "qint16").export_constant(__name__, "qint16")
quint16 = DType(types_pb2.DT_QUINT16)
tf_export("dtypes.quint16", "quint16").export_constant(__name__, "quint16")
qint32 = DType(types_pb2.DT_QINT32)
tf_export("dtypes.qint32", "qint32").export_constant(__name__, "qint32")
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
variant_ref = DType(types_pb2.DT_VARIANT_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
tf_export("dtypes.bfloat16", "bfloat16").export_constant(__name__, "bfloat16")
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint32_ref = DType(types_pb2.DT_UINT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
uint64_ref = DType(types_pb2.DT_UINT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_UINT32: uint32,
types_pb2.DT_UINT64: uint64,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_VARIANT: variant,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT32_REF: uint32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_UINT64_REF: uint64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
types_pb2.DT_VARIANT_REF: variant_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_UINT32: "uint32",
types_pb2.DT_UINT64: "uint64",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_VARIANT: "variant",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT32_REF: "uint32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_UINT64_REF: "uint64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
types_pb2.DT_VARIANT_REF: "variant_ref",
}
_STRING_TO_TF = {
value: _INTERN_TABLE[key] for key, value in _TYPE_TO_STRING.items()
}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8)])
_np_quint8 = np.dtype([("quint8", np.uint8)])
_np_qint16 = np.dtype([("qint16", np.int16)])
_np_quint16 = np.dtype([("quint16", np.uint16)])
_np_qint32 = np.dtype([("qint32", np.int32)])
# _np_bfloat16 is defined by a module import.
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = {
np.float16: float16,
np.float32: float32,
np.float64: float64,
np.int32: int32,
np.int64: int64,
np.uint8: uint8,
np.uint16: uint16,
np.uint32: uint32,
np.uint64: uint64,
np.int16: int16,
np.int8: int8,
np.complex64: complex64,
np.complex128: complex128,
np.object_: string,
np.string_: string,
np.unicode_: string,
np.bool_: bool,
_np_qint8: qint8,
_np_quint8: quint8,
_np_qint16: qint16,
_np_quint16: quint16,
_np_qint32: qint32,
_np_bfloat16: bfloat16,
}
# Map (some) NumPy platform dtypes to TF ones using their fixed-width
# synonyms. Note that platform dtypes are not always simple aliases,
# i.e. reference equality is not guaranteed. See e.g. numpy/numpy#9799.
for pdt in [
np.intc,
np.uintc,
np.int_,
np.uint,
np.longlong,
np.ulonglong,
]:
if pdt not in _NP_TO_TF:
_NP_TO_TF[pdt] = next(
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype)
TF_VALUE_DTYPES = set(_NP_TO_TF.values())
_TF_TO_NP = {
types_pb2.DT_HALF:
np.float16,
types_pb2.DT_FLOAT:
np.float32,
types_pb2.DT_DOUBLE:
np.float64,
types_pb2.DT_INT32:
np.int32,
types_pb2.DT_UINT8:
np.uint8,
types_pb2.DT_UINT16:
np.uint16,
types_pb2.DT_UINT32:
np.uint32,
types_pb2.DT_UINT64:
np.uint64,
types_pb2.DT_INT16:
np.int16,
types_pb2.DT_INT8:
np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING:
np.object,
types_pb2.DT_COMPLEX64:
np.complex64,
types_pb2.DT_COMPLEX128:
np.complex128,
types_pb2.DT_INT64:
np.int64,
types_pb2.DT_BOOL:
np.bool,
types_pb2.DT_QINT8:
_np_qint8,
types_pb2.DT_QUINT8:
_np_quint8,
types_pb2.DT_QINT16:
_np_qint16,
types_pb2.DT_QUINT16:
_np_quint16,
types_pb2.DT_QINT32:
_np_qint32,
types_pb2.DT_BFLOAT16:
_np_bfloat16,
# Ref types
types_pb2.DT_HALF_REF:
np.float16,
types_pb2.DT_FLOAT_REF:
np.float32,
types_pb2.DT_DOUBLE_REF:
np.float64,
types_pb2.DT_INT32_REF:
np.int32,
types_pb2.DT_UINT32_REF:
np.uint32,
types_pb2.DT_UINT8_REF:
np.uint8,
types_pb2.DT_UINT16_REF:
np.uint16,
types_pb2.DT_INT16_REF:
np.int16,
types_pb2.DT_INT8_REF:
np.int8,
types_pb2.DT_STRING_REF:
np.object,
types_pb2.DT_COMPLEX64_REF:
np.complex64,
types_pb2.DT_COMPLEX128_REF:
np.complex128,
types_pb2.DT_INT64_REF:
np.int64,
types_pb2.DT_UINT64_REF:
np.uint64,
types_pb2.DT_BOOL_REF:
np.bool,
types_pb2.DT_QINT8_REF:
_np_qint8,
types_pb2.DT_QUINT8_REF:
_np_quint8,
types_pb2.DT_QINT16_REF:
_np_qint16,
types_pb2.DT_QUINT16_REF:
_np_quint16,
types_pb2.DT_QINT32_REF:
_np_qint32,
types_pb2.DT_BFLOAT16_REF:
_np_bfloat16,
}
_QUANTIZED_DTYPES_NO_REF = frozenset([qint8, quint8, qint16, quint16, qint32])
_QUANTIZED_DTYPES_REF = frozenset(
[qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref])
QUANTIZED_DTYPES = _QUANTIZED_DTYPES_REF.union(_QUANTIZED_DTYPES_NO_REF)
tf_export(
"dtypes.QUANTIZED_DTYPES",
v1=["dtypes.QUANTIZED_DTYPES",
"QUANTIZED_DTYPES"]).export_constant(__name__, "QUANTIZED_DTYPES")
_PYTHON_TO_TF = {
builtins.float: float32,
builtins.bool: bool,
builtins.object: string
}
_ANY_TO_TF = {}
_ANY_TO_TF.update(_INTERN_TABLE)
_ANY_TO_TF.update(_STRING_TO_TF)
_ANY_TO_TF.update(_PYTHON_TO_TF)
_ANY_TO_TF.update(_NP_TO_TF)
# Ensure no collisions.
assert len(_ANY_TO_TF) == sum(
len(d) for d in [_INTERN_TABLE, _STRING_TO_TF, _PYTHON_TO_TF, _NP_TO_TF])
@tf_export("dtypes.as_dtype", "as_dtype")
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
if isinstance(type_value, np.dtype):
try:
return _NP_TO_TF[type_value.type]
except KeyError:
pass
try:
return _ANY_TO_TF[type_value]
except KeyError:
pass
raise TypeError("Cannot convert value %r to a TensorFlow DType." %
(type_value,))
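# --- Illustrative usage sketch (added for exposition; not part of the original
# TensorFlow source). It exercises only names defined earlier in this module and
# assumes numpy is importable as `np`, as it is at the top of the file.
def _as_dtype_examples():
  """Tiny demo of the conversions accepted by `as_dtype`."""
  assert as_dtype("float32") is float32            # canonical string name
  assert as_dtype("double") is float64             # non-canonical alias
  assert as_dtype(np.int64) is int64               # numpy scalar type
  assert as_dtype(np.dtype("float16")) is float16  # numpy dtype instance
  assert as_dtype(types_pb2.DT_BOOL) is bool       # DataType enum value
  assert as_dtype(complex64) is complex64          # a DType passes through unchanged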
| 33.454545 | 84 | 0.707365 |
02db0894b67e36755128d5f976abe8e3e6952841 | 2,569 | py | Python | test/optimization/test_vehicle_routing.py | IanJoel/qiskit-aqua | 7707172d01f0539358f1ce2406f307e830105303 | [
"Apache-2.0"
] | null | null | null | test/optimization/test_vehicle_routing.py | IanJoel/qiskit-aqua | 7707172d01f0539358f1ce2406f307e830105303 | [
"Apache-2.0"
] | null | null | null | test/optimization/test_vehicle_routing.py | IanJoel/qiskit-aqua | 7707172d01f0539358f1ce2406f307e830105303 | [
"Apache-2.0"
] | 2 | 2020-02-13T02:17:58.000Z | 2020-08-09T07:56:25.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Vehicle Routing """
from test.optimization.common import QiskitOptimizationTestCase
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua import aqua_globals
from qiskit.aqua.algorithms import ExactEigensolver
from qiskit.optimization.ising.vehicle_routing import get_operator
# To run only this test, issue:
# python -m unittest test.optimization.test_vehicle_routing.TestVehicleRouting
class TestVehicleRouting(QiskitOptimizationTestCase):
"""Tests vehicle routing Ising translator."""
def setUp(self):
super().setUp()
aqua_globals.random_seed = 100
self.n = 2
self.k = 1
self.instance = np.zeros((self.n, self.n))
self.instance[0, 1] = 0.8
self.instance[1, 0] = 0.8
self.qubit_op = get_operator(self.instance, self.n, self.k)
def test_simple1(self):
""" simple1 test """
# Compares the output in terms of Paulis.
paulis = [(79.6, Pauli(z=[True, False], x=[False, False])),
(79.6, Pauli(z=[False, True], x=[False, False])),
(160.8, Pauli(z=[False, False], x=[False, False]))]
# Could also consider op = Operator(paulis) and then __eq__, but
# that would not use assert_approx_equal
for pauli_a, pauli_b in zip(self.qubit_op._paulis, paulis):
cost_a, binary_a = pauli_a
cost_b, binary_b = pauli_b
            # Note that the construction is a bit iffy: the coefficients can be
            # slightly off even when the random seed is fixed and the ordering
            # is the same. Obviously, when the ordering changes, the test will
            # become invalid.
np.testing.assert_approx_equal(np.real(cost_a), cost_b, 2)
self.assertEqual(binary_a, binary_b)
def test_simple2(self):
""" simple2 test """
# Solve the problem using the exact eigensolver
result = ExactEigensolver(self.qubit_op).run()
arr = np.array([0., 0., 0., 1.])
np.testing.assert_array_almost_equal(arr, result['eigvecs'][0], 4)
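# --- Illustrative sketch (not part of the original test file): building and
# solving the same 2-node / 1-vehicle instance without the unittest machinery.
# It uses only the imports already present above.
def _example_standalone_usage():
    """Return the ground-state eigenvector of the small VRP Ising operator."""
    dist = np.zeros((2, 2))
    dist[0, 1] = 0.8
    dist[1, 0] = 0.8
    qubit_op = get_operator(dist, 2, 1)        # positional args: instance, n, k
    result = ExactEigensolver(qubit_op).run()  # exact diagonalization
    return result['eigvecs'][0]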
| 38.343284 | 77 | 0.659011 |
87fa897d022eea500eacb8bcbc414797ebf935f5 | 477 | py | Python | cogs/utils/db/__init__.py | iomintz/Chiaki-Nanami | f59dc04e34f320689dc4232ecd1b82ecd73fba04 | [
"MIT"
] | 1 | 2018-07-15T21:40:43.000Z | 2018-07-15T21:40:43.000Z | cogs/utils/db/__init__.py | bmintz/Chiaki-Nanami | f59dc04e34f320689dc4232ecd1b82ecd73fba04 | [
"MIT"
] | null | null | null | cogs/utils/db/__init__.py | bmintz/Chiaki-Nanami | f59dc04e34f320689dc4232ecd1b82ecd73fba04 | [
"MIT"
] | null | null | null | """Database utilities for Chiaki
Despite appearances, while this resembles an ORM, a fully fledged ORM is not
the goal -- that would be too complex a project to bother with.
Queries will still be made using raw SQL.
The main purpose is to create a self-documenting table class that can be
used as reference and to make creation and migrations much easier.
"""
from .column import *
from .table import *
from .misc import *
__author__ = 'Ikusaba-san'
__license__ = 'MIT'
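# Illustrative sketch only -- the concrete names below (``Table``, ``Column``,
# ``Text``) are assumptions about what ``.table`` / ``.column`` re-export via
# the star imports above, not a documented API:
#
#     class Tag(Table):
#         name = Column(Text, primary_key=True)
#         content = Column(Text)
#
# Per the module docstring, queries against such a table would still be written
# in raw SQL; the class mainly documents the schema and eases migrations.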
| 28.058824 | 72 | 0.761006 |
012773ad6902bcc4fdfff5597383e3483220b498 | 1,037 | py | Python | esmvalcore/preprocessor/_derive/rtnt.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | esmvalcore/preprocessor/_derive/rtnt.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | esmvalcore/preprocessor/_derive/rtnt.py | zklaus/ESMValCore | 5656fb8b546eeb4d750a424de7ed56a237edfabb | [
"Apache-2.0"
] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z | """Derivation of variable `rtnt`."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable `rtnt`."""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
required = [
{
'short_name': 'rsdt'
},
{
'short_name': 'rsut'
},
{
'short_name': 'rlut'
},
]
return required
@staticmethod
def calculate(cubes):
"""Compute toa net downward total radiation."""
rsdt_cube = cubes.extract_cube(
Constraint(name='toa_incoming_shortwave_flux'))
rsut_cube = cubes.extract_cube(
Constraint(name='toa_outgoing_shortwave_flux'))
rlut_cube = cubes.extract_cube(
Constraint(name='toa_outgoing_longwave_flux'))
rtnt_cube = rsdt_cube - rsut_cube - rlut_cube
return rtnt_cube
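# Illustrative usage sketch (not part of the original module), assuming `cubes`
# is an ``iris.cube.CubeList`` holding rsdt, rsut and rlut on identical grids
# and that the project name is, for example, 'CMIP6':
#
#     required = DerivedVariable.required(project='CMIP6')  # rsdt, rsut, rlut
#     rtnt_cube = DerivedVariable.calculate(cubes)          # rsdt - rsut - rlut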
| 25.925 | 59 | 0.575699 |
08f7f32298a1481af6c836b7af8def45f7956c94 | 6,768 | py | Python | dac/gail.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | dac/gail.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | dac/gail.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of GAIL with WGAN discriminator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import summary as contrib_summary
from tensorflow.contrib.eager.python import tfe as contrib_eager_python_tfe
from tensorflow.contrib.gan.python.losses.python import losses_impl as contrib_gan_python_losses_python_losses_impl
class Discriminator(tf.keras.Model):
"""Implementation of a discriminator network."""
def __init__(self, input_dim):
"""Initializes a discriminator.
Args:
input_dim: size of the input space.
"""
super(Discriminator, self).__init__()
kernel_init = tf.keras.initializers.Orthogonal(gain=1.0)
self.main = tf.keras.Sequential([
tf.layers.Dense(
units=256,
input_shape=(input_dim,),
activation='tanh',
kernel_initializer=kernel_init),
tf.layers.Dense(
units=256, activation='tanh', kernel_initializer=kernel_init),
tf.layers.Dense(units=1, kernel_initializer=kernel_init)
])
def call(self, inputs):
"""Performs a forward pass given the inputs.
Args:
inputs: a batch of observations (tfe.Variable).
Returns:
Values of observations.
"""
return self.main(inputs)
class GAIL(object):
"""Implementation of GAIL (https://arxiv.org/abs/1606.03476).
Instead of the original GAN, it uses WGAN (https://arxiv.org/pdf/1704.00028).
"""
def __init__(self, input_dim, subsampling_rate, lambd=10.0, gail_loss='airl'):
"""Initializes actor, critic, target networks and optimizers.
Args:
input_dim: size of the observation space.
subsampling_rate: subsampling rate that was used for expert trajectories.
lambd: gradient penalty coefficient for wgan.
gail_loss: gail loss to use.
"""
self.subsampling_rate = subsampling_rate
self.lambd = lambd
self.gail_loss = gail_loss
with tf.variable_scope('discriminator'):
self.disc_step = contrib_eager_python_tfe.Variable(
0, dtype=tf.int64, name='step')
self.discriminator = Discriminator(input_dim)
self.discriminator_optimizer = tf.train.AdamOptimizer()
self.discriminator_optimizer._create_slots(self.discriminator.variables) # pylint: disable=protected-access
def update(self, batch, expert_batch):
"""Updates the WGAN potential function or GAN discriminator.
Args:
batch: A batch from training policy.
expert_batch: A batch from the expert.
"""
obs = contrib_eager_python_tfe.Variable(
np.stack(batch.obs).astype('float32'))
expert_obs = contrib_eager_python_tfe.Variable(
np.stack(expert_batch.obs).astype('float32'))
expert_mask = contrib_eager_python_tfe.Variable(
np.stack(expert_batch.mask).astype('float32'))
# Since expert trajectories were resampled but no absorbing state,
# statistics of the states changes, we need to adjust weights accordingly.
expert_mask = tf.maximum(0, -expert_mask)
expert_weight = expert_mask / self.subsampling_rate + (1 - expert_mask)
action = contrib_eager_python_tfe.Variable(
np.stack(batch.action).astype('float32'))
expert_action = contrib_eager_python_tfe.Variable(
np.stack(expert_batch.action).astype('float32'))
inputs = tf.concat([obs, action], -1)
expert_inputs = tf.concat([expert_obs, expert_action], -1)
# Avoid using tensorflow random functions since it's impossible to get
# the state of the random number generator used by TensorFlow.
alpha = np.random.uniform(size=(inputs.get_shape()[0], 1))
alpha = contrib_eager_python_tfe.Variable(alpha.astype('float32'))
inter = alpha * inputs + (1 - alpha) * expert_inputs
with tf.GradientTape() as tape:
output = self.discriminator(inputs)
expert_output = self.discriminator(expert_inputs)
with contrib_summary.record_summaries_every_n_global_steps(
100, self.disc_step):
gan_loss = contrib_gan_python_losses_python_losses_impl.modified_discriminator_loss(
expert_output,
output,
label_smoothing=0.0,
real_weights=expert_weight)
contrib_summary.scalar(
'discriminator/expert_output',
tf.reduce_mean(expert_output),
step=self.disc_step)
contrib_summary.scalar(
'discriminator/policy_output',
tf.reduce_mean(output),
step=self.disc_step)
with tf.GradientTape() as tape2:
tape2.watch(inter)
output = self.discriminator(inter)
grad = tape2.gradient(output, [inter])[0]
grad_penalty = tf.reduce_mean(tf.pow(tf.norm(grad, axis=-1) - 1, 2))
loss = gan_loss + self.lambd * grad_penalty
with contrib_summary.record_summaries_every_n_global_steps(
100, self.disc_step):
contrib_summary.scalar(
'discriminator/grad_penalty', grad_penalty, step=self.disc_step)
with contrib_summary.record_summaries_every_n_global_steps(
100, self.disc_step):
contrib_summary.scalar(
'discriminator/loss', gan_loss, step=self.disc_step)
grads = tape.gradient(loss, self.discriminator.variables)
self.discriminator_optimizer.apply_gradients(
zip(grads, self.discriminator.variables), global_step=self.disc_step)
def get_reward(self, obs, action, next_obs): # pylint: disable=unused-argument
if self.gail_loss == 'airl':
inputs = tf.concat([obs, action], -1)
return self.discriminator(inputs)
else:
inputs = tf.concat([obs, action], -1)
return -tf.log(1 - tf.nn.sigmoid(self.discriminator(inputs)) + 1e-8)
@property
def variables(self):
"""Returns all variables including optimizer variables.
Returns:
A dictionary of all variables that are defined in the model.
variables.
"""
disc_vars = (
self.discriminator.variables + self.discriminator_optimizer.variables()
+ [self.disc_step])
return disc_vars
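# --- Illustrative sketch (not part of the original module): scoring a batch of
# (observation, action) pairs with a freshly initialized discriminator. The
# dimensions below are arbitrary assumptions, and eager execution is assumed.
def _example_discriminator_usage(obs_dim=11, act_dim=3, batch_size=4):
  """Returns one logit per (obs, action) pair; higher means more expert-like."""
  disc = Discriminator(obs_dim + act_dim)
  obs = np.random.randn(batch_size, obs_dim).astype('float32')
  act = np.random.randn(batch_size, act_dim).astype('float32')
  inputs = tf.concat([obs, act], -1)
  return disc(inputs)  # shape: (batch_size, 1)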
| 35.621053 | 115 | 0.701684 |
6c8e41b6d4d1ddef572cbd1a611ea6d7a153ad59 | 13,541 | py | Python | h2o-perf/bench/py/h2oPerf/Alerting.py | gigliovale/h2o | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | [
"Apache-2.0"
] | 882 | 2015-05-22T02:59:21.000Z | 2022-02-17T05:02:48.000Z | h2o-perf/bench/py/h2oPerf/Alerting.py | VonRosenchild/h2o-2 | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | [
"Apache-2.0"
] | 1 | 2015-01-14T23:54:56.000Z | 2015-01-15T20:04:17.000Z | h2o-perf/bench/py/h2oPerf/Alerting.py | VonRosenchild/h2o-2 | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | [
"Apache-2.0"
] | 392 | 2015-05-22T17:04:11.000Z | 2022-02-22T09:04:39.000Z | import abc
import MySQLdb
import requests
import os
from datetime import datetime, timedelta
from LMSAdaptiveFilter import LMSAdaptiveFilter
# Global queries used throughout
TEST_NAMES_QUERY = \
"""
SELECT DISTINCT tr.test_name
FROM test_run tr
INNER JOIN test_run_phase_result tp
USING(test_run_id)
WHERE tp.end_epoch_ms >= {}
AND tp.phase_name = 'model';
"""
MOST_RECENTLY_RUN_TEST_NAME = \
"""
SELECT build_version
FROM test_run
WHERE test_name = '{}'
ORDER BY build_version DESC
LIMIT 1;
"""
CONTAMINATED = \
"""
SELECT contaminated
FROM test_run
WHERE test_name = '{}'
ORDER BY build_version DESC
"""
MULTIPLE_IDS = \
"""
SELECT tr.test_run_id, COUNT(*) cnt
FROM test_run tr
INNER JOIN test_run_phase_result tp
USING(test_run_id)
WHERE tp.phase_name = 'model'
AND tr.build_version LIKE '%{}%'
AND tr.test_name = '{}'
GROUP BY tr.test_run_id
HAVING cnt > 1;
"""
CORRECT = \
"""
SELECT correctness_passed
FROM test_run
WHERE test_name = '{}'
ORDER BY build_version DESC;
"""
TIMING = \
"""
SELECT (tp.end_epoch_ms - tp.start_epoch_ms) / 1000 elapsed
FROM test_run tr
INNER JOIN test_run_phase_result tp
USING (test_run_id)
WHERE tr.timing_passed = 1
AND tr.test_name = '{}'
ORDER BY tr.start_epoch_ms DESC
LIMIT {};
"""
# A dictionary of the queries appearing above
QUERIES = {
"test_names": TEST_NAMES_QUERY,
"test_build_num": MOST_RECENTLY_RUN_TEST_NAME,
"contaminated": CONTAMINATED,
"multiple_ids": MULTIPLE_IDS,
"correct": CORRECT,
"timing": TIMING,
}
CORRECT_ALERT_HEADER = \
"""
Correctness Alerts
------------------
"""
TIMING_ALERT_HEADER = \
"""
Timing Alerts
-------------
"""
INFRASTRUCTURE_ALERT_HEADER = \
"""
Infrastructure Alerts
---------------------
"""
class Alert:
"""
The Alert class.
This is an abstract class whose subclasses contain methods and state for sending out email alerts
when a performance test fails.
There are three flavors of failure for which there is alerting:
1. Speed related.
2. Correctness related.
3. Infrastructure related.
The following sets of conditions must be satisfied in order for an alert to be sent out:
1. Speed related:
Condition 1: No multiple test IDs for the same run phase and build number (infrastructure problem)
Condition 2: Test is not contaminated
Condition 3: The run time of the test phase is detected by the LMS adaptive filter.
2. Correctness related:
Condition 1: No multiple test IDs for the same run phase and build number (infrastructure problem)
Condition 2: Test is not contaminated
Condition 3: The correct field for the test_run is FALSE or 0
3. Infrastructure related:
Condition 1: Multiple test IDs for the same phase and build number.
Condition 2: Test did not run/complete.
NB: If the build fails, Jenkins already alerts [email protected]
Developer Note:
Private methods (those that begin with '_') are to be used for performing queries of the MySQL database PerfDB.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, order):
"""
Every Alert object will have a list of test names that have runs from the last N days.
:param order: The order is the number of days back to look back (this is the `N` above).
:return:
"""
self.order = order
# Setup a connection to the db
self.host = 'mr-0x1'
self.db = MySQLdb.connect(host=self.host,
user="spencer",
passwd="spencer",
db="PerfDB",
port=3306)
self.cursor = self.db.cursor()
# A list of test names from the last `order` days
self.test_names = self._get_test_names()
# A dictionary of tests to alert on and messages to alert with
self.alert_list = {}
@abc.abstractmethod
def should_alert(self, test_name):
"""
Retrieve run data from PerfDB for this test_name. If no recent data available,
then create and send infrastructure alert.
Recent data means: Build number matches current build number from master.
"""
return
def is_recent(self, test_name):
cur_bn = Alert._get_build_number('master')
test_bn = self._get_test_build_number(test_name)
return cur_bn == test_bn
def was_contaminated(self, test_name):
"""
Check the most recent run of this test_name.
        If the latest run is not from the current build, returns False.
Expect that the InfrastructureAlert object will handle the alerting for inconsistent build numbers.
"""
if self.is_recent(test_name):
return self._check_contaminated(test_name)
return False
def has_multiple_ids(self, test_name):
"""
Check if the test_name has multiple IDs.
        If the latest run is not from the current build, returns False.
"""
if self.is_recent(test_name):
return self._multiple_ids_helper(test_name)
return False
def add_to_alert_list(self, test_name, message):
self.alert_list[test_name] = message
def _multiple_ids_helper(self, test_name):
test_build_number = self._get_test_build_number(test_name, True).strip('"')
query = QUERIES["multiple_ids"].format(test_build_number, test_name.strip('"'))
self.cursor.execute(query)
res = self.cursor.fetchall()
if len(res) != 0:
return True
return False
def _check_contaminated(self, test_name):
query = QUERIES["contaminated"].format(test_name.strip('"'))
self.cursor.execute(query)
res = self.cursor.fetchone()
return res[0] == 0
def _get_test_build_number(self, test_name, full=False):
query = QUERIES["test_build_num"].format(test_name.strip('"'))
self.cursor.execute(query)
bn = self.cursor.fetchone()
if full:
return bn[0].strip()
return bn[0].strip().split('.')[-1]
def _get_test_names(self):
epoch = datetime.utcfromtimestamp(0)
dt = datetime.now()
dt2 = dt - timedelta(self.order)
reference_time_millis = (dt2 - epoch).total_seconds() * 1000
test_names_query = QUERIES["test_names"].format(reference_time_millis)
self.cursor.execute(test_names_query)
test_names = self.cursor.fetchall()
return [test_names[i][0] for i in range(len(test_names))]
@staticmethod
def _get_build_number(branch):
build_number = requests.get("http://s3.amazonaws.com/h2o-release/h2o/" + branch + "/latest").text
return str(build_number.strip())
class CorrectAlert(Alert):
"""
This class is responsible for sending out alerts when a test fails its correctness criteria.
The correctness of each test is stored in the `test_run` table under the column `correctness_passed`, which
is a boolean:
0: Incorrect
1: Correct
"""
def __init__(self, order):
super(CorrectAlert, self).__init__(order)
def should_alert(self, test_name):
if not self.was_contaminated(test_name) \
and not self.has_multiple_ids(test_name) \
and self.is_recent(test_name):
return self._is_correct(test_name)
return False
def _is_correct(self, test_name):
query = QUERIES["correct"].format(test_name.strip('"'))
self.cursor.execute(query)
res = self.cursor.fetchone()
return res[0] == 0 # 1: Correct, 0: Incorrect
class SpeedAlert(Alert):
"""
This class is responsible for sending out alerts when a test fails its timing criteria.
    Unlike correctness alerts, timing alerts are based on an outlier detector. Here we use the
    LMS adaptive filter (which additionally implements the exclusion of outliers) to detect test run times that are
    out of the ordinary.
    This is where the `order`, or in time-series parlance `lag` (or `lag order`), comes into play. This is just the
number of previous data points we want to include in our evaluation of the new data point. If the incoming point is
"OK" then nothing happens and it does not update the `timing_passed` field in the `test_run` table. If it is
determined to be an outlier, the `timing_passed` field switches from 1 -> 0. All points with a `timing_passed` value
of 0 are excluded from future computations (as we do not wish to contaminate the running statistics by including
spurious results).
"""
def __init__(self, order):
super(SpeedAlert, self).__init__(order)
def should_alert(self, test_name):
if not self.was_contaminated(test_name) \
and not self.has_multiple_ids(test_name) \
and self.is_recent(test_name):
return self._is_ontime(test_name)
return False
def _is_ontime(self, test_name):
"""
The input stream is an incoming stream of elapsed times from the last `order` runs of the given test_name.
The input stream is initially sorted by most recent to furthest back in time. Therefore, exclude the first
entry, and perform the LMS on the next `order - 1` data points.
"""
input_stream = self._get_input_stream(test_name)
if input_stream == "NA": return False # This condition should never happen
if len(input_stream) == 1: return True # Only have a single data point, nothing to compute.
query_point = input_stream[0]
data_points = input_stream[1:]
fil = LMSAdaptiveFilter(len(data_points))
for t in data_points:
fil.X.add(t)
return fil.is_signal_outlier(query_point)
def _get_input_stream(self, test_name):
query = QUERIES["timing"].format(test_name.strip('"'), self.order)
self.cursor.execute(query)
res = self.cursor.fetchall()
if len(res) == 0:
return "NA"
if len(res) == 1:
            return [int(res[0][0])]
if len(res) > 1:
return [int(res[i][0]) for i in range(len(res))]
class InfrastructureAlert(Alert):
"""
This class is responsible for sending out alerts when a test fails for reasons other than speed and correctness.
"""
def __init__(self, order):
super(InfrastructureAlert, self).__init__(order)
def should_alert(self, test_name):
return not self.is_recent(test_name)
class Alerter:
"""
The Alerter class.
This class manages the various types of alerts that may occur. In addition, this class handles the actual
alerting by email.
"""
def __init__(self, order, names):
self.correct_alert = CorrectAlert(order)
self.speed_alert = SpeedAlert(order)
self.infrastructure_alert = InfrastructureAlert(order)
self.test_list = names
self.test_names = self.correct_alert.test_names # `correct_alert` chosen WLOG
def alert(self):
self._gather_alerts()
self._do_alert()
def _gather_alerts(self):
for name in self.test_names:
if name not in self.test_list: continue
if self.correct_alert.should_alert(name):
self.correct_alert.add_to_alert_list(name, "Failed correctness.")
if self.speed_alert.should_alert(name):
self.speed_alert.add_to_alert_list(name, "Failed timing.")
if self.infrastructure_alert.should_alert(name):
self.infrastructure_alert.add_to_alert_list(name, "Test failed to run.")
for name in self.test_list:
if name not in self.test_names:
if name not in self.infrastructure_alert.alert_list:
self.infrastructure_alert.add_to_alert_list(name, "Test failed to run.")
def _do_alert(self):
this_path = os.path.dirname(os.path.realpath(__file__))
res_path = os.path.join(this_path, '..', "results", "Alerts.txt")
with open(res_path, 'w') as f:
# Check & Report Correctness Alerts
f.write(CORRECT_ALERT_HEADER)
f.write('\n')
if len(self.correct_alert.alert_list) > 0:
for key in self.correct_alert.alert_list:
f.write("FAIL " + key + " failed: " + self.correct_alert.alert_list[key])
f.write('\n')
else:
f.write("All tests were correct.")
f.write("\n")
# Check & Report Timing Alerts
f.write(TIMING_ALERT_HEADER)
f.write('\n')
if len(self.speed_alert.alert_list) > 0:
for key in self.speed_alert.alert_list:
f.write("FAIL " + key + " failed: " + self.speed_alert.alert_list[key])
f.write('\n')
else:
f.write("No tests failed due to untimeliness.")
f.write("\n")
# Check & Report Infrastructure Alerts
f.write(INFRASTRUCTURE_ALERT_HEADER)
f.write('\n')
if len(self.infrastructure_alert.alert_list) > 0:
for key in self.infrastructure_alert.alert_list:
f.write("FAIL " + key + " failed: " + self.infrastructure_alert.alert_list[key])
f.write('\n')
else:
f.write("All tests ran.")
f.write("\n")
| 33.434568 | 120 | 0.637028 |
056fcba404ad61d34f1bfb379d9d3ee82f08017d | 8,996 | py | Python | dyspatch_client/models/template_meta_read.py | getdyspatch/dyspatch-python | 23ffb05eff820944acf235fa3b225bf8caec903d | [
"Apache-2.0"
] | null | null | null | dyspatch_client/models/template_meta_read.py | getdyspatch/dyspatch-python | 23ffb05eff820944acf235fa3b225bf8caec903d | [
"Apache-2.0"
] | null | null | null | dyspatch_client/models/template_meta_read.py | getdyspatch/dyspatch-python | 23ffb05eff820944acf235fa3b225bf8caec903d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Dyspatch API
# Introduction The Dyspatch API is based on the REST paradigm, and features resource based URLs with standard HTTP response codes to indicate errors. We use standard HTTP authentication and request verbs, and all responses are JSON formatted. See our [Implementation Guide](https://docs.dyspatch.io/development/implementing_dyspatch/) for more details on how to implement Dyspatch. ## API Client Libraries Dyspatch provides API Clients for popular languages and web frameworks. - [Java](https://github.com/getdyspatch/dyspatch-java) - [Javascript](https://github.com/getdyspatch/dyspatch-javascript) - [Python](https://github.com/getdyspatch/dyspatch-python) - [C#](https://github.com/getdyspatch/dyspatch-dotnet) - [Go](https://github.com/getdyspatch/dyspatch-golang) - [Ruby](https://github.com/getdyspatch/dyspatch-ruby) # noqa: E501
The version of the OpenAPI document: 2020.11
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dyspatch_client.configuration import Configuration
class TemplateMetaRead(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'url': 'str',
'localizations': 'list[LocalizationMetaRead]',
'created_at': 'datetime',
'updated_at': 'datetime'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'url': 'url',
'localizations': 'localizations',
'created_at': 'createdAt',
'updated_at': 'updatedAt'
}
def __init__(self, id=None, name=None, description=None, url=None, localizations=None, created_at=None, updated_at=None, local_vars_configuration=None): # noqa: E501
"""TemplateMetaRead - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._description = None
self._url = None
self._localizations = None
self._created_at = None
self._updated_at = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if url is not None:
self.url = url
if localizations is not None:
self.localizations = localizations
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
@property
def id(self):
"""Gets the id of this TemplateMetaRead. # noqa: E501
An opaque, unique identifier for a template # noqa: E501
:return: The id of this TemplateMetaRead. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TemplateMetaRead.
An opaque, unique identifier for a template # noqa: E501
:param id: The id of this TemplateMetaRead. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this TemplateMetaRead. # noqa: E501
The name of a template # noqa: E501
:return: The name of this TemplateMetaRead. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TemplateMetaRead.
The name of a template # noqa: E501
:param name: The name of this TemplateMetaRead. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this TemplateMetaRead. # noqa: E501
A description of the template # noqa: E501
:return: The description of this TemplateMetaRead. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this TemplateMetaRead.
A description of the template # noqa: E501
:param description: The description of this TemplateMetaRead. # noqa: E501
:type: str
"""
self._description = description
@property
def url(self):
"""Gets the url of this TemplateMetaRead. # noqa: E501
The API url for a specific template # noqa: E501
:return: The url of this TemplateMetaRead. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this TemplateMetaRead.
The API url for a specific template # noqa: E501
:param url: The url of this TemplateMetaRead. # noqa: E501
:type: str
"""
self._url = url
@property
def localizations(self):
"""Gets the localizations of this TemplateMetaRead. # noqa: E501
A list of the template's available localization objects # noqa: E501
:return: The localizations of this TemplateMetaRead. # noqa: E501
:rtype: list[LocalizationMetaRead]
"""
return self._localizations
@localizations.setter
def localizations(self, localizations):
"""Sets the localizations of this TemplateMetaRead.
A list of the template's available localization objects # noqa: E501
:param localizations: The localizations of this TemplateMetaRead. # noqa: E501
:type: list[LocalizationMetaRead]
"""
self._localizations = localizations
@property
def created_at(self):
"""Gets the created_at of this TemplateMetaRead. # noqa: E501
The time of initial creation # noqa: E501
:return: The created_at of this TemplateMetaRead. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this TemplateMetaRead.
The time of initial creation # noqa: E501
:param created_at: The created_at of this TemplateMetaRead. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this TemplateMetaRead. # noqa: E501
The time of last update # noqa: E501
:return: The updated_at of this TemplateMetaRead. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this TemplateMetaRead.
The time of last update # noqa: E501
:param updated_at: The updated_at of this TemplateMetaRead. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateMetaRead):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TemplateMetaRead):
return True
return self.to_dict() != other.to_dict()
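# --- Illustrative sketch (not part of the generated module): constructing a
# metadata object by hand and serializing it. All field values are made up.
def _example_template_meta_read():
    meta = TemplateMetaRead(
        id='tem_123',
        name='Welcome email',
        description='Sent after signup',
        url='https://example.invalid/v1/templates/tem_123',
    )
    return meta.to_dict()  # plain dict keyed by the python attribute names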
| 30.808219 | 845 | 0.605825 |
86f7c637998505f94e8b4a9deb3603997dfbb456 | 1,049 | py | Python | examples/heat_map.py | zkx741481546/keract | 6f25711e54f7f8b5387fff8f79ad35a0a1113d33 | [
"MIT"
] | null | null | null | examples/heat_map.py | zkx741481546/keract | 6f25711e54f7f8b5387fff8f79ad35a0a1113d33 | [
"MIT"
] | null | null | null | examples/heat_map.py | zkx741481546/keract | 6f25711e54f7f8b5387fff8f79ad35a0a1113d33 | [
"MIT"
] | null | null | null | from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import img_to_array
from PIL import Image
import requests
from io import BytesIO
import numpy as np

model = VGG16()
url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 224, 224))
image = img_to_array(image)
arr_image = np.array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))
import keract
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
activations = keract.get_activations(model, image)
keract.display_heatmaps(activations, arr_image)
| 31.787879 | 109 | 0.757865 |
fac603281b95ce1ceb3b5321f97db5f0281ad6c6 | 7,580 | py | Python | entsoe/mappings.py | gjertro/entsoe-py | 6823dde6ad8ecddaabe67fbaf66e9814dd29d037 | [
"MIT"
] | 1 | 2019-02-08T21:26:54.000Z | 2019-02-08T21:26:54.000Z | entsoe/mappings.py | gjertro/entsoe-py | 6823dde6ad8ecddaabe67fbaf66e9814dd29d037 | [
"MIT"
] | null | null | null | entsoe/mappings.py | gjertro/entsoe-py | 6823dde6ad8ecddaabe67fbaf66e9814dd29d037 | [
"MIT"
] | null | null | null | DOMAIN_MAPPINGS = {
'AL': '10YAL-KESH-----5',
'AT': '10YAT-APG------L',
'BA': '10YBA-JPCC-----D',
'BE': '10YBE----------2',
'BG': '10YCA-BULGARIA-R',
'BY': '10Y1001A1001A51S',
'CH': '10YCH-SWISSGRIDZ',
'CZ': '10YCZ-CEPS-----N',
'DE': '10Y1001A1001A83F',
'DK': '10Y1001A1001A65H',
'EE': '10Y1001A1001A39I',
'ES': '10YES-REE------0',
'FI': '10YFI-1--------U',
'FR': '10YFR-RTE------C',
'GB': '10YGB----------A',
'GB-NIR': '10Y1001A1001A016',
'GR': '10YGR-HTSO-----Y',
'HR': '10YHR-HEP------M',
'HU': '10YHU-MAVIR----U',
'IE': '10YIE-1001A00010',
'IT': '10YIT-GRTN-----B',
'LT': '10YLT-1001A0008Q',
'LU': '10YLU-CEGEDEL-NQ',
'LV': '10YLV-1001A00074',
# 'MD': 'MD',
'ME': '10YCS-CG-TSO---S',
'MK': '10YMK-MEPSO----8',
'MT': '10Y1001A1001A93C',
'NL': '10YNL----------L',
'NO': '10YNO-0--------C',
'PL': '10YPL-AREA-----S',
'PT': '10YPT-REN------W',
'RO': '10YRO-TEL------P',
'RS': '10YCS-SERBIATSOV',
'RU': '10Y1001A1001A49F',
'RU-KGD': '10Y1001A1001A50U',
'SE': '10YSE-1--------K',
'SI': '10YSI-ELES-----O',
'SK': '10YSK-SEPS-----K',
'TR': '10YTR-TEIAS----W',
'UA': '10YUA-WEPS-----0',
'DE-AT-LU': '10Y1001A1001A63L',
}
BIDDING_ZONES = DOMAIN_MAPPINGS.copy()
BIDDING_ZONES.update({
'DE': '10Y1001A1001A63L', # DE-AT-LU
'LU': '10Y1001A1001A63L', # DE-AT-LU
'IT-NORD': '10Y1001A1001A73I',
'IT-CNOR': '10Y1001A1001A70O',
'IT-CSUD': '10Y1001A1001A71M',
'IT-SUD': '10Y1001A1001A788',
'IT-FOGN': '10Y1001A1001A72K',
'IT-ROSN': '10Y1001A1001A77A',
'IT-BRNN': '10Y1001A1001A699',
'IT-PRGP': '10Y1001A1001A76C',
'IT-SARD': '10Y1001A1001A74G',
'IT-SICI': '10Y1001A1001A75E',
'NO-1': '10YNO-1--------2',
'NO-2': '10YNO-2--------T',
'NO-3': '10YNO-3--------J',
'NO-4': '10YNO-4--------9',
'NO-5': '10Y1001A1001A48H',
'SE-1': '10Y1001A1001A44P',
'SE-2': '10Y1001A1001A45N',
'SE-3': '10Y1001A1001A46L',
'SE-4': '10Y1001A1001A47J',
'DK-1': '10YDK-1--------W',
'DK-2': '10YDK-2--------M'
})
TIMEZONE_MAPPINGS = {
'AL': 'Europe/Tirane',
'AT': 'Europe/Vienna',
'BA': 'Europe/Sarajevo',
'BE': 'Europe/Brussels',
'BG': 'Europe/Sofia',
'BY': 'Europe/Minsk',
'CH': 'Europe/Zurich',
'CZ': 'Europe/Prague',
'DE': 'Europe/Berlin',
'DK': 'Europe/Copenhagen',
'EE': 'Europe/Tallinn',
'ES': 'Europe/Madrid',
'FI': 'Europe/Helsinki',
'FR': 'Europe/Paris',
'GB': 'Europe/London',
'GB-NIR': 'Europe/Belfast',
'GR': 'Europe/Athens',
'HR': 'Europe/Zagreb',
'HU': 'Europe/Budapest',
'IE': 'Europe/Dublin',
'IT': 'Europe/Rome',
'LT': 'Europe/Vilnius',
'LU': 'Europe/Luxembourg',
'LV': 'Europe/Riga',
# 'MD': 'MD',
'ME': 'Europe/Podgorica',
'MK': 'Europe/Skopje',
'MT': 'Europe/Malta',
'NL': 'Europe/Amsterdam',
'NO': 'Europe/Oslo',
'PL': 'Europe/Warsaw',
'PT': 'Europe/Lisbon',
'RO': 'Europe/Bucharest',
'RS': 'Europe/Belgrade',
'RU': 'Europe/Moscow',
'RU-KGD': 'Europe/Kaliningrad',
'SE': 'Europe/Stockholm',
'SI': 'Europe/Ljubljana',
'SK': 'Europe/Bratislava',
'TR': 'Europe/Istanbul',
'UA': 'Europe/Kiev',
'IT-NORD': 'Europe/Rome',
'IT-CNOR': 'Europe/Rome',
'IT-CSUD': 'Europe/Rome',
'IT-SUD': 'Europe/Rome',
'IT-FOGN': 'Europe/Rome',
'IT-ROSN': 'Europe/Rome',
'IT-BRNN': 'Europe/Rome',
'IT-PRGP': 'Europe/Rome',
'IT-SARD': 'Europe/Rome',
'IT-SICI': 'Europe/Rome',
'DE-AT-LU': 'Europe/Berlin',
'NO-1': 'Europe/Oslo',
'NO-2': 'Europe/Oslo',
'NO-3': 'Europe/Oslo',
'NO-4': 'Europe/Oslo',
'NO-5': 'Europe/Oslo',
'SE-1': 'Europe/Stockholm',
'SE-2': 'Europe/Stockholm',
'SE-3': 'Europe/Stockholm',
'SE-4': 'Europe/Stockholm',
'DK-1': 'Europe/Copenhagen',
'DK-2': 'Europe/Copenhagen'
}
PSRTYPE_MAPPINGS = {
'A03': 'Mixed',
'A04': 'Generation',
'A05': 'Load',
'B01': 'Biomass',
'B02': 'Fossil Brown coal/Lignite',
'B03': 'Fossil Coal-derived gas',
'B04': 'Fossil Gas',
'B05': 'Fossil Hard coal',
'B06': 'Fossil Oil',
'B07': 'Fossil Oil shale',
'B08': 'Fossil Peat',
'B09': 'Geothermal',
'B10': 'Hydro Pumped Storage',
'B11': 'Hydro Run-of-river and poundage',
'B12': 'Hydro Water Reservoir',
'B13': 'Marine',
'B14': 'Nuclear',
'B15': 'Other renewable',
'B16': 'Solar',
'B17': 'Waste',
'B18': 'Wind Offshore',
'B19': 'Wind Onshore',
'B20': 'Other',
'B21': 'AC Link',
'B22': 'DC Link',
'B23': 'Substation',
'B24': 'Transformer'}
DOCSTATUS = {'A05': 'Active', 'A09': 'Cancelled', 'A13': 'Withdrawn'}
BSNTYPE = {'A29': 'Already allocated capacity (AAC)',
'A43': 'Requested capacity (without price)',
'A46': 'System Operator redispatching',
'A53': 'Planned maintenance',
'A54': 'Unplanned outage',
'A85': 'Internal redispatch',
'A95': 'Frequency containment reserve',
'A96': 'Automatic frequency restoration reserve',
'A97': 'Manual frequency restoration reserve',
'A98': 'Replacement reserve',
'B01': 'Interconnector network evolution',
'B02': 'Interconnector network dismantling',
'B03': 'Counter trade',
'B04': 'Congestion costs',
'B05': 'Capacity allocated (including price)',
'B07': 'Auction revenue',
'B08': 'Total nominated capacity',
'B09': 'Net position',
'B10': 'Congestion income',
'B11': 'Production unit'}
DOCUMENTTYPE = {'A09': 'Finalised schedule',
'A11': 'Aggregated energy data report',
'A25': 'Allocation result document',
'A26': 'Capacity document',
'A31': 'Agreed capacity',
'A44': 'Price Document',
'A61': 'Estimated Net Transfer Capacity',
'A63': 'Redispatch notice',
'A65': 'System total load',
'A68': 'Installed generation per type',
'A69': 'Wind and solar forecast',
'A70': 'Load forecast margin',
'A71': 'Generation forecast',
'A72': 'Reservoir filling information',
'A73': 'Actual generation',
'A74': 'Wind and solar generation',
'A75': 'Actual generation per type',
'A76': 'Load unavailability',
'A77': 'Production unavailability',
'A78': 'Transmission unavailability',
'A79': 'Offshore grid infrastructure unavailability',
'A80': 'Generation unavailability',
'A81': 'Contracted reserves',
'A82': 'Accepted offers',
'A83': 'Activated balancing quantities',
'A84': 'Activated balancing prices',
'A85': 'Imbalance prices',
'A86': 'Imbalance volume',
'A87': 'Financial situation',
'A88': 'Cross border balancing',
'A89': 'Contracted reserve prices',
'A90': 'Interconnection network expansion',
'A91': 'Counter trade notice',
'A92': 'Congestion costs',
'A93': 'DC link capacity',
'A94': 'Non EU allocations',
'A95': 'Configuration document',
'B11': 'Flow-based allocations'}
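# Illustrative usage sketch (not part of the original module): resolving a
# bidding zone to its EIC code, timezone and a production type label.
#
#     BIDDING_ZONES['NO-2']       # -> '10YNO-2--------T'
#     TIMEZONE_MAPPINGS['NO-2']   # -> 'Europe/Oslo'
#     PSRTYPE_MAPPINGS['B16']     # -> 'Solar'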
| 33.100437 | 69 | 0.512533 |
df8fde63cf99aa8436e8ee76631cc9c7f431e87c | 453 | py | Python | pythonProject/MUNDO 3/Desafio 105.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | pythonProject/MUNDO 3/Desafio 105.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | 1 | 2021-06-25T15:29:11.000Z | 2021-06-25T15:29:11.000Z | pythonProject/MUNDO 3/Desafio 105.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | def notas(*num, sit = False):
r = dict()
r['Total'] = len(num)
r['Maior'] = max(num)
r['Menor'] = min(num)
r['Média'] = sum(num)/len(num)
if sit:
        if r['Média'] < 6:
            r['Situação'] = 'Ruim'
        elif r['Média'] < 9:
            r['Situação'] = 'Regular'
        else:
            r['Situação'] = 'Boa'
return r
# Program
resp = notas(5.5,2.5,10.5,6.5, sit=True)
print(resp) | 21.571429 | 43 | 0.470199 |
c9b88cadbe1e2951b98a2cabf6eae1daf8cde68b | 14,689 | py | Python | intersight/models/iam_o_auth_token_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | intersight/models/iam_o_auth_token_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | intersight/models/iam_o_auth_token_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class IamOAuthTokenAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_expiration_time': 'datetime',
'client_id': 'str',
'client_ip_address': 'str',
'client_name': 'str',
'expiration_time': 'datetime',
'last_login_client': 'str',
'last_login_time': 'datetime',
'token_id': 'str',
'user_meta': 'IamClientMeta',
'app_registration': 'IamAppRegistration',
'permission': 'IamPermission',
'user': 'IamUser'
}
attribute_map = {
'access_expiration_time': 'AccessExpirationTime',
'client_id': 'ClientId',
'client_ip_address': 'ClientIpAddress',
'client_name': 'ClientName',
'expiration_time': 'ExpirationTime',
'last_login_client': 'LastLoginClient',
'last_login_time': 'LastLoginTime',
'token_id': 'TokenId',
'user_meta': 'UserMeta',
'app_registration': 'AppRegistration',
'permission': 'Permission',
'user': 'User'
}
def __init__(self,
access_expiration_time=None,
client_id=None,
client_ip_address=None,
client_name=None,
expiration_time=None,
last_login_client=None,
last_login_time=None,
token_id=None,
user_meta=None,
app_registration=None,
permission=None,
user=None,
local_vars_configuration=None): # noqa: E501
"""IamOAuthTokenAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_expiration_time = None
self._client_id = None
self._client_ip_address = None
self._client_name = None
self._expiration_time = None
self._last_login_client = None
self._last_login_time = None
self._token_id = None
self._user_meta = None
self._app_registration = None
self._permission = None
self._user = None
self.discriminator = None
if access_expiration_time is not None:
self.access_expiration_time = access_expiration_time
if client_id is not None:
self.client_id = client_id
if client_ip_address is not None:
self.client_ip_address = client_ip_address
if client_name is not None:
self.client_name = client_name
if expiration_time is not None:
self.expiration_time = expiration_time
if last_login_client is not None:
self.last_login_client = last_login_client
if last_login_time is not None:
self.last_login_time = last_login_time
if token_id is not None:
self.token_id = token_id
if user_meta is not None:
self.user_meta = user_meta
if app_registration is not None:
self.app_registration = app_registration
if permission is not None:
self.permission = permission
if user is not None:
self.user = user
@property
def access_expiration_time(self):
"""Gets the access_expiration_time of this IamOAuthTokenAllOf. # noqa: E501
Expiration time for the JWT token to which it can be used for api calls. # noqa: E501
:return: The access_expiration_time of this IamOAuthTokenAllOf. # noqa: E501
:rtype: datetime
"""
return self._access_expiration_time
@access_expiration_time.setter
def access_expiration_time(self, access_expiration_time):
"""Sets the access_expiration_time of this IamOAuthTokenAllOf.
Expiration time for the JWT token to which it can be used for api calls. # noqa: E501
:param access_expiration_time: The access_expiration_time of this IamOAuthTokenAllOf. # noqa: E501
:type: datetime
"""
self._access_expiration_time = access_expiration_time
@property
def client_id(self):
"""Gets the client_id of this IamOAuthTokenAllOf. # noqa: E501
The identifier of the registered application to which the token belongs. # noqa: E501
:return: The client_id of this IamOAuthTokenAllOf. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this IamOAuthTokenAllOf.
The identifier of the registered application to which the token belongs. # noqa: E501
:param client_id: The client_id of this IamOAuthTokenAllOf. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def client_ip_address(self):
"""Gets the client_ip_address of this IamOAuthTokenAllOf. # noqa: E501
The user agent IP address from which the auth token is launched. # noqa: E501
:return: The client_ip_address of this IamOAuthTokenAllOf. # noqa: E501
:rtype: str
"""
return self._client_ip_address
@client_ip_address.setter
def client_ip_address(self, client_ip_address):
"""Sets the client_ip_address of this IamOAuthTokenAllOf.
The user agent IP address from which the auth token is launched. # noqa: E501
:param client_ip_address: The client_ip_address of this IamOAuthTokenAllOf. # noqa: E501
:type: str
"""
self._client_ip_address = client_ip_address
@property
def client_name(self):
"""Gets the client_name of this IamOAuthTokenAllOf. # noqa: E501
The name of the registered application to which the token belongs. # noqa: E501
:return: The client_name of this IamOAuthTokenAllOf. # noqa: E501
:rtype: str
"""
return self._client_name
@client_name.setter
def client_name(self, client_name):
"""Sets the client_name of this IamOAuthTokenAllOf.
The name of the registered application to which the token belongs. # noqa: E501
:param client_name: The client_name of this IamOAuthTokenAllOf. # noqa: E501
:type: str
"""
self._client_name = client_name
@property
def expiration_time(self):
"""Gets the expiration_time of this IamOAuthTokenAllOf. # noqa: E501
Expiration time for the JWT token to which it can be refreshed. # noqa: E501
:return: The expiration_time of this IamOAuthTokenAllOf. # noqa: E501
:rtype: datetime
"""
return self._expiration_time
@expiration_time.setter
def expiration_time(self, expiration_time):
"""Sets the expiration_time of this IamOAuthTokenAllOf.
Expiration time for the JWT token to which it can be refreshed. # noqa: E501
:param expiration_time: The expiration_time of this IamOAuthTokenAllOf. # noqa: E501
:type: datetime
"""
self._expiration_time = expiration_time
@property
def last_login_client(self):
"""Gets the last_login_client of this IamOAuthTokenAllOf. # noqa: E501
The client address from which last login is initiated. # noqa: E501
:return: The last_login_client of this IamOAuthTokenAllOf. # noqa: E501
:rtype: str
"""
return self._last_login_client
@last_login_client.setter
def last_login_client(self, last_login_client):
"""Sets the last_login_client of this IamOAuthTokenAllOf.
The client address from which last login is initiated. # noqa: E501
:param last_login_client: The last_login_client of this IamOAuthTokenAllOf. # noqa: E501
:type: str
"""
self._last_login_client = last_login_client
@property
def last_login_time(self):
"""Gets the last_login_time of this IamOAuthTokenAllOf. # noqa: E501
The last login time for user. # noqa: E501
:return: The last_login_time of this IamOAuthTokenAllOf. # noqa: E501
:rtype: datetime
"""
return self._last_login_time
@last_login_time.setter
def last_login_time(self, last_login_time):
"""Sets the last_login_time of this IamOAuthTokenAllOf.
The last login time for user. # noqa: E501
:param last_login_time: The last_login_time of this IamOAuthTokenAllOf. # noqa: E501
:type: datetime
"""
self._last_login_time = last_login_time
@property
def token_id(self):
"""Gets the token_id of this IamOAuthTokenAllOf. # noqa: E501
Token identifier. Not the Access Token itself. # noqa: E501
:return: The token_id of this IamOAuthTokenAllOf. # noqa: E501
:rtype: str
"""
return self._token_id
@token_id.setter
def token_id(self, token_id):
"""Sets the token_id of this IamOAuthTokenAllOf.
Token identifier. Not the Access Token itself. # noqa: E501
:param token_id: The token_id of this IamOAuthTokenAllOf. # noqa: E501
:type: str
"""
self._token_id = token_id
@property
def user_meta(self):
"""Gets the user_meta of this IamOAuthTokenAllOf. # noqa: E501
:return: The user_meta of this IamOAuthTokenAllOf. # noqa: E501
:rtype: IamClientMeta
"""
return self._user_meta
@user_meta.setter
def user_meta(self, user_meta):
"""Sets the user_meta of this IamOAuthTokenAllOf.
:param user_meta: The user_meta of this IamOAuthTokenAllOf. # noqa: E501
:type: IamClientMeta
"""
self._user_meta = user_meta
@property
def app_registration(self):
"""Gets the app_registration of this IamOAuthTokenAllOf. # noqa: E501
:return: The app_registration of this IamOAuthTokenAllOf. # noqa: E501
:rtype: IamAppRegistration
"""
return self._app_registration
@app_registration.setter
def app_registration(self, app_registration):
"""Sets the app_registration of this IamOAuthTokenAllOf.
:param app_registration: The app_registration of this IamOAuthTokenAllOf. # noqa: E501
:type: IamAppRegistration
"""
self._app_registration = app_registration
@property
def permission(self):
"""Gets the permission of this IamOAuthTokenAllOf. # noqa: E501
:return: The permission of this IamOAuthTokenAllOf. # noqa: E501
:rtype: IamPermission
"""
return self._permission
@permission.setter
def permission(self, permission):
"""Sets the permission of this IamOAuthTokenAllOf.
:param permission: The permission of this IamOAuthTokenAllOf. # noqa: E501
:type: IamPermission
"""
self._permission = permission
@property
def user(self):
"""Gets the user of this IamOAuthTokenAllOf. # noqa: E501
:return: The user of this IamOAuthTokenAllOf. # noqa: E501
:rtype: IamUser
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this IamOAuthTokenAllOf.
:param user: The user of this IamOAuthTokenAllOf. # noqa: E501
:type: IamUser
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict()
if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IamOAuthTokenAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IamOAuthTokenAllOf):
return True
return self.to_dict() != other.to_dict()
| 33.923788 | 1,052 | 0.638301 |
583ef94c3b81d62adb8e663e24a57161f2cfd968 | 4,484 | py | Python | back/user/views.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | back/user/views.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | back/user/views.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import logout
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.sites.shortcuts import get_current_site
from django.urls import reverse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.core.mail import EmailMessage
from django.db.models import Q
from user.forms import LoginUserForm, CreateUserForm
from user.utils import token_generator
from user.models import Account, Student
from education.models import Group
from user.models import Account
from user.utils import return_correct_phone
def login_and_register(request):
    if 'login' in request.POST:
        form = LoginUserForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            login(request, user)
            messages.success(request, 'You have logged in successfully')
        else:
            messages.error(request, 'Invalid login details!')
        return redirect(request.path)
    elif 'register' in request.POST:
        form = CreateUserForm(data=request.POST)
        if form.is_valid():
            user = form.save()
            student = Student.objects.create(balance=0)
            user.student = student
            user.is_active = False
            user_id = urlsafe_base64_encode(force_bytes(user.username))
            domain = get_current_site(request).domain
            relative = reverse('activate', kwargs={'user_id': user_id, 'token': token_generator.make_token(user)})
            activate_url = f'http://{domain}{relative}'
            email_subject = 'Email confirmation'
            email_body = f'Hi {user.username}, this is your account activation message; follow the link to ' \
                         f'verify your account\n{activate_url}'
            email = EmailMessage(email_subject, email_body, '[email protected]', [user.email], )
            try:
                email.send(fail_silently=False)
                messages.success(request, 'A confirmation email has been sent; follow the link to activate your account')
            except:
                messages.error(request, 'Something went wrong')
            user.save()
            return redirect(request.path)
def verification_email(request, user_id, token):
    if request.user.is_authenticated:
        messages.success(request, 'You are already logged in')
        return redirect(f'/profile/{request.user.username}')
    try:
        username = force_text(urlsafe_base64_decode(user_id))
        user = Account.objects.get(username=username)
        if token_generator.check_token(user, token) and not user.is_active:
            user.is_active = True
            user.save()
            messages.success(request, 'Your account has been activated')
            return redirect('/')
        messages.error(request, 'The account could not be activated')
        return redirect('/')
    except:
        messages.error(request, 'Something went wrong')
        return redirect('/')
def profile(request, username):
user = Account.objects.get(username=username)
ended_courses = []
in_progress_courses = []
if request.method == 'POST':
login_and_register(request)
if 'settings' in request.POST:
user.first_name = request.POST['first_name']
user.last_name = request.POST['last_name']
user.phone = request.POST['phone']
user.about_me = request.POST['about_me']
user.save()
return redirect(request.path)
if request.user == user:
try:
student = user.student
ended_courses = Group.objects.filter(users=student, ended=True)
in_progress_courses = Group.objects.filter(users=student, ended=False)
except Exception:
pass
user.phone = return_correct_phone(user.phone)
context = {
'user': user,
'ended': ended_courses,
'not_ended': in_progress_courses
}
return render(request, 'user/profile/index.html', context)
def logout_page(request):
    messages.info(request, f'You have logged out of {request.user}')
    logout(request)
    return redirect('/')
def teachers(request):
_teachers = Account.objects.filter(~Q(teacher=None))
if request.method == 'POST':
login_and_register(request)
context = {'teachers': _teachers}
return render(request, 'user/teachers.html', context)
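# Hedged sketch (not part of this module): the 'activate' URL name used by reverse()
# in login_and_register, and handled by verification_email above, is assumed to be
# wired up roughly like this in the app's urls.py.
#
# from django.urls import path
# from user import views
#
# urlpatterns = [
#     path('activate/<user_id>/<token>/', views.verification_email, name='activate'),
# ]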
| 38.655172 | 114 | 0.661909 |
1294fc4a37230780e6b6240cac9bf9e3c745eb88 | 2,661 | py | Python | src/jobs/json_renderer.py | ZendriXXX/predict-python | fe0360b4888980421f8f91f158d6523729bfc5f7 | [
"MIT"
] | null | null | null | src/jobs/json_renderer.py | ZendriXXX/predict-python | fe0360b4888980421f8f91f158d6523729bfc5f7 | [
"MIT"
] | null | null | null | src/jobs/json_renderer.py | ZendriXXX/predict-python | fe0360b4888980421f8f91f158d6523729bfc5f7 | [
"MIT"
] | null | null | null | from json.encoder import encode_basestring_ascii, encode_basestring, INFINITY, _make_iterencode
from rest_framework.renderers import JSONRenderer
from rest_framework.utils.encoders import JSONEncoder
# default renderer cannot handle JSON inf values
# https://stackoverflow.com/questions/35939464/django-rest-framework-json-data-monkey-patching
class CustomJSONEncoder(JSONEncoder):
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
:param o:
:param _one_shot:
:return:
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
        # Force the pure-Python encoder: with c_make_encoder set to None, the C
        # speedup path below is skipped and the custom floatstr() is always used.
        c_make_encoder = None
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
def floatstr(o, allow_nan=True, _repr=lambda o: format(o, '.4f'), _inf=INFINITY, _neginf=-INFINITY):
"""Convert float number into a string
:param o:
:param allow_nan:
:param _repr:
:param _inf:
:param _neginf:
:return:
"""
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = '0'
elif o == _inf:
text = '1000000' # infinity is 1000000
elif o == _neginf:
text = '-1000000'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if _one_shot and c_make_encoder is not None and self.indent is None:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan) # TODO: fix call to non-callable object
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
class PalJSONRenderer(JSONRenderer):
encoder_class = CustomJSONEncoder
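# Hedged usage sketch (not part of the original module): demonstrates the value
# substitutions floatstr() makes (NaN -> 0, +/-infinity -> +/-1000000) when the
# renderer serializes a payload.
def _example_render_special_floats():
    renderer = PalJSONRenderer()
    payload = {'nan': float('nan'), 'inf': float('inf'), 'neg_inf': float('-inf')}
    # Returns JSON bytes along the lines of b'{"nan":0,"inf":1000000,"neg_inf":-1000000}'
    return renderer.render(payload)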
| 32.851852 | 108 | 0.582864 |
8e7116101dce308dd76d59661c118414af61108b | 1,383 | py | Python | foxylib/singleton/slack/foxylib_slack.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | null | null | null | foxylib/singleton/slack/foxylib_slack.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | 3 | 2019-12-12T05:17:44.000Z | 2022-03-11T23:40:50.000Z | foxylib/singleton/slack/foxylib_slack.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | 2 | 2019-10-16T17:39:34.000Z | 2020-02-10T06:32:08.000Z | # https://pypi.org/project/slackclient/
import logging
import os
from functools import lru_cache, partial
from slack import RTMClient, WebClient
from foxylib.singleton.env.foxylib_env import FoxylibEnv
from foxylib.tools.log.foxylib_logger import FoxylibLogger
from foxylib.tools.messenger.slack.slack_tool import SlackTool
from foxylib.tools.env.env_tool import EnvTool
from foxylib.tools.function.function_tool import FunctionTool
class FoxylibSlack:
@classmethod
def xoxb_token(cls):
logger = FoxylibLogger.func_level2logger(cls.xoxb_token, logging.DEBUG)
token = FoxylibEnv.key2value("SLACK_BOT_USER_OAUTH_ACCESS_TOKEN")
# logger.debug({"token": token})
return token
@classmethod
def xoxp_token(cls):
logger = FoxylibLogger.func_level2logger(cls.xoxp_token, logging.DEBUG)
token = FoxylibEnv.key2value("SLACK_OAUTH_ACCESS_TOKEN")
# logger.debug({"token": token})
return token
@classmethod
@FunctionTool.wrapper2wraps_applied(lru_cache(maxsize=1))
def rtm_client(cls):
return SlackTool.token2rtm_client(cls.xoxb_token())
@classmethod
@FunctionTool.wrapper2wraps_applied(lru_cache(maxsize=1))
def web_client(cls):
return SlackTool.token2web_client(cls.xoxb_token())
class FoxylibChannel:
class Value:
FOXYLIB = "foxylib"
V = Value
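# Hedged usage sketch (not part of the original module): assumes the slackclient v2
# WebClient API (chat_postMessage) behind SlackTool and a valid bot token in the
# SLACK_BOT_USER_OAUTH_ACCESS_TOKEN environment variable.
def _example_post_to_foxylib_channel(text="hello from foxylib"):
    web_client = FoxylibSlack.web_client()
    return web_client.chat_postMessage(channel=FoxylibChannel.V.FOXYLIB, text=text)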
| 27.66 | 79 | 0.740419 |
cece8f79bbd811bba80aee5ab890ff3eb844d726 | 1,766 | py | Python | setup.py | dschien/pint-pandas | 5c8499133f87afe718681df7b48f0870708078be | [
"BSD-3-Clause"
] | null | null | null | setup.py | dschien/pint-pandas | 5c8499133f87afe718681df7b48f0870708078be | [
"BSD-3-Clause"
] | null | null | null | setup.py | dschien/pint-pandas | 5c8499133f87afe718681df7b48f0870708078be | [
"BSD-3-Clause"
] | null | null | null | import sys
try:
reload(sys).setdefaultencoding("UTF-8")
except:
pass
try:
from setuptools import setup, find_packages
except ImportError:
print('Please install or upgrade setuptools or pip to continue')
sys.exit(1)
import codecs
def read(filename):
return codecs.open(filename, encoding='utf-8').read()
long_description = '\n\n'.join([read('README.md'),
# read('AUTHORS'),
# read('CHANGES')
])
__doc__ = long_description
install_requirements = [
"pint>=0.10.1",
"pandas>=0.24.0",
]
extra_requirements = {
"test": ["pytest", "pytest-cov", "codecov", "coveralls", "nbval"]
}
setup(
name='Pint-Pandas',
version='0.1.dev0', # should move to using versioneer for this
description='Pandas interface for Pint',
long_description=long_description,
keywords='physical quantities unit conversion science',
author='Hernan E. Grecco',
author_email='[email protected]',
url='https://github.com/hgrecco/pint-pandas',
test_suite='pintpandas.testsuite',
packages=find_packages(),
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 3.6',
],
install_requires=install_requirements,
extras_require=extra_requirements,
)
| 27.169231 | 69 | 0.628539 |
fd573c3a05a02f71f3924bfcdb7e2bd02f3c2fb1 | 3,956 | py | Python | layint_scan_api/models/package_search_result.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | 1 | 2018-03-26T23:55:00.000Z | 2018-03-26T23:55:00.000Z | layint_scan_api/models/package_search_result.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | null | null | null | layint_scan_api/models/package_search_result.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | 2 | 2020-11-04T02:56:33.000Z | 2020-11-05T08:12:01.000Z | # coding: utf-8
"""
Layered Insight Scan
Layered Insight Scan performs static vulnerability analysis, license and package compliance. You can find out more about Scan at http://layeredinsight.com.
OpenAPI spec version: 0.9.4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PackageSearchResult(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'package_version': 'PackageVersion',
'images': 'list[ImageRef]'
}
attribute_map = {
'package_version': 'PackageVersion',
'images': 'Images'
}
def __init__(self, package_version=None, images=None):
"""
PackageSearchResult - a model defined in Swagger
"""
self._package_version = None
self._images = None
if package_version is not None:
self.package_version = package_version
if images is not None:
self.images = images
@property
def package_version(self):
"""
Gets the package_version of this PackageSearchResult.
:return: The package_version of this PackageSearchResult.
:rtype: PackageVersion
"""
return self._package_version
@package_version.setter
def package_version(self, package_version):
"""
Sets the package_version of this PackageSearchResult.
:param package_version: The package_version of this PackageSearchResult.
:type: PackageVersion
"""
self._package_version = package_version
@property
def images(self):
"""
Gets the images of this PackageSearchResult.
:return: The images of this PackageSearchResult.
:rtype: list[ImageRef]
"""
return self._images
@images.setter
def images(self, images):
"""
Sets the images of this PackageSearchResult.
:param images: The images of this PackageSearchResult.
:type: list[ImageRef]
"""
self._images = images
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PackageSearchResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 26.373333 | 160 | 0.573812 |
caa5faabd040cc4fa59ecdfc876c8a7032b53bee | 2,363 | py | Python | m3u8-cmdline/m3u8.py | Abeous/streaming | 5d34eba2f27af4d25395200038b240ec1f4ea3e5 | [
"MIT"
] | null | null | null | m3u8-cmdline/m3u8.py | Abeous/streaming | 5d34eba2f27af4d25395200038b240ec1f4ea3e5 | [
"MIT"
] | null | null | null | m3u8-cmdline/m3u8.py | Abeous/streaming | 5d34eba2f27af4d25395200038b240ec1f4ea3e5 | [
"MIT"
] | null | null | null | import subprocess
import os
import platform
import json
import argparse
class config_info:
def __init__(self, system):
self.directory = os.path.join(os.path.expanduser('~'), '.config','hls-restreamer')
self.os = system
self.path = self.directory + '/config.json'
config = config_info(platform.system())
def Initialize(args):
data = {"streamkey":"",
"ffmpeg":"ffmpeg -loglevel warning -reconnect 1 -reconnect_at_eof 1 -reconnect_delay_max 10 -i {0} -codec:a aac -c:v copy -f flv rtmp://ingest.angelthump.com:1935/live/{1}"}
if not os.path.isdir(config.directory):
try:
os.makedirs(config.directory)
except Exception as e:
raise e
if not os.path.isfile(config.path):
try:
with open(config.path, 'w') as config_file: json.dump(data, config_file)
except Exception as e:
raise e
def readConfig(args):
    if args.streamkey is None:
        with open(config.path, 'r') as config_file:
            config_JSON_data = json.load(config_file)
            if not config_JSON_data['streamkey'] or not isinstance(config_JSON_data['streamkey'], str):
                print('Stream key not found, please use --streamkey flag with your angelthump streamkey to continue')
                exit()
            else:
                return config_JSON_data['streamkey'], config_JSON_data['ffmpeg']
    elif args.streamkey is not None:
        with open(config.path, 'r+') as config_file:
            config_JSON_data = json.load(config_file)
            config_JSON_data['streamkey'] = args.streamkey
            # move to the beginning of the file and truncate the old contents
            config_file.seek(0)
            config_file.truncate()
            json.dump(config_JSON_data, config_file)
            return args.streamkey, config_JSON_data['ffmpeg']
def main(data, args):
    streamkey = data[0]
    cmd = data[1].format(args.streamlink, streamkey).split(' ')
    process = subprocess.Popen(cmd)
    try:
        process.wait()
    except KeyboardInterrupt:
        print('Manual break by user')
        process.kill()
        exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='A tutorial of argparse!')
parser.add_argument("-sk", "--streamkey", help="Sets config's streamkey. If not called ffmpeg will use config file's streamkey.", type=str)
parser.add_argument("streamlink")
args = parser.parse_args()
Initialize(args)
main(readConfig(args), args) | 30.688312 | 183 | 0.679221 |
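# Hedged example (not part of the original script): the config file written by
# Initialize() lives at ~/.config/hls-restreamer/config.json and, once a key has
# been saved with --streamkey, looks roughly like:
#
#   {"streamkey": "your-angelthump-stream-key",
#    "ffmpeg": "ffmpeg -loglevel warning ... -i {0} ... rtmp://ingest.angelthump.com:1935/live/{1}"}
#
# Example invocation (playlist URL and stream key are placeholders):
#   python m3u8.py https://example.com/stream/playlist.m3u8 --streamkey your-angelthump-stream-key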
af9607f6dc6d5832a92f523c324954277f407309 | 3,046 | py | Python | demo.py | namelessjon/mitemp | bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10 | [
"MIT"
] | null | null | null | demo.py | namelessjon/mitemp | bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10 | [
"MIT"
] | null | null | null | demo.py | namelessjon/mitemp | bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Demo file showing how to use the mitemp library."""
import argparse
import re
import logging
import sys
from btlewrap import available_backends, BluepyBackend, GatttoolBackend, PygattBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
def valid_mitemp_mac(mac, pat=re.compile(r"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac adresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError('The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
def poll(args):
"""Poll data from the sensor."""
backend = _get_backend(args)
poller = MiTempBtPoller(args.mac, backend)
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
# def scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def _get_backend(args):
"""Extract the backend class from the command line arguments."""
if args.backend == 'gatttool':
backend = GatttoolBackend
elif args.backend == 'bluepy':
backend = BluepyBackend
elif args.backend == 'pygatt':
backend = PygattBackend
else:
raise Exception('unknown backend: {}'.format(args.backend))
return backend
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function.
Mostly parsing the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--backend', choices=['gatttool', 'bluepy', 'pygatt'], default='gatttool')
parser.add_argument('-v', '--verbose', action='store_const', const=True)
subparsers = parser.add_subparsers(help='sub-command help', )
parser_poll = subparsers.add_parser('poll', help='poll data from a sensor')
parser_poll.add_argument('mac', type=valid_mitemp_mac)
parser_poll.set_defaults(func=poll)
# parser_scan = subparsers.add_parser('scan', help='scan for devices')
# parser_scan.set_defaults(func=scan)
parser_scan = subparsers.add_parser('backends', help='list the available backends')
parser_scan.set_defaults(func=list_backends)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if not hasattr(args, "func"):
parser.print_help()
sys.exit(0)
args.func(args)
if __name__ == '__main__':
main()
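# Hedged usage examples (not part of the original demo); the MAC address below is a
# placeholder that matches the 4C:65:A8:* prefix enforced by valid_mitemp_mac:
#   python demo.py backends
#   python demo.py --backend bluepy poll 4C:65:A8:00:11:22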
| 31.402062 | 108 | 0.676953 |
2a78ad851be6e6d0ba2a5b198392c65f9017acb9 | 3,112 | py | Python | tests/collections/asr/numba/rnnt_loss/utils/test_reduce.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | 1 | 2021-04-13T20:34:16.000Z | 2021-04-13T20:34:16.000Z | tests/collections/asr/numba/rnnt_loss/utils/test_reduce.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | tests/collections/asr/numba/rnnt_loss/utils/test_reduce.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from numba import cuda
from nemo.collections.asr.parts.numba import __NUMBA_MINIMUM_VERSION__, numba_utils
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import reduce
class TestRNNTCUDAReductions:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
def test_reduce_max(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 4, 3]
x = random.randn(*original_shape).reshape([-1])
dx = random.randn(*x.shape)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
dx_c = cuda.to_device(dx, stream=stream)
# call kernel
cols = np.prod(original_shape[:3])
reduce.reduce_max(x_c, dx_c, rows=original_shape[-1], cols=cols, minus=False, stream=stream)
# sync kernel
stream.synchronize()
dx_result = dx_c.copy_to_host(stream=stream)
del x_c, dx_c
# collect results in first [B * T * U] values; for all V
assert np.abs(dx_result[cols:] - dx[cols:]).sum() <= 1e-7
# make sure dx_result updates the [B * T * U] values
assert np.abs(dx_result[:cols] - dx[:cols]).sum() > 0
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
def test_reduce_exp(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 4, 2]
x = random.randn(*original_shape).reshape([-1])
dx = np.zeros_like(x)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
dx_c = cuda.to_device(dx, stream=stream)
# call kernel
cols = np.prod(original_shape[:3])
reduce.reduce_exp(x_c, dx_c, rows=original_shape[-1], cols=cols, minus=False, stream=stream)
# sync kernel
stream.synchronize()
dx_result = dx_c.copy_to_host(stream=stream)
del x_c, dx_c
# collect results in first [B * T * U] values; for all V
assert (dx_result[cols:] - dx[cols:]).sum() <= 1e-7
# make sure dx_result updates the [B * T * U] values
assert np.abs(dx_result[:cols] - dx[:cols]).sum() > 0
if __name__ == '__main__':
pytest.main([__file__])
| 36.186047 | 113 | 0.670308 |
54a5fbc89ea01241390d2432962fec5e6639b33f | 1,001 | py | Python | domains/fetch/problems/training/problem1011_CR.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T12:56:56.000Z | 2021-09-28T12:56:56.000Z | domains/fetch/problems/training/problem1011_CR.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | null | null | null | domains/fetch/problems/training/problem1011_CR.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T16:30:39.000Z | 2022-03-31T16:30:39.000Z | __author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 3,
'charge': 5,
'move': 10,
'moveToEmergency': 5,
'moveCharger': 15,
'addressEmergency': 10,
'wait': 5,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 3,
'charge': 5,
'move': 10,
'moveToEmergency': 5,
'moveCharger': 15,
'addressEmergency': 10,
'wait': 5,
}
rv.LOCATIONS = [1, 2, 3, 4]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1']
def ResetState():
state.loc = {'r1': 2}
state.charge = {'r1': 4}
state.load = {'r1': NIL}
state.pos = {'c1': 1, 'o1': UNK}
state.containers = { 1:['o1'],2:[],3:[],4:[],}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
7: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
} | 19.627451 | 56 | 0.526474 |
e614a47e3f16a16923d73953e6722c5099e8baa1 | 501 | py | Python | tests/conftest.py | afrigon/fastapi-template | cb3c86353c67ef19c5abe12658e327ff37b14f90 | [
"MIT"
] | 2 | 2020-03-05T20:34:09.000Z | 2020-04-19T02:33:53.000Z | tests/conftest.py | afrigon/sharify-api | 383baa5ae089d996c2d68da8b55e566dd0cfbbf9 | [
"MIT"
] | 2 | 2019-12-17T18:49:29.000Z | 2019-12-17T23:19:11.000Z | tests/conftest.py | afrigon/fastapi-template | cb3c86353c67ef19c5abe12658e327ff37b14f90 | [
"MIT"
] | 2 | 2020-01-07T14:25:38.000Z | 2021-06-23T16:10:57.000Z | """
Application fixtures to easily create app instances
Examples:
$ python -m pytest
$ coverage run -m pytest
$ coverage report
$ coverage html
"""
import pytest
from starlette.testclient import TestClient
from app import ApplicationFactory
@pytest.fixture
def app():
"""debug application fixture"""
return ApplicationFactory('API', 'Test application').create(debug=True)
@pytest.fixture
def client(app):
"""application client fixture"""
return TestClient(app)
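# Hedged example (not part of the original fixtures) of how a test module would use
# the `client` fixture; the '/' route and the acceptable status codes are assumptions.
def test_example_root_route(client):
    response = client.get('/')
    assert response.status_code in (200, 404)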
| 17.892857 | 75 | 0.716567 |
649d10222f6083f88868a8fb2dedd78e800b42ef | 172 | py | Python | RPI_Camera.py | KyungWonCho/Research-1 | 9f0e124ffbdb2e3e148ea5cc3336c2075b4d7420 | [
"MIT"
] | null | null | null | RPI_Camera.py | KyungWonCho/Research-1 | 9f0e124ffbdb2e3e148ea5cc3336c2075b4d7420 | [
"MIT"
] | null | null | null | RPI_Camera.py | KyungWonCho/Research-1 | 9f0e124ffbdb2e3e148ea5cc3336c2075b4d7420 | [
"MIT"
] | null | null | null | from picamera import PiCamera
from time import sleep
camera = PiCamera()
camera.start_preview()
sleep(2)
camera.capture('/home/pi/Desktop/img.jpg')
camera.stop_preview()
| 17.2 | 42 | 0.77907 |
b4d6b8b0b4e4e5e98d2d3eea0318434c146cc82c | 20,530 | py | Python | flexget/plugins/output/send_telegram.py | vbabiy/Flexget | 9611689de68062cd3dddcf145d65c7a44a42d490 | [
"MIT"
] | null | null | null | flexget/plugins/output/send_telegram.py | vbabiy/Flexget | 9611689de68062cd3dddcf145d65c7a44a42d490 | [
"MIT"
] | null | null | null | flexget/plugins/output/send_telegram.py | vbabiy/Flexget | 9611689de68062cd3dddcf145d65c7a44a42d490 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import sys
from distutils.version import LooseVersion
from ssl import SSLError
from urllib2 import URLError
from sqlalchemy import Column, Integer, String
from flexget import db_schema, plugin, options
from flexget.event import event
from flexget.logger import console
from flexget.utils.database import with_session
from flexget.utils.template import RenderError
try:
import telegram
from telegram.error import TelegramError
except ImportError:
telegram = None
_MIN_TELEGRAM_VER = '3.2.0'
_PLUGIN_NAME = 'send_telegram'
_PARSERS = ['markdown', 'html']
_TOKEN_ATTR = 'bot_token'
_TMPL_ATTR = 'template'
_PARSE_ATTR = 'parse_mode'
_RCPTS_ATTR = 'recipients'
_USERNAME_ATTR = 'username'
_FULLNAME_ATTR = 'fullname'
_FIRSTNAME_ATTR = 'first'
_SURNAME_ATTR = 'sur'
_GROUP_ATTR = 'group'
ChatIdsBase = db_schema.versioned_base('telegram_chat_ids', 0)
class ChatIdEntry(ChatIdsBase):
__tablename__ = 'telegram_chat_ids'
id = Column(Integer, primary_key=True)
username = Column(String, index=True, nullable=True)
firstname = Column(String, index=True, nullable=True)
surname = Column(String, index=True, nullable=True)
group = Column(String, index=True, nullable=True)
def __str__(self):
x = ['id={0}'.format(self.id)]
if self.username:
x.append('username={0}'.format(self.username))
if self.firstname:
x.append('firstname={0}'.format(self.firstname))
if self.surname:
x.append('surname={0}'.format(self.surname))
if self.group:
x.append('group={0}'.format(self.group))
return ' '.join(x)
class SendTelegram(object):
"""Send a message to one or more Telegram users or groups upon accepting a download.
Preparations::
* Install 'python-telegram-bot' python pkg (i.e. `pip install python-telegram-bot`)
* Create a bot & obtain a token for it (see https://core.telegram.org/bots#botfather).
* For direct messages (not to a group), start a conversation with the bot and click "START" in the Telegram app.
* For group messages, add the bot to the desired group and send a start message to the bot: "/start" (mind the
leading '/').
Configuration example::
my-task:
send_telegram:
bot_token: token
template: {{title}}
          parse_mode: markdown
recipients:
- username: my-user-name
- group: my-group-name
- fullname:
first: my-first-name
sur: my-sur-name
Bootstrapping and testing the bot::
* Execute: `flexget send_telegram bootstrap`.
Look at the console output and make sure that the operation was successful.
* Execute: `flexget send_telegram test-msg`.
This will send a test message for every recipient you've configured.
Configuration notes::
You may use any combination of recipients types (`username`, `group` or `fullname`) - 0 or more of each (but you
need at least one total...).
`template`::
Optional. The template from the example is the default.
`parse_mode`::
Optional. Whether the template uses `markdown` or `html` formatting.
    NOTE: The markdown parser will fall back to basic parsing if there is a parsing error. This can be caused by
    unclosed tags (watch out for wandering underscores when using markdown).
`username` vs. `fullname`::
Not all Telegram users have a username. In such cases you would have to use the `fullname` approach. Otherwise, it
is much easier to use the `username` configuration.
"""
log = None # initialized during plugin.register
""":type: flexget.logger.FlexGetLogger"""
_token = None
_tmpl = None
_use_markdown = False
_usernames = None
_fullnames = None
_groups = None
_bot = None
schema = {
'type': 'object',
'properties': {
_TOKEN_ATTR: {'type': 'string'},
_TMPL_ATTR: {'type': 'string', 'default': '{{title}}'},
_PARSE_ATTR: {'type': 'string', 'enum': _PARSERS},
_RCPTS_ATTR: {
'type': 'array',
'minItems': 1,
'items': {
'oneOf': [
{
'type': 'object',
'properties': {
_USERNAME_ATTR: {'type': 'string'},
},
'required': [_USERNAME_ATTR],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {
_FULLNAME_ATTR: {
'type': 'object',
'properties': {
_FIRSTNAME_ATTR: {'type': 'string'},
_SURNAME_ATTR: {'type': 'string'},
},
'required': [_FIRSTNAME_ATTR, _SURNAME_ATTR],
'additionalProperties': False,
},
},
'required': [_FULLNAME_ATTR],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {
_GROUP_ATTR: {'type': 'string'},
},
'required': [_GROUP_ATTR],
'additionalProperties': False,
},
],
},
},
},
'required': [_TOKEN_ATTR, _RCPTS_ATTR],
'additionalProperties': False,
}
def _parse_config(self, config):
"""
:type config: dict
"""
self._token = config[_TOKEN_ATTR]
self._tmpl = config[_TMPL_ATTR]
self._parse_mode = config.get(_PARSE_ATTR)
self._usernames = []
self._fullnames = []
self._groups = []
for i in config[_RCPTS_ATTR]:
if _USERNAME_ATTR in i:
self._usernames.append(i[_USERNAME_ATTR])
elif _FULLNAME_ATTR in i:
fullname = i[_FULLNAME_ATTR]
firstname = fullname[_FIRSTNAME_ATTR]
surname = fullname[_SURNAME_ATTR]
self._fullnames.append((firstname, surname))
elif _GROUP_ATTR in i:
self._groups.append(i[_GROUP_ATTR])
def on_task_output(self, task, config):
"""makes this plugin count as output (stops warnings about missing outputs)"""
pass
def on_task_exit(self, task, config):
"""Send telegram message(s) at exit"""
session = task.session
chat_ids = self._real_init(session, config)
if not chat_ids:
return
self._send_msgs(task, chat_ids)
def _real_init(self, session, config, ):
self._enforce_telegram_plugin_ver()
self._parse_config(config)
self.log.debug('token={0} parse_mode={5}, tmpl={4!r} usernames={1} fullnames={2} groups={3}'.format(
self._token, self._usernames, self._fullnames, self._groups, self._tmpl, self._parse_mode))
self._init_bot()
chat_ids = self._get_chat_ids_n_update_db(session)
return chat_ids
def bootstrap(self, session, config):
"""bootstrap the plugin configuration and update db with cached chat_ids"""
console('{0} - bootstrapping...'.format(_PLUGIN_NAME))
chat_ids = self._real_init(session, config)
found_usernames = [x.username for x in chat_ids if x.username]
found_fullnames = [(x.firstname, x.surname) for x in chat_ids if x.firstname]
found_grps = [x.group for x in chat_ids if x.group]
missing_usernames = [x for x in self._usernames if x not in found_usernames]
missing_fullnames = [x for x in self._fullnames if x not in found_fullnames]
missing_grps = [x for x in self._groups if x not in found_grps]
if missing_usernames or missing_fullnames or missing_grps:
for i in missing_usernames:
console('ERR: could not find chat_id for username: {0}'.format(i))
for i in missing_fullnames:
console('ERR: could not find chat_id for fullname: {0} {1}'.format(*i))
for i in missing_grps:
console('ERR: could not find chat_id for group: {0}'.format(i))
res = False
else:
console('{0} - bootstrap was successful'.format(_PLUGIN_NAME))
res = True
return res
def test_msg(self, session, config):
"""send test message to configured recipients"""
console('{0} loading chat_ids...'.format(_PLUGIN_NAME))
chat_ids = self._real_init(session, config)
console('{0} sending test message(s)...'.format(_PLUGIN_NAME))
for chat_id in (x.id for x in chat_ids):
self._bot.sendMessage(chat_id=chat_id, text='test message from flexget')
return True
def _init_bot(self):
self._bot = telegram.Bot(self._token)
self._check_token()
def _check_token(self):
try:
self._bot.getMe()
except UnicodeDecodeError as e:
self.log.trace('bot.getMe() raised: {!r}'.format(e))
raise plugin.PluginWarning('invalid bot token')
except (URLError, SSLError) as e:
self.log.error('Could not connect Telegram servers at this time, please try again later: %s', e.args[0])
except TelegramError as e:
self.log.error('Could not connect Telegram servers at this time, please try again later: %s', e.message)
@staticmethod
def _enforce_telegram_plugin_ver():
if telegram is None:
raise plugin.PluginWarning('missing python-telegram-bot pkg')
elif not hasattr(telegram, str('__version__')):
raise plugin.PluginWarning('invalid or old python-telegram-bot pkg')
elif LooseVersion(telegram.__version__) < str(_MIN_TELEGRAM_VER):
raise plugin.PluginWarning('old python-telegram-bot ({0})'.format(telegram.__version__))
def _send_msgs(self, task, chat_ids):
kwargs = dict()
if self._parse_mode == 'markdown':
kwargs['parse_mode'] = telegram.ParseMode.MARKDOWN
elif self._parse_mode == 'html':
kwargs['parse_mode'] = 'HTML' # TODO: Change this to use ParseMode when it's implemented
for entry in task.accepted:
msg = self._render_msg(entry, self._tmpl)
for chat_id in (x.id for x in chat_ids):
try:
self._bot.sendMessage(chat_id=chat_id, text=msg, **kwargs)
except TelegramError as e:
                    if kwargs.get('parse_mode'):
self.log.warning(
'Failed to render message using parse mode %s. Falling back to basic parsing: %s' % (
kwargs['parse_mode'], e.message))
del kwargs['parse_mode']
try:
self._bot.sendMessage(chat_id=chat_id, text=msg, **kwargs)
except TelegramError as e:
                            self.log.error('Cannot send message: %s' % e.message)
                            continue
                    else:
                        self.log.error('Cannot send message: %s' % e.message)
continue
def _render_msg(self, entry, tmpl):
"""
:type entry: flexget.entry.Entry
:type tmpl: str
:rtype: str
"""
try:
msg = entry.render(tmpl)
except RenderError as e:
title = entry.get('title')
self.log.error('render error; title={0} err={1}'.format(title, e))
msg = title
return msg
def _get_chat_ids_n_update_db(self, session):
"""
:type session: sqlalchemy.orm.Session
:rtype: list[ChatIdEntry]
"""
usernames = self._usernames[:]
fullnames = self._fullnames[:]
groups = self._groups[:]
chat_ids, has_new_chat_ids = self._get_chat_ids(session, usernames, fullnames, groups)
self.log.debug('chat_ids={0}'.format(chat_ids))
if not chat_ids:
self.log.warning('no chat id found')
else:
if usernames:
self.log.warning('no chat id found for usernames: {0}'.format(usernames))
if fullnames:
self.log.warning('no chat id found for fullnames: {0}'.format(fullnames))
if groups:
self.log.warning('no chat id found for groups: {0}'.format(groups))
if has_new_chat_ids:
self._update_db(session, chat_ids)
return chat_ids
def _get_chat_ids(self, session, usernames, fullnames, groups):
"""get chat ids for `usernames`, `fullnames` & `groups`.
entries with a matching chat ids will be removed from the input lists.
:type session: sqlalchemy.orm.Session
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:returns: chat ids, new chat ids found?
:rtype: list[ChatIdEntry], bool
"""
chat_ids = list()
self.log.debug('loading cached chat ids')
chat_ids = self._get_cached_chat_ids(session, usernames, fullnames, groups)
self.log.debug('found {0} cached chat_ids: {1}'.format(len(chat_ids), ['{0}'.format(x) for x in chat_ids]))
if not (usernames or fullnames or groups):
self.log.debug('all chat ids found in cache')
return chat_ids, False
self.log.debug('loading new chat ids')
new_chat_ids = list(self._get_new_chat_ids(usernames, fullnames, groups))
self.log.debug('found {0} new chat_ids: {1}'.format(len(new_chat_ids), ['{0}'.format(x) for x in new_chat_ids]))
chat_ids.extend(new_chat_ids)
return chat_ids, bool(new_chat_ids)
@staticmethod
def _get_cached_chat_ids(session, usernames, fullnames, groups):
"""get chat ids from the cache (DB). remove found entries from `usernames`, `fullnames` & `groups`
:type session: sqlalchemy.orm.Session
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:rtype: list[ChatIdEntry]
"""
chat_ids = list()
cached_usernames = dict((x.username, x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.username != None).all())
cached_fullnames = dict(((x.firstname, x.surname), x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.firstname != None).all())
cached_groups = dict((x.group, x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.group != None).all())
len_ = len(usernames)
for i, username in enumerate(reversed(usernames)):
item = cached_usernames.get(username)
if item:
chat_ids.append(item)
usernames.pop(len_ - i - 1)
len_ = len(fullnames)
for i, fullname in enumerate(reversed(fullnames)):
item = cached_fullnames.get(fullname)
if item:
chat_ids.append(item)
fullnames.pop(len_ - i - 1)
len_ = len(groups)
for i, grp in enumerate(reversed(groups)):
item = cached_groups.get(grp)
if item:
chat_ids.append(item)
groups.pop(len_ - i - 1)
return chat_ids
def _get_new_chat_ids(self, usernames, fullnames, groups):
"""get chat ids by querying the telegram `bot`
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:rtype: __generator[ChatIdEntry]
"""
upd_usernames, upd_fullnames, upd_groups = self._get_bot_updates()
len_ = len(usernames)
for i, username in enumerate(reversed(usernames)):
chat = upd_usernames.get(username)
if chat is not None:
entry = ChatIdEntry(id=chat.id, username=chat.username, firstname=chat.first_name,
surname=chat.last_name)
yield entry
usernames.pop(len_ - i - 1)
len_ = len(fullnames)
for i, fullname in enumerate(reversed(fullnames)):
chat = upd_fullnames.get(fullname)
if chat is not None:
entry = ChatIdEntry(id=chat.id, username=chat.username, firstname=chat.first_name,
surname=chat.last_name)
yield entry
fullnames.pop(len_ - i - 1)
len_ = len(groups)
for i, grp in enumerate(reversed(groups)):
chat = upd_groups.get(grp)
if chat is not None:
entry = ChatIdEntry(id=chat.id, group=chat.title)
yield entry
groups.pop(len_ - i - 1)
def _get_bot_updates(self):
"""get updated chats info from telegram
:type bot: telegram.Bot
:rtype: (dict[str, telegram.User], dict[(str, str), telegram.User], dict[str, telegram.GroupChat])
"""
# highly unlikely, but if there are more than 100 msgs waiting for the bot, we should not miss one
updates = []
last_upd = 0
while 1:
ups = self._bot.getUpdates(last_upd, limit=100)
updates.extend(ups)
if len(ups) < 100:
break
last_upd = ups[-1].update_id
usernames = dict()
fullnames = dict()
groups = dict()
for chat in (x.message.chat for x in updates):
if chat.type == 'private':
usernames[chat.username] = chat
fullnames[(chat.first_name, chat.last_name)] = chat
            elif chat.type in ('group', 'supergroup', 'channel'):
groups[chat.title] = chat
else:
self.log.warn('unknown chat type: {0}'.format(type(chat)))
return usernames, fullnames, groups
def _update_db(self, session, chat_ids):
"""Update the DB with found `chat_ids`
:type session: sqlalchemy.orm.Session
:type chat_ids: list[ChatIdEntry]
"""
self.log.info('saving updated chat_ids to db')
        # avoid duplicate chat_ids (possible if the configuration specified both username & fullname)
chat_ids_d = dict((x.id, x) for x in chat_ids)
session.add_all(chat_ids_d.itervalues())
session.commit()
def _guess_task_name(manager):
for task in manager.tasks:
if _get_config(manager, task) is not None:
break
else:
task = None
return task
def _get_config(manager, task):
return manager.config['tasks'][task].get(_PLUGIN_NAME)
@with_session()
def do_cli(manager, args, session=None):
"""
:type manager: flexget.Manager
"""
task_name = _guess_task_name(manager)
config = _get_config(manager, task_name)
plugin_info = plugin.get_plugin_by_name(_PLUGIN_NAME)
send_telegram = plugin_info.instance
""":type: SendTelegram"""
if args.action == 'bootstrap':
res = send_telegram.bootstrap(session, config)
elif args.action == 'test-msg':
res = send_telegram.test_msg(session, config)
else:
raise RuntimeError('unknown action')
sys.exit(int(not res))
@event('plugin.register')
def register_plugin():
plugin.register(SendTelegram, _PLUGIN_NAME, api_ver=2)
@event('options.register')
def register_parser_arguments():
parser = options.register_command(_PLUGIN_NAME, do_cli, help='{0} cli'.format(_PLUGIN_NAME))
""":type: options.CoreArgumentParser"""
subp = parser.add_subparsers(dest='action')
bsp = subp.add_parser('bootstrap', help='bootstrap the plugin according to config')
bsp.add_argument('--tasks', )
subp.add_parser('test-msg', help='send test message to all configured recipients')
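# Hedged sketch (not part of the plugin) of the chat-id discovery that `bootstrap`
# depends on: a recipient must have messaged the bot (or the bot must be in the
# group) for the chat to appear in getUpdates. The token value is a placeholder.
def _example_list_chat_ids(token='123456:ABC-bot-token'):
    bot = telegram.Bot(token)
    # Each pending update carries the originating chat; its id is what gets cached
    # in the telegram_chat_ids table above.
    return [(u.message.chat.id, u.message.chat.type) for u in bot.getUpdates()]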
| 36.858169 | 120 | 0.578811 |
08f5102c7998ca277a75754bba5044337112f6bf | 769 | py | Python | guestbook/views.py | hcpthanks/vCard | cc9a301f413961c398c355426013c0cc05fbb1b7 | [
"MIT"
] | null | null | null | guestbook/views.py | hcpthanks/vCard | cc9a301f413961c398c355426013c0cc05fbb1b7 | [
"MIT"
] | null | null | null | guestbook/views.py | hcpthanks/vCard | cc9a301f413961c398c355426013c0cc05fbb1b7 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse
from .models import Message
# Create your views here.
def post_message(request):
"""首页留言提交处理
"""
#如果用户客户端提交方法是POST
if request.method =='POST':
# 得到想要的数据项
name = request.POST.get('name', '')
email = request.POST.get('email','')
content = request.POST.get('message','')
# 保障name和content为必填
if name and content:
#创建message实例
msg = Message(name=name, email=email, message=content)
# 留言保存
msg.save()
#留言填写后转跳到首页
return redirect(reverse('home'))
else:
return HttpResponse('用户名及留言必须填写!')
return redirect(reverse('home'))
| 28.481481 | 66 | 0.592978 |
13d6b1f3c18ce36f6c003cff070c4d8f464451e7 | 132 | py | Python | params/scenarios/partlyconstrained.py | systemsecologygroup/EasterIslandABM | bcd2e1268b41a549c6e47508650ec2ce8958ad1b | [
"MIT"
] | null | null | null | params/scenarios/partlyconstrained.py | systemsecologygroup/EasterIslandABM | bcd2e1268b41a549c6e47508650ec2ce8958ad1b | [
"MIT"
] | null | null | null | params/scenarios/partlyconstrained.py | systemsecologygroup/EasterIslandABM | bcd2e1268b41a549c6e47508650ec2ce8958ad1b | [
"MIT"
] | null | null | null | params_scenario = {
"n_agents_arrival": 2,
"p_split_threshold": int(36),
"r_t": 2,
"r_c": 1,
"gamma": 0, # 0
}
| 16.5 | 33 | 0.522727 |
788b40b7f0262c2081143410f03fd2375ea671c7 | 961 | py | Python | models/rf.py | uva-hydroinformatics-lab/flood_data | de82afb48afbc5b6140c363b934c13827762c13e | [
"MIT"
] | null | null | null | models/rf.py | uva-hydroinformatics-lab/flood_data | de82afb48afbc5b6140c363b934c13827762c13e | [
"MIT"
] | 12 | 2018-07-23T21:14:52.000Z | 2018-07-26T03:46:56.000Z | models/rf.py | uva-hydroinformatics-lab/flood_data | de82afb48afbc5b6140c363b934c13827762c13e | [
"MIT"
] | 2 | 2018-07-24T15:50:33.000Z | 2018-07-27T19:04:38.000Z | from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from db_scripts.main_db_script import data_dir, db_filename
from hr_db_scripts.main_db_script import get_table_for_variable_code, get_db_table_as_df
import pandas as pd
import matplotlib.pyplot as plt
df = get_db_table_as_df('for_model_avgs', dbfilename=db_filename)
print(df.shape)
df = df[df.rd > 0.01]
print(df.shape)
out_col = 'num_flooded'
in_cols = [a for a in df.columns if a not in ['event_date', 'event_name']]
reg = RandomForestRegressor()
reg.fit(df[in_cols], df[out_col])
preds = reg.predict(df[in_cols])
maxval = df[out_col].max()
fig, ax = plt.subplots(1)
ax.scatter(df.num_flooded, preds)
ax.set_aspect('equal', adjustable='box-forced')
ax.set_xlim((0, maxval*1.05))
ax.set_ylim((0, maxval*1.05))
plt.show()
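# Hedged sketch (not part of the original script): the train_test_split import above
# suggests a held-out evaluation; the split size and random_state here are assumptions.
def example_holdout_fit(df, in_cols, out_col):
    x_train, x_test, y_train, y_test = train_test_split(
        df[in_cols], df[out_col], test_size=0.2, random_state=0)
    model = RandomForestRegressor()
    model.fit(x_train, y_train)
    # R^2 on data the model has not seen, unlike the in-sample scatter plot above
    return model.score(x_test, y_test)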
| 32.033333 | 88 | 0.778356 |
da7a7e4541a33981e8b0b3135ec24413fdeff187 | 1,766 | py | Python | docpub/docs/pipeline.py | mcclatchy/docpub | 1ad006803d60aa508394ae656b9554bb815bf92d | [
"Apache-2.0"
] | 5 | 2017-08-07T13:31:20.000Z | 2021-05-23T22:46:14.000Z | docpub/docs/pipeline.py | mcclatchy/docpub | 1ad006803d60aa508394ae656b9554bb815bf92d | [
"Apache-2.0"
] | 2 | 2020-06-05T17:26:25.000Z | 2021-06-10T18:47:54.000Z | docpub/docs/pipeline.py | mcclatchy/docpub | 1ad006803d60aa508394ae656b9554bb815bf92d | [
"Apache-2.0"
] | 1 | 2017-08-07T13:31:20.000Z | 2017-08-07T13:31:20.000Z | # extending python-social-auth pipeline options
from django.contrib.auth.models import Group #, User
# from django.core.mail import send_mail
from docpub.settings import APP_DOMAIN, EMAIL_RECIPIENT, EMAIL_SENDER, SLACK_USER
from docs.slackbot import slackbot
def apply_permissions(backend, user, response, *args, **kwargs):
## if the user hasn't logged in before
if not user.last_login:
## get the add/edit/delete group
group = Group.objects.get(name='Add/edit/delete documents')
## add the user to that group
group.user_set.add(user)
## get the edit user group
group = Group.objects.get(name='Edit user')
## add the user to that group
group.user_set.add(user)
## set the user as staff
user.is_staff = True
user.save()
# def email_admins(backend, user, response, *args, **kwargs):
# if not user.last_login:
# recipients = [EMAIL_RECIPIENT]
# subject = 'New user registered: {user}'.format(user=user)
# message = None
# html_message = 'Edit user profile:<br> {domain}/admin/auth/user/{id}/change/'.format(domain=APP_DOMAIN, id=user.id)
# send_mail(
# subject, ## subject (string)
# message, ## message (string)
# EMAIL_SENDER, ## sender (string)
# recipients, ## recipients (list)
# fail_silently=False,
# html_message=html_message,
# )
def slack_notify(backend, user, response, *args, **kwargs):
if not user.last_login:
message = '{notify} New user registered: {user}\n\n Edit user profile:\n http://{domain}/admin/auth/user/{id}/change/'.format(notify=SLACK_USER, user=user, domain=APP_DOMAIN, id=user.id)
slackbot(message)
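# Hedged sketch (not part of this module): these functions only run when listed in
# the project's SOCIAL_AUTH_PIPELINE setting; the dotted paths below assume this
# module is importable as docs.pipeline, and the core entries are abbreviated.
#
# SOCIAL_AUTH_PIPELINE = (
#     'social_core.pipeline.social_auth.social_details',
#     # ... remaining default python-social-auth entries ...
#     'social_core.pipeline.user.user_details',
#     'docs.pipeline.apply_permissions',
#     'docs.pipeline.slack_notify',
# )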
| 41.069767 | 194 | 0.643828 |
2803c434eb047663935f62a30f9b8ca1d7973115 | 572 | py | Python | parsercode/daedcode/step3_archive_arena_file.py | DeadlyK1tten/arena_log_parser | d672df63fefd55bd92ad31bd472464073ceb6019 | [
"Apache-2.0"
] | null | null | null | parsercode/daedcode/step3_archive_arena_file.py | DeadlyK1tten/arena_log_parser | d672df63fefd55bd92ad31bd472464073ceb6019 | [
"Apache-2.0"
] | null | null | null | parsercode/daedcode/step3_archive_arena_file.py | DeadlyK1tten/arena_log_parser | d672df63fefd55bd92ad31bd472464073ceb6019 | [
"Apache-2.0"
] | null | null | null | """
archive output_log.txt to archive
Tested on Windows; will it work anywhere else?
"""
import glob
import os
def main():
print('starting')
if not os.path.exists('output_log.txt'):
print('no log file')
return
file_list = glob.glob(os.path.join('archive', 'output_log*.txt'))
for n in range(0, len(file_list)+1):
attempt = os.path.join('archive', 'output_log_{0:03}.txt'.format(n))
if not os.path.exists(attempt):
os.rename('output_log.txt', attempt)
break
if __name__ == '__main__':
main() | 22.88 | 76 | 0.617133 |
e7e35a172884c951ba7d4a6346a67ea48fcc6dce | 6,573 | py | Python | ai4eutils/write_html_image_list.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 32 | 2019-07-01T04:50:47.000Z | 2022-03-16T01:48:16.000Z | ai4eutils/write_html_image_list.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 5 | 2020-01-20T00:43:10.000Z | 2020-08-13T00:42:20.000Z | ai4eutils/write_html_image_list.py | dnarqq/WildHack | 4fb9e4545cb47a4283ebc1dec955c0817b1664c0 | [
"MIT"
] | 27 | 2019-11-19T23:27:51.000Z | 2022-03-25T20:22:28.000Z | #
# function write_html_image_list(filename, images, options)
#
# Given a list of image file names (or dicts), writes an HTML file that
# shows all those images, with an optional one-line header above each.
#
# Each element of "images" can be a filename string or a dict with elements
# 'filename', 'title', 'imageStyle', 'textStyle'.
#
# Strips directory information away if options['makeRelative'] == 1.
#
# Tries to convert absolute to relative paths if options['makeRelative'] == 2.
#
# Owner: Dan Morris ([email protected])
#
#%% Constants and imports
import math
import matlab_porting_tools as mpt
#%% write_html_image_list
def write_html_image_list(filename=None,images=None,options={}):
# returns an options struct
if 'fHtml' not in options:
options['fHtml'] = -1
if 'makeRelative' not in options:
options['makeRelative'] = 0
if 'headerHtml' not in options:
options['headerHtml'] = ''
if 'trailerHtml' not in options:
options['trailerHtml'] = ''
if 'defaultTextStyle' not in options:
options['defaultTextStyle'] = \
"font-family:calibri,verdana,arial;font-weight:bold;font-size:150%;text-align:left;margin:0px;"
if 'defaultImageStyle' not in options:
options['defaultImageStyle'] = \
"margin:0px;margin-top:5px;margin-bottom:5px;"
# Possibly split the html output for figures into multiple files; Chrome gets sad with
# thousands of images in a single tab.
if 'maxFiguresPerHtmlFile' not in options:
options['maxFiguresPerHtmlFile'] = math.inf
if filename == None:
return options
# images may be a list of images or a list of image/style/title dictionaries,
# enforce that it's the latter to simplify downstream code
for iImage,imageInfo in enumerate(images):
if isinstance(imageInfo,str):
imageInfo = {'filename':imageInfo,'imageStyle':'','title':'','textStyle':''}
if 'filename' not in imageInfo:
imageInfo['filename'] = ''
if 'imageStyle' not in imageInfo:
imageInfo['imageStyle'] = options['defaultImageStyle']
if 'title' not in imageInfo:
imageInfo['title'] = ''
if 'textStyle' not in imageInfo:
textStyle = options['defaultTextStyle']
imageInfo['textStyle'] = options['defaultTextStyle']
images[iImage] = imageInfo
# Remove leading directory information from filenames if requested
if options['makeRelative'] == 1:
for iImage in range(0,len(images)):
_,n,e = mpt.fileparts(images[iImage]['filename'])
images[iImage]['filename'] = n + e
elif options['makeRelative'] == 2:
baseDir,_,_ = mpt.fileparts(filename)
if len(baseDir) > 1 and baseDir[-1] != '\\':
baseDir = baseDir + '\\'
for iImage in range(0,len(images)):
fn = images[iImage]['filename']
fn = fn.replace(baseDir,'')
images[iImage]['filename'] = fn
nImages = len(images)
# If we need to break this up into multiple files...
if nImages > options['maxFiguresPerHtmlFile']:
# You can't supply your own file handle in this case
if options['fHtml'] != -1:
raise ValueError(
                "You can't supply your own file handle if we have to page the image set")
figureFileStartingIndices = list(range(0,nImages,options['maxFiguresPerHtmlFile']))
assert len(figureFileStartingIndices) > 1
# Open the meta-output file
fMeta = open(filename,'w')
# Write header stuff
fMeta.write('<html><body>\n')
fMeta.write(options['headerHtml'])
fMeta.write('<table border = 0 cellpadding = 2>\n')
for startingIndex in figureFileStartingIndices:
iStart = startingIndex
iEnd = startingIndex+options['maxFiguresPerHtmlFile']-1;
if iEnd >= nImages:
iEnd = nImages-1
trailer = 'image_{:05d}_{:05d}'.format(iStart,iEnd)
localFiguresHtmlFilename = mpt.insert_before_extension(filename,trailer)
fMeta.write('<tr><td>\n')
            fMeta.write('<p style="padding-bottom:0px;margin-bottom:0px;text-align:left;font-family:\'segoe ui\',calibri,arial;font-size:100%;text-decoration:none;font-weight:bold;">')
fMeta.write('<a href="{}">Figures for images {} through {}</a></p></td></tr>\n'.format(
localFiguresHtmlFilename,iStart,iEnd))
localImages = images[iStart:iEnd+1]
localOptions = options.copy();
localOptions['headerHtml'] = '';
localOptions['trailerHtml'] = '';
# Make a recursive call for this image set
write_html_image_list(localFiguresHtmlFilename,localImages,localOptions)
# ...for each page of images
fMeta.write('</table></body>\n')
fMeta.write(options['trailerHtml'])
fMeta.write('</html>\n')
fMeta.close()
return options
# ...if we have to make multiple sub-pages
bCleanupFile = False
if options['fHtml'] == -1:
bCleanupFile = True;
fHtml = open(filename,'w')
else:
fHtml = options['fHtml']
fHtml.write('<html><body>\n')
fHtml.write(options['headerHtml'])
# Write out images
for iImage,image in enumerate(images):
title = image['title']
imageStyle = image['imageStyle']
textStyle = image['textStyle']
filename = image['filename']
# Remove unicode characters
title = title.encode('ascii','ignore').decode('ascii')
filename = filename.encode('ascii','ignore').decode('ascii')
if len(title) > 0:
fHtml.write(
'<p style="{}">{}</p>\n'\
.format(textStyle,title))
fHtml.write('<img src="{}" style="{}">\n'.format(filename,imageStyle))
if iImage != len(images)-1:
fHtml.write('<br/>')
# ...for each image we need to write
fHtml.write(options['trailerHtml'])
fHtml.write('</body></html>\n')
if bCleanupFile:
fHtml.close()
# ...function
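# Hedged usage sketch (not part of the original module); the file names here are placeholders.
def _example_usage():
    options = write_html_image_list()  # with no filename, this just returns the default options
    options['makeRelative'] = 1
    images = ['one.jpg',
              {'filename': 'two.jpg', 'title': 'Second image'}]
    write_html_image_list('index.html', images, options)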
| 34.234375 | 184 | 0.57508 |
ecea65ae45fd2bf00f18ee807d93dc2b1786376e | 25,649 | py | Python | test/ReadsAlignmentUtils_server_test.py | Tianhao-Gu/ReadsAlignmentUtils | dfd5f90489bb8831b142605b36b08f9ebb9d7e3d | [
"MIT"
] | null | null | null | test/ReadsAlignmentUtils_server_test.py | Tianhao-Gu/ReadsAlignmentUtils | dfd5f90489bb8831b142605b36b08f9ebb9d7e3d | [
"MIT"
] | 4 | 2018-08-08T00:35:39.000Z | 2021-01-04T19:57:25.000Z | test/ReadsAlignmentUtils_server_test.py | Tianhao-Gu/ReadsAlignmentUtils | dfd5f90489bb8831b142605b36b08f9ebb9d7e3d | [
"MIT"
] | 13 | 2017-05-30T14:53:37.000Z | 2020-12-16T22:58:32.000Z | # -*- coding: utf-8 -*-
import glob
import hashlib
import inspect
import os # noqa: F401
import shutil
import tempfile
import time
import unittest
from configparser import ConfigParser
from datetime import datetime
from os import environ
from pprint import pprint # noqa: F401
from zipfile import ZipFile
import requests
from ReadsAlignmentUtils.authclient import KBaseAuth as _KBaseAuth
from ReadsAlignmentUtils.ReadsAlignmentUtilsImpl import ReadsAlignmentUtils
from ReadsAlignmentUtils.ReadsAlignmentUtilsServer import MethodContext
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.GenomeFileUtilClient import GenomeFileUtil
from installed_clients.ReadsUtilsClient import ReadsUtils
from installed_clients.WorkspaceClient import Workspace
def dictmerge(x, y):
z = x.copy()
z.update(y)
return z
class ReadsAlignmentUtilsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.token = environ.get('KB_AUTH_TOKEN', None)
cls.callbackURL = environ.get('SDK_CALLBACK_URL')
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('ReadsAlignmentUtils'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(cls.token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': cls.token,
'user_id': user_id,
'provenance': [
{'service': 'ReadsAlignmentUtils',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.shockURL = cls.cfg['shock-url']
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = Workspace(cls.wsURL)
cls.ws = Workspace(cls.wsURL, token=cls.token)
cls.hs = HandleService(url=cls.cfg['handle-service-url'],
token=cls.token)
# create workspace
wssuffix = int(time.time() * 1000)
wsname = "test_alignment_" + str(wssuffix)
cls.wsinfo = cls.wsClient.create_workspace({'workspace': wsname})
print('created workspace ' + cls.getWsName())
cls.serviceImpl = ReadsAlignmentUtils(cls.cfg)
cls.readUtilsImpl = ReadsUtils(cls.callbackURL)
cls.dfu = DataFileUtil(cls.callbackURL)
cls.assemblyUtil = AssemblyUtil(cls.callbackURL)
cls.gfu = GenomeFileUtil(cls.callbackURL)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
cls.staged = {}
cls.nodes_to_delete = []
cls.handles_to_delete = []
cls.setupTestData()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
if hasattr(cls, 'nodes_to_delete'):
for node in cls.nodes_to_delete:
cls.delete_shock_node(node)
if hasattr(cls, 'handles_to_delete'):
cls.hs.delete_handles(cls.hs.hids_to_handles(cls.handles_to_delete))
print('Deleted handles ' + str(cls.handles_to_delete))
def getWsClient(self):
return self.wsClient
@classmethod
def getWsName(cls):
return cls.wsinfo[1]
@classmethod
def getImpl(cls):
return cls.serviceImpl
def getContext(self):
return self.ctx
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print('Deleted shock node ' + node_id)
@classmethod
def upload_file_to_shock(cls, file_path):
"""
        Use DataFileUtil.file_to_shock() to save a file to a SHOCK instance.
"""
if file_path is None:
raise Exception("No file given for upload to SHOCK!")
# copy file to where DFU can see it (can only see scratch)
src_file_basename = os.path.basename(file_path)
shared_file_path = os.path.join(cls.scratch, src_file_basename)
shutil.copy2(file_path, shared_file_path)
# Upload files to shock
try:
shock_info = cls.dfu.file_to_shock({
'file_path': shared_file_path,
'make_handle': 1
})
except Exception as e:
raise ValueError('Unable to store ' + file_path + str(e))
# remember shock info
if not hasattr(cls, 'shock_ids'):
cls.shock_ids = []
cls.shock_ids.append(shock_info['shock_id'])
return shock_info
@classmethod
def upload_file_to_shock_and_get_handle(cls, test_file):
"""
Uploads the file in test_file to shock and returns the node and a
handle to the node.
"""
print('loading file to shock: ' + test_file)
node = cls.upload_file_to_shock(test_file)
pprint(node)
cls.nodes_to_delete.append(node['shock_id'])
print('creating handle for shock id ' + node['shock_id'])
handle_id = cls.hs.persist_handle({'id': node['shock_id'],
'type': 'shock',
'url': cls.shockURL
})
cls.handles_to_delete.append(handle_id)
md5 = node['handle']['remote_md5']
return node['shock_id'], handle_id, md5, node['size']
@classmethod
def upload_reads(cls, wsobjname, object_body, fwd_reads,
rev_reads=None, single_end=False, sequencing_tech='Illumina',
single_genome='1'):
ob = dict(object_body) # copy
ob['sequencing_tech'] = sequencing_tech
# ob['single_genome'] = single_genome
ob['wsname'] = cls.getWsName()
ob['name'] = wsobjname
if single_end or rev_reads:
ob['interleaved'] = 0
else:
ob['interleaved'] = 1
print('\n===============staging data for object ' + wsobjname +
'================')
print('uploading forward reads file ' + fwd_reads['file'])
fwd_id, fwd_handle_id, fwd_md5, fwd_size = \
cls.upload_file_to_shock_and_get_handle(fwd_reads['file'])
ob['fwd_id'] = fwd_id
rev_id = None
rev_handle_id = None
if rev_reads:
print('uploading reverse reads file ' + rev_reads['file'])
rev_id, rev_handle_id, rev_md5, rev_size = \
cls.upload_file_to_shock_and_get_handle(rev_reads['file'])
ob['rev_id'] = rev_id
obj_ref = cls.readUtilsImpl.upload_reads(ob)
objdata = cls.wsClient.get_object_info_new({
'objects': [{'ref': obj_ref['obj_ref']}]
})[0]
cls.staged[wsobjname] = {'info': objdata,
'ref': cls.make_ref(objdata),
'fwd_node_id': fwd_id,
'rev_node_id': rev_id,
'fwd_handle_id': fwd_handle_id,
'rev_handle_id': rev_handle_id
}
@classmethod
def upload_genome(cls, wsobj_name, file_name):
genbank_file_path = os.path.join(cls.scratch, file_name)
shutil.copy(os.path.join('data', file_name), genbank_file_path)
genome_obj = cls.gfu.genbank_to_genome({'file': {'path': genbank_file_path},
'workspace_name': cls.getWsName(),
'genome_name': wsobj_name,
'source': 'Ensembl',
'generate_ids_if_needed': 1,
'generate_missing_genes': 1
})
cls.staged[wsobj_name] = {'info': genome_obj['genome_info'],
'ref': genome_obj['genome_ref']}
@classmethod
def upload_assembly(cls, wsobj_name, file_name):
fasta_path = os.path.join(cls.scratch, file_name)
shutil.copy(os.path.join('data', file_name), fasta_path)
assembly_ref = cls.assemblyUtil.save_assembly_from_fasta({'file': {'path': fasta_path},
'workspace_name': cls.getWsName(),
'assembly_name': wsobj_name
})
cls.staged[wsobj_name] = {'info': None,
'ref': assembly_ref}
@classmethod
def upload_empty_data(cls, wsobjname):
objdata = cls.wsClient.save_objects({
'workspace': cls.getWsName(),
'objects': [{'type': 'Empty.AType',
'data': {},
'name': 'empty'
}]
})[0]
cls.staged[wsobjname] = {'info': objdata,
'ref': cls.make_ref(objdata),
}
@classmethod
def save_ws_obj(cls, obj, objname, objtype):
return cls.ws.save_objects({
'workspace': cls.getWsName(),
'objects': [{'type': objtype,
'data': obj,
'name': objname
}]
})[0]
@classmethod
def setupTestFile(cls, file_name):
file_base, file_ext = os.path.splitext(file_name)
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
upload_dir = os.path.join(cls.scratch, 'upload_' + file_ext[1:] + '_' + str(timestamp))
os.mkdir(upload_dir)
ret = {}
ret['name'] = file_name
ret['data_file'] = os.path.join('data/', file_name)
ret['file_path'] = os.path.join(upload_dir, file_name)
ret['size'] = cls.getSize(ret.get('data_file'))
ret['md5'] = cls.md5(ret.get('data_file'))
return ret
@classmethod
def setupTestData(cls):
cls.test_bam_file = cls.setupTestFile('accepted_hits.bam')
cls.test_bai_file = cls.setupTestFile('accepted_hits.bai')
cls.test_sam_file = cls.setupTestFile('accepted_hits.sam')
shutil.copy(cls.test_bam_file['data_file'], cls.test_bam_file['file_path'])
shutil.copy(cls.test_sam_file['data_file'], cls.test_sam_file['file_path'])
int_reads = {'file': 'data/interleaved.fq',
'name': '',
'type': ''}
cls.upload_reads('intbasic', {'single_genome': 1}, int_reads)
cls.upload_genome('test_genome', 'minimal.gbff')
cls.upload_assembly('test_assembly', 'test.fna')
cls.upload_empty_data('empty')
cls.more_upload_params = {
'read_library_ref': cls.getWsName() + '/intbasic',
'assembly_or_genome_ref': cls.getWsName() + '/test_assembly',
'condition': 'test_condition'
}
params = dictmerge({'destination_ref': cls.getWsName() + '/test_bam',
'file_path': cls.test_bam_file['file_path'],
'validate': 'True'
}, cls.more_upload_params)
cls.getImpl().upload_alignment(cls.ctx, params)
params = dictmerge({'destination_ref': cls.getWsName() + '/test_sam',
'file_path': cls.test_sam_file['file_path'],
'validate': 'True'
}, cls.more_upload_params)
cls.getImpl().upload_alignment(cls.ctx, params)
@classmethod
def make_ref(cls, objinfo):
return str(objinfo[6]) + '/' + str(objinfo[0]) + '/' + str(objinfo[4])
@classmethod
def getSize(cls, filename):
return os.path.getsize(filename)
@classmethod
def md5(cls, filename):
with open(filename, 'rb') as file_:
hash_md5 = hashlib.md5()
buf = file_.read(65536)
while len(buf) > 0:
hash_md5.update(buf)
buf = file_.read(65536)
return hash_md5.hexdigest()
    # NOTE: According to Python unittest naming rules, test method names should start with 'test'. # noqa
def upload_alignment_success(self, params, expected):
obj = self.dfu.get_objects(
{'object_refs': [params.get('destination_ref')]})['data'][0]
print("============ GET OBJECTS OUTPUT ==============")
pprint(obj)
print("==============================================")
self.assertEqual(obj['info'][2].startswith(
'KBaseRNASeq.RNASeqAlignment'), True)
d = obj['data']
self.assertEqual(d['genome_id'], params.get('assembly_or_genome_ref'))
self.assertEqual(d['condition'], params.get('condition'))
self.assertEqual(d['read_sample_id'], params.get('read_library_ref'))
self.assertEqual(d['library_type'].startswith('KBaseFile.PairedEndLibrary'), True)
self.assertEqual(d['size'], expected.get('size'))
f = d['file']
self.assertEqual(f['file_name'], expected.get('name'))
self.assertEqual(f['remote_md5'], expected.get('md5'))
node = f['id']
self.nodes_to_delete.append(node)
def check_file(self, file_path, expected):
out_dir, file_name = os.path.split(file_path)
size = os.path.getsize(file_path)
md5 = self.md5(file_path)
self.assertEqual(size, expected['size'])
self.assertEqual(md5, expected['md5'])
def download_alignment_success(self, obj_name, expectedBAM, expectedSAM, expectedBAI):
test_name = inspect.stack()[1][3]
print('\n**** starting expected download success test: ' + test_name + ' ***\n')
params = {'source_ref': self.getWsName() + '/' + obj_name,
'downloadSAM': 'True',
'downloadBAI': 'True'}
ret = self.getImpl().download_alignment(self.ctx, params)[0]
print("================= DOWNLOADED FILES =================== ")
pprint(ret)
print("========================================================")
bam_file_path = os.path.join(ret.get('destination_dir'), self.test_bam_file.get('name'))
sam_file_path = glob.glob(ret.get('destination_dir') + '/*.sam')[0]
bai_file_path = glob.glob(ret.get('destination_dir') + '/*.bai')[0]
self.check_file(bam_file_path, expectedBAM)
self.check_file(sam_file_path, expectedSAM)
self.check_file(bai_file_path, expectedBAI)
def test_upload_success_bam(self):
params = dictmerge({'destination_ref': self.getWsName() + '/test_bam',
'file_path': self.test_bam_file['file_path'],
'validate': 'True'
}, self.more_upload_params)
expected = self.test_bam_file
self.upload_alignment_success(params, expected)
def test_upload_success_sam(self):
params = dictmerge({'destination_ref': self.getWsName() + '/test_sam',
'file_path': self.test_sam_file['file_path'],
'validate': 'True'
}, self.more_upload_params)
expected = self.test_bam_file
self.upload_alignment_success(params, expected)
def test_download_success_bam(self):
self.download_alignment_success('test_bam',
self.test_bam_file,
self.test_sam_file,
self.test_bai_file)
def test_download_success_sam(self):
self.download_alignment_success('test_sam',
self.test_bam_file,
self.test_sam_file,
self.test_bai_file)
def test_get_aligner_stats(self):
# test_bam_file = os.path.join("data", "accepted_hits.bam")
# bam_file = os.path.join(self.scratch, os.path.basename(test_bam_file))
# shutil.copy(test_bam_file, bam_file)
stats_data = self.getImpl()._get_aligner_stats(self.test_bam_file['file_path'])
self.assertEqual(stats_data.get('total_reads'), 15254)
self.assertEqual(stats_data.get('mapped_reads'), 14969)
self.assertEqual(stats_data.get('unmapped_reads'), 285)
self.assertEqual(stats_data.get('singletons'), 0)
self.assertEqual(stats_data.get('multiple_alignments'), 3519)
# Following test uses object refs from a narrative to test backward compatibility to download
# already created Alignment objects in RNASeq. comment the next line to run the test
@unittest.skip("skipped test_download_legacy_alignment_success")
def test_download_legacy_alignment_success(self):
ci_alignment_ref = '22254/23/1'
appdev_alignment_ref = '4389/54/1'
test_name = inspect.stack()[1][3]
print('\n**** starting expected download success test: ' + test_name + ' ***\n')
params = {'source_ref': appdev_alignment_ref,
'downloadSAM': 'True'}
ret = self.getImpl().download_alignment(self.ctx, params)[0]
print("================= DOWNLOADED FILES =================== ")
pprint(ret)
print("=======================================================")
def export_alignment_success(self, objname, export_params, expected_num_files,
expectedBAM, expectedSAM, expectedBAI):
test_name = inspect.stack()[1][3]
print('\n*** starting expected export pass test: ' + test_name + ' **')
export_params['source_ref'] = self.getWsName() + '/' + objname
shocknode = self.getImpl().export_alignment(self.ctx, export_params)[0]['shock_id']
node_url = self.shockURL + '/node/' + shocknode
headers = {'Authorization': 'OAuth ' + self.token}
r = requests.get(node_url, headers=headers, allow_redirects=True)
fn = r.json()['data']['file']['name']
tempdir = tempfile.mkdtemp(dir=self.scratch)
file_path = os.path.join(tempdir, test_name) + '.zip'
print('zip file path: ' + file_path)
print('downloading shocknode ' + shocknode)
with open(file_path, 'wb') as fhandle:
r = requests.get(node_url + '?download_raw', stream=True,
headers=headers, allow_redirects=True)
for chunk in r.iter_content(1024):
if not chunk:
break
fhandle.write(chunk)
with ZipFile(file_path) as z:
z.extractall(tempdir)
print('zip file contents: ' + str(os.listdir(tempdir)))
count = 0
for f in os.listdir(tempdir):
if '.bam' in f:
print('BAM file: ' + f)
count += 1
md5 = self.md5(os.path.join(tempdir, f))
self.assertEqual(md5, expectedBAM.get('md5'))
if '.sam' in f:
print('SAM file: ' + f)
count += 1
md5 = self.md5(os.path.join(tempdir, f))
self.assertEqual(md5, expectedSAM.get('md5'))
if '.bai' in f:
count += 1
print('BAI file: ' + f)
md5 = self.md5(os.path.join(tempdir, f))
self.assertEqual(md5, expectedBAI.get('md5'))
self.assertEqual(count, expected_num_files)
def test_success_export_alignment_bam(self):
opt_params = {'exportSAM': 'True',
'exportBAI': 'True'}
self.export_alignment_success('test_bam', opt_params, 3,
self.test_bam_file,
self.test_sam_file,
self.test_bai_file)
def test_success_export_alignment_sam(self):
opt_params = {'exportSAM': 'True',
'exportBAI': 'True'}
self.export_alignment_success('test_sam', opt_params, 3,
self.test_bam_file,
self.test_sam_file,
self.test_bai_file)
def test_valid_validate_alignment(self):
params = {'file_path': '/kb/module/test/data/samtools/accepted_hits.sam',
'ignore': ['MATE_NOT_FOUND', 'MISSING_READ_GROUP',
'INVALID_MAPPING_QUALITY']}
ret = self.getImpl().validate_alignment(self.ctx, params)[0]
self.assertEqual(True, ret['validated'])
params = {'file_path': '/kb/module/test/data/samtools/accepted_hits.sam'}
ret = self.getImpl().validate_alignment(self.ctx, params)[0]
self.assertEqual(True, ret['validated'])
def test_valid_invalidate_alignment(self):
params = {'file_path': '/kb/module/test/data/samtools/accepted_hits_invalid.sam',
'ignore': ['MATE_NOT_FOUND', 'MISSING_READ_GROUP',
'INVALID_MAPPING_QUALITY']}
ret = self.getImpl().validate_alignment(self.ctx, params)[0]
self.assertEqual(False, ret['validated'])
def fail_upload_alignment(self, params, error, exception=ValueError, do_startswith=False):
test_name = inspect.stack()[1][3]
print('\n*** starting expected upload fail test: ' + test_name + ' **')
with self.assertRaises(exception) as context:
self.getImpl().upload_alignment(self.ctx, params)
if do_startswith:
self.assertTrue(str(context.exception).startswith(error),
"Error message {} does not start with {}".format(
str(context.exception),
error))
else:
self.assertEqual(error, str(context.exception))
def test_upload_fail_empty_reads(self):
params = dictmerge({
'destination_ref': self.getWsName() + '/test_download_sam',
'file_path': self.test_sam_file['file_path']
}, self.more_upload_params)
params['read_library_ref'] = self.getWsName() + '/empty'
self.fail_upload_alignment(params, 'read_library_ref parameter should be of type ' +
'KBaseFile.SingleEndLibrary or KBaseFile.PairedEndLibrary or ' +
'KBaseAssembly.SingleEndLibrary or KBaseAssembly.PairedEndLibrary')
def test_upload_fail_no_dst_ref(self):
self.fail_upload_alignment(
dictmerge({
'condition': 'bar',
'file_path': 'test'
}, self.more_upload_params),
'destination_ref parameter is required')
def test_upload_fail_no_ws_name(self):
self.fail_upload_alignment(
dictmerge({
'condition': 'bar',
'destination_ref': '/foo',
'file_path': 'test'
}, self.more_upload_params),
'Workspace name or id is required in destination_ref')
def test_upload_fail_no_obj_name(self):
self.fail_upload_alignment(
dictmerge({
'condition': 'bar',
'destination_ref': self.getWsName() + '/',
'file_path': 'test'
}, self.more_upload_params),
'Object name or id is required in destination_ref')
def test_upload_fail_no_file(self):
self.fail_upload_alignment(
dictmerge({
'destination_ref': self.getWsName()+'/foo'
}, self.more_upload_params),
'file_path parameter is required')
def test_upload_fail_non_existant_file(self):
self.fail_upload_alignment(
dictmerge({
'destination_ref': self.getWsName()+'/foo',
'file_path': 'foo'
}, self.more_upload_params),
'File does not exist: foo')
def test_upload_fail_bad_wsname(self):
self.fail_upload_alignment(
dictmerge({
'destination_ref': '&bad' + '/foo',
'file_path': 'foo'
}, self.more_upload_params),
"'Illegal character in workspace name &bad: &'")
def test_upload_fail_non_existant_wsname(self):
self.fail_upload_alignment(
dictmerge({
'destination_ref': '1s' + '/foo',
'file_path': 'bar'
}, self.more_upload_params),
"'No workspace with name 1s exists'")
| 40.712698 | 104 | 0.557722 |
9545c3c3affc6ec3df2c8d13528c3834dfa00127 | 3,442 | py | Python | udon/email.py | GregoireDelannoy/udon | 8b71c0dd241d1fee0f90197b89338a6750050504 | [
"ISC"
] | 1 | 2020-02-11T14:46:10.000Z | 2020-02-11T14:46:10.000Z | udon/email.py | GregoireDelannoy/udon | 8b71c0dd241d1fee0f90197b89338a6750050504 | [
"ISC"
] | 1 | 2020-09-19T13:53:06.000Z | 2020-09-19T13:53:06.000Z | udon/email.py | GregoireDelannoy/udon | 8b71c0dd241d1fee0f90197b89338a6750050504 | [
"ISC"
] | 3 | 2020-07-08T07:30:38.000Z | 2021-01-27T10:19:24.000Z | #
# Copyright (c) 2019 Eric Faurot <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import email
import email.header
import email.message
class ErrorMixin:
errors = ()
def add_error(self, error):
self.errors += (error, )
class Header(ErrorMixin):
def __init__(self, name, raw):
self.name = name
self.raw = raw
assert isinstance(raw, str)
self.value = raw
try:
raw.encode('utf-8')
except UnicodeEncodeError:
self.add_error('has-surrogates')
self.value = raw.encode('utf-8', 'surrogateescape').decode('utf-8', 'replace')
@property
def decoded(self):
return self.decode()
def decode(self, unfold = True, strip = True):
def _decode(s, e):
if isinstance(s, bytes):
s = s.decode(e or 'ascii')
return s
value = self.value
if value and unfold:
value = value.replace('\n', '').replace('\r', '')
if value:
value = ''.join(_decode(s, e) for (s, e) in email.header.decode_header(value))
if value and strip:
value = ' '.join(value.strip().split())
return value
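    # Illustrative example (assumed header value) of the RFC 2047 decoding that
    # decode() delegates to email.header.decode_header:
    #   Header('Subject', '=?utf-8?q?Caf=C3=A9?=').decoded  ->  'Café'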
class Part(ErrorMixin):
raw = None
body = None
headers = ()
children = ()
def get_header(self, name):
for header in self.get_all_headers(name):
return header
def get_all_headers(self, name):
for header in self.headers:
if name.lower() == header.name.lower():
yield header
def walk(self):
def _iter(part, ancestors):
yield part, ancestors
for child in part.children:
for res in _iter(child, ancestors + (part, )):
yield res
return _iter(self, ())
class Message(Part):
@classmethod
def from_bytes(kls, data):
msg = email.message_from_bytes(data)
return kls.from_message(msg)
@classmethod
def from_message(kls, msg):
def _build_part(part, node):
part.raw = node
part.headers = tuple(Header(name, raw) for name, raw in node._headers)
payload = node.get_payload(decode = True)
if isinstance(payload, bytes):
try:
body = payload.decode(node.get_content_charset('latin-1'))
except UnicodeDecodeError:
part.add_error('payload-encoding')
body = payload.decode('latin-1')
part.body = body
if node.is_multipart():
part.children = tuple(_build_part(Part(), child) for child in node.get_payload())
return part
return _build_part(kls(), msg)
| 30.192982 | 97 | 0.600813 |
fa93e1647da961f5f636dca3ef38b48e98a3773c | 3,356 | py | Python | src/pretix/api/oauth.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/api/oauth.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/api/oauth.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from datetime import timedelta
from django.utils import timezone
from oauth2_provider.exceptions import FatalClientError
from oauth2_provider.oauth2_validators import Grant, OAuth2Validator
from oauth2_provider.settings import oauth2_settings
class Validator(OAuth2Validator):
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
if not getattr(request, 'organizers', None) and request.scopes != ['profile']:
raise FatalClientError('No organizers selected.')
expires = timezone.now() + timedelta(
seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)
g = Grant(application=request.client, user=request.user, code=code["code"],
expires=expires, redirect_uri=request.redirect_uri,
scope=" ".join(request.scopes))
g.save()
if request.scopes != ['profile']:
g.organizers.add(*request.organizers.all())
def validate_code(self, client_id, code, client, request, *args, **kwargs):
try:
grant = Grant.objects.get(code=code, application=client)
if not grant.is_expired():
request.scopes = grant.scope.split(" ")
request.user = grant.user
request.organizers = grant.organizers.all()
return True
return False
except Grant.DoesNotExist:
return False
def _create_access_token(self, expires, request, token, source_refresh_token=None):
if not getattr(request, 'organizers', None) and not getattr(source_refresh_token, 'access_token', None) and token["scope"] != 'profile':
raise FatalClientError('No organizers selected.')
if token['scope'] != 'profile':
if hasattr(request, 'organizers'):
orgs = list(request.organizers.all())
else:
orgs = list(source_refresh_token.access_token.organizers.all())
access_token = super()._create_access_token(expires, request, token, source_refresh_token=None)
if token['scope'] != 'profile':
access_token.organizers.add(*orgs)
return access_token
| 47.942857 | 144 | 0.696663 |
26cec108104050942626e96ae3d987b94e1f759c | 315 | py | Python | videoSummarizer/tag_generation/test_pmi.py | talsperre/LectureSummarizer | 4a0d5a51e80be77fb3715cb386a242871d5cf298 | [
"MIT"
] | 4 | 2020-09-20T19:37:26.000Z | 2021-12-12T08:03:17.000Z | videoSummarizer/tag_generation/test_pmi.py | talsperre/LectureSummarizer | 4a0d5a51e80be77fb3715cb386a242871d5cf298 | [
"MIT"
] | 28 | 2020-11-13T18:50:46.000Z | 2022-03-02T14:53:59.000Z | videoSummarizer/tag_generation/test_pmi.py | talsperre/LectureSummarizer | 4a0d5a51e80be77fb3715cb386a242871d5cf298 | [
"MIT"
] | 2 | 2021-03-23T11:32:43.000Z | 2021-03-24T03:50:37.000Z | from TagGenerationPMI import TagGenrationPMI
import sys
with open(sys.argv[1], 'r') as file:
data = file.read().replace('\n', '')
data = data.split(' ')
obj = TagGenrationPMI()
bigrams, trigrams = obj.generate_tags(data)
print("Bigrams are : {}".format(bigrams))
print("Trigrams are : {}".format(trigrams))
| 22.5 | 44 | 0.688889 |
a62afd91d777a60b046261dbd2ce7f0f02512ab4 | 22,801 | py | Python | aaem/components/annual_savings/annual_savings.py | gina-alaska/alaska_affordable_energy_model | 96fed0137152985ce280ea37e0affec131e3087f | [
"MIT-feh"
] | 1 | 2022-01-23T07:18:36.000Z | 2022-01-23T07:18:36.000Z | aaem/components/annual_savings/annual_savings.py | gina-alaska/alaska_affordable_energy_model | 96fed0137152985ce280ea37e0affec131e3087f | [
"MIT-feh"
] | 5 | 2017-07-14T21:56:46.000Z | 2017-07-14T21:59:15.000Z | aaem/components/annual_savings/annual_savings.py | gina-alaska/alaska_affordable_energy_model | 96fed0137152985ce280ea37e0affec131e3087f | [
"MIT-feh"
] | 2 | 2020-04-28T18:12:55.000Z | 2021-01-13T01:56:57.000Z | """
annual_savings.py
ross spicer
created: 2015/09/16
this file is the representation of what is being calculated at the bottom of
each of the spreadsheet tabs.
"""
import numpy as np
import os.path
from abc import ABCMeta, abstractmethod
from pandas import DataFrame
class AnnualSavings (object):
"""
Abstract base class to use as base class for all model components
that have the goal of generating an NPV benefit for the project
"""
__metaclass__ = ABCMeta
def calc_annual_total_savings (self):
"""
calculate an array of annual savings values for the project
pre:
self.annual_electric_savings and self.annual_heating_savings
need to be arrays of dollar values.
post:
annual_total_savings will be an array of dollar amounts.
"""
#~ print self.annual_electric_savings, self.annual_heating_savings
self.annual_total_savings = self.annual_electric_savings + \
self.annual_heating_savings
def calc_annual_costs (self, rate, cost_scaler = 1.0):
"""
calculate the cost of the project for each year.
pre:
rate should be an interest rate.
self.project_life is the # of years project is active for
self.capital_costs is the total cost of the project
post:
self.annual_costs will be a numpy array of dollar values
indicating the cost of the project per year.
"""
rate = rate / 100.0
#~ print self.component_name, cost_scaler
self.capital_costs *= cost_scaler
cost_per_year = -np.pmt(rate, self.actual_project_life,
self.capital_costs)
cpi= self.forecast.cpi[:self.actual_project_life]
self.annual_costs = cost_per_year * cpi# np.ones(self.actual_project_life)
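    # Note on the amortization above (illustrative numbers, not project data):
    # -np.pmt(rate, n, pv) converts the capital cost into a level annual payment,
    # e.g. -np.pmt(0.03, 10, 100000) ~= 11,723 $/year, which is then scaled by the
    # forecast CPI series for each year of the project.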
def calc_annual_net_benefit (self):
"""
calculate the yearly benefit of the project
pre:
annual_total_savings and annual_costs are arrays of the same length
(project life) consisting of dollar values
post:
annual_net_benefit is an array of the annual monetary benefit over
projects life time
"""
self.annual_net_benefit = self.annual_total_savings - self.annual_costs
def calc_npv (self, rate, current_year):
"""
clacualte the NPV net benfit
pre:
rate: should be the savings rate
self.annual_net_benefit is an array of the annual monetary benefit
over projects life time
post:
self.net_npv, self.benefit_npv, slef.cost_npv is a dollar value
self.benefit_cost_ratio is a ratio
"""
rate = rate / 100.0
# These need to be calculated for the actual project life
end = self.actual_project_life
# number of arrays as zero ($ value) until project start
yts = []#np.zeros((self.start_year - current_year)+1)
self.benefit_npv = np.npv(rate,
np.append(yts, self.annual_total_savings[:end]))
self.cost_npv = np.npv(rate, np.append(yts, self.annual_costs[:end]))
self.benefit_cost_ratio = self.benefit_npv/self.cost_npv
self.net_npv = np.npv(rate,
np.append(yts, self.annual_net_benefit[:end]))
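    # Note (added for clarity): np.npv evaluates NPV = sum_t v_t / (1 + rate)**t with
    # t starting at 0, so yts (currently an empty list; see the commented-out zeros
    # padding above) would shift cash flows to the years after the current year.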
def calc_cost_of_energy (self, fuel_amount, maintenance = 0):
"""
calculates the cost of energy
pre:
            fuel_amount is the amount of fuel generated, used or saved
            units may vary (i.e. kWh, gal..) per year as a scalar, list, or np.array
            maintenance is the operation and maintenance cost per year as a
            scalar, list, or np.array
post:
returns a price in $/[units of fuel_amount]
"""
yts = np.zeros((self.start_year - self.cd["current year"])+1)
if not type(maintenance) in [list,np.ndarray]:
maintenance = np.zeros(self.actual_project_life) + maintenance
else:
maintenance = maintenance[:self.actual_project_life]
maintenance = np.array(maintenance)
maintenance_npv = np.npv((self.cd['discount rate']/100.0),
np.append(yts, maintenance))
if not type(fuel_amount) in [list,np.ndarray]:
fuel_amount = np.zeros(self.actual_project_life) + fuel_amount
else:
fuel_amount = fuel_amount[:self.actual_project_life]
fuel_npv = np.npv((self.cd['discount rate']/100.0),
np.append(yts, fuel_amount))
return (self.cost_npv + maintenance_npv)/ fuel_npv
def calc_levelized_costs (self, maintenance_costs):
"""
calculate the levelized costs
pre:
maintenance_costs is the operation and maintenance cost per year
self.get_fuel_total_saved(), and self.get_total_energy_produced (),
            must exist and return numbers representing fuel saved, an energy
produced or a dict with 'MMBtu' and 'kWh' as keys whose values are
numbers
post:
self.break_even_cost is the break even cost in $/gal
self.levelized_cost_of_energy is the LCOE in $/kWh for
electricity projects, and $/MMBtu for heating projects
            for projects with electricity efficiency and heating efficiency
            self.levelized_cost_of_energy is a dictionary with 'MMBtu' and 'kWh'
as keys
"""
fuel_saved = self.get_fuel_total_saved()
self.break_even_cost = \
self.calc_cost_of_energy(fuel_saved, maintenance_costs)
energy_produced = self.get_total_energy_produced ()
if type(energy_produced) is dict:
#~ print energy_produced['MMBtu']
self.levelized_cost_of_energy = {}
self.levelized_cost_of_energy['MMBtu'] = \
self.calc_cost_of_energy(energy_produced['MMBtu'][0],
maintenance_costs)*\
energy_produced['MMBtu'][1]
self.levelized_cost_of_energy['kWh'] = \
self.calc_cost_of_energy(energy_produced['kWh'][0],
maintenance_costs)*\
energy_produced['kWh'][1]
else:
self.levelized_cost_of_energy = \
self.calc_cost_of_energy(energy_produced, maintenance_costs)
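    # Note (added for clarity): both quantities above reuse calc_cost_of_energy, so the
    # levelized cost is NPV(capital costs + O&M) divided by NPV(energy produced), split
    # per fuel type ('MMBtu'/'kWh') when both are produced, and the break even cost
    # applies the same ratio to the total fuel saved.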
def calc_internal_rate_of_return (self):
"""
calculate the interal rate of return
"""
self.irr = 0
try:
l = [-self.capital_costs] +\
self.annual_total_savings[:self.actual_project_life].tolist()
self.irr = np.irr(l)
except (AttributeError, ValueError, np.linalg.linalg.LinAlgError):
pass
def set_project_life_details (self, start_year,project_life):
"""
set the details for the project life time(
pre:
start_year is an int represnting a year
projct life: is the number(int >0) of years (<= fc_period)
fc_period: <int> length of forecast period
post:
self.start_year, and self.project_life are the input values
and self.end_year would be the year the project ends.
"""
self.start_year = start_year
#~ # do caclulations for whole forecast period & only save project life
#~ self.project_life = fc_period
#~ self.end_year = self.start_year + self.project_life
# remember the actual lifetime
self.actual_project_life = project_life
self.project_life = project_life
self.actual_end_year = self.start_year + project_life - 1
self.end_year = self.start_year + project_life - 1
def get_diesel_prices (self):
"""
get the diesel prices
pre:
community name should be in the community data.
post:
            self.diesel_prices has prices for the project life
"""
#~ prices = self.cd["diesel prices"]
#~ self.diesel_prices = prices.get_projected_prices(self.start_year,
#~ self.end_year)
self.cd["diesel prices"].index = \
self.cd["diesel prices"].index.astype(int)
self.cd["diesel prices"] = self.cd["diesel prices"].astype(float)
self.diesel_prices = self.cd["diesel prices"].ix[self.start_year:]
self.diesel_prices.columns = ['prices']
start, end = self.start_year, self.end_year
existing_len = len(self.diesel_prices.ix[start:end])
extend_by = (end + 1) - start - existing_len
if extend_by > 0:
extend = DataFrame(
index=range(
self.diesel_prices.index[-1] + 1,
self.diesel_prices.index[-1]+extend_by +1
),
columns=['prices'])
extend['prices'] = self.diesel_prices.iloc[-1]['prices']
self.diesel_prices = \
DataFrame( self.diesel_prices.ix[start:end]['prices']).\
append(extend)
else:
# -1 to ensure same behavour
self.diesel_prices = \
DataFrame(self.diesel_prices['prices'].ix[start:end])
self.diesel_prices = self.diesel_prices['prices'].values
    def get_electricity_prices (self):
        """
        get the electricity prices
        post:
            self.electricity_prices has prices for the project life
        """
self.cd["electric prices"].index = \
self.cd["electric prices"].index.astype(int)
self.cd["electric prices"] = \
self.cd["electric prices"].astype(float)
prices = self.cd["electric prices"]
self.electricity_prices = prices.ix[self.start_year:]
self.cd["electric prices"].index = self.cd["electric prices"].index.astype(int)
self.cd["electric prices"] = self.cd["electric prices"].astype(float)
self.electricity_prices = self.cd["electric prices"].ix[self.start_year:]
self.electricity_prices.columns = ['prices']
start, end = self.start_year, self.end_year
existing_len = len(self.electricity_prices.ix[start:end])
extend_by = (end + 1) - start - existing_len
if extend_by > 0:
extend = DataFrame(
index=range(
self.electricity_prices.index[-1] +1 ,
self.electricity_prices.index[-1] + 1+extend_by
),
columns=['prices'])
extend['prices'] = self.electricity_prices.iloc[-1]['prices']
self.electricity_prices = \
DataFrame( self.electricity_prices.ix[start:end]['prices']).\
append(extend)
else:
# -1 to ensure same behavour
self.electricity_prices = \
DataFrame(self.electricity_prices['prices'].ix[start:end])
self.electricity_prices = self.electricity_prices['prices'].values
    def save_additional_output(self, directory):
        """
        save any component-specific output files.
        the default implementation is a no-op; overload in child components as needed.
        """
pass
@abstractmethod
def calc_capital_costs (self):
"""
abstract function
should be implemented by child class to calculate self.capital_costs
(the cost of the project) a dollar value
"""
raise NotImplementedError, "should be implemented by child class to" +\
" calculate self.capital_costs(the cost of the project) a dollar value"
@abstractmethod
def calc_annual_electric_savings (self):
"""
abstract function
should be implemented by child class to create
self.annual_electric_savings as an np.array, length self.project_life,
of dollar values(numbers)"
"""
raise NotImplementedError, "should be implemented by child class to" +\
" create self.annual_electric_savings as an np.array, length" +\
" self.project_life, of dollar values(numbers)"
@abstractmethod
def calc_annual_heating_savings (self):
"""
abstract function
should be implemented by child class to create
self.annual_heating_savings as an np.array, length self.project_life,
of dollar values(numbers)"
"""
raise NotImplementedError, "should be implemented by child class to" +\
" create self.annual_heating_savings as an np.array, length" +\
" self.project_life, of dollar values(numbers)"
@abstractmethod
def run (self, scalers = {'capital costs':1.0}):
"""
abstract function
should be implemented by child class to run component
"""
raise NotImplementedError, "should be implemented by child class to" +\
" run the component"
## helper
def get_nan_range (self):
"""
gets an array of nan's for the project life time, the "get"
functions defined here should return a array of nan's indicating that
the value is not applicable to the component. If a "get" should do
something else overload it in the component
pre:
self.lifetime > 0
post:
returns array of nan's length self.lifetime
"""
return np.zeros(self.project_life)/0
## Heating
def get_base_HF_use (self): # ex: eff(res) G89-V89
""" returns HF use array (baseline) """
try:
return self.baseline_HF_consumption
except:
return self.get_nan_range()
def get_proposed_HF_use (self): # ex: eff(res) G81-V81
""" returns HF use array (proposed) """
try:
return self.proposed_HF_consumption
except:
return self.get_nan_range()
def get_base_HF_cost (self): # ex: eff(res) G93-V93
""" returns HF cost array (baseline) """
try:
return self.baseline_HF_cost
except:
return self.get_nan_range()
def get_proposed_HF_cost (self): # ex: eff(res) G86-V86
""" returns HF cost array (proposed) """
try:
return self.proposed_HF_cost
except:
return self.get_nan_range()
def get_fuel_price (self): # ex: eff(res) G84-V84 or G90-V90
""" get the diesel fuel price used"""
try:
### TODO:CHANGE THIS TOO??
return self.diesel_prices
except:
return self.get_nan_range()
## Electric
def get_base_kWh_use (self): # ex: eff(res) G89-V89
""" returns kWh use array (baseline) """
try:
return self.baseline_kWh_consumption
except:
return self.get_nan_range()
def get_proposed_kWh_use (self): # ex: eff(res) G73-V73
""" returns kWh use array (proposed) """
try:
return self.proposed_kWh_consumption
except:
return self.get_nan_range()
def get_base_kWh_cost (self): # ex: eff(res) G75-V75
""" returns kWh cost array (baseline) """
try:
return self.baseline_kWh_cost
except:
return self.get_nan_range()
def get_proposed_kWh_cost (self): # ex: eff(res) G70-V70
""" returns kWh cost array (proposed) """
try:
return self.proposed_kWh_cost
except:
return self.get_nan_range()
## annual savings
def get_electric_savings_costs (self): # ex: eff(res) G57-V57 or G75-V75
""" returns kWh savings array (base - proposed) """
try:
return self.annual_electric_savings
except:
return self.get_nan_range()
def get_heating_savings_costs (self): # ex: eff(res) G58-V58 or G94-V94
""" returns HF savings array (base - proposed) """
try:
return self.annual_heating_savings
except:
return self.get_nan_range()
def get_total_savings_costs (self): # ex: eff(res) G59-V59
""" returns total savings array """
try:
return self.annual_total_savings
except:
return self.get_nan_range()
def get_capital_costs (self): # ex: eff(res) G55-V55
""" return capital costs array """
try:
return self.annual_costs
except:
return self.get_nan_range()
def get_net_benefit (self): # ex: eff(res) G62-V62
""" return net benefit array """
try:
return self.annual_net_benefit
except:
return self.get_nan_range()
## NPVs
def get_NPV_benefits (self): # ex: eff(res) C13
""" return NPV benefits (float) """
try:
return self.benefit_npv
except AttributeError:
return "N/A"
def get_NPV_costs (self): # ex: eff(res) C14
""" return NPV costs (float) """
try:
return self.cost_npv
except AttributeError:
return "N/A"
def get_BC_ratio (self): # ex: eff(res) C15
""" return NPV benefit/cost ratio (float) """
try:
return self.benefit_cost_ratio
except AttributeError:
return "N/A"
def get_NPV_net_benefit (self): # ex: eff(res) C16
""" return NPV net benefit (float) """
try:
return self.net_npv
except AttributeError:
return "N/A"
## save functions
def save_csv_outputs (self, directory):
"""
save all csv outputs
pre:
directory should exist and be an absolute path
post:
electric,heating, and finical csv files are saved
"""
self.save_component_csv(directory)
#~ self.save_electric_csv (directory)
#~ self.save_heating_csv (directory)
#~ if self.cd["model financial"]:
#~ self.save_financial_csv (directory)
def save_component_csv (self, directory):
"""
save the output from the component.
"""
if not self.was_run:
return
years = np.array(range(self.project_life)) + self.start_year
df = DataFrame({
self.component_name + \
": Heating Fuel Consumption Baseline (gallons/year)":
self.get_base_HF_use(),
self.component_name + \
": Heating Fuel Consumption Proposed (gallons/year)":
self.get_proposed_HF_use(),
self.component_name + \
": Heating Fuel Consumption Savings (gallons/year)":
self.get_base_HF_use() -\
self.get_proposed_HF_use(),
self.component_name + \
": Heating Fuel Cost Baseline ($/year)":
self.get_base_HF_cost(),
self.component_name + \
": Heating Fuel Cost Proposed ($/year)":
self.get_proposed_HF_cost(),
self.component_name + \
": Heating Fuel Cost Savings ($/year)":
self.get_heating_savings_costs(),
self.component_name + \
": Electricity Consumption Baseline (kWh/year)":
self.get_base_kWh_use(),
self.component_name + \
": Electricity Consumption Proposed (kWh/year)":
self.get_proposed_kWh_use(),
self.component_name + \
": Electricity Consumption Savings (kWh/year)":
self.get_base_kWh_use() -\
self.get_proposed_kWh_use(),
self.component_name + \
": Electricity Cost Basline ($/year)":
self.get_base_kWh_cost(),
self.component_name + \
": Electricity Cost Proposed ($/year)":
self.get_proposed_kWh_cost(),
self.component_name + \
": Electricity Cost Savings ($/year)":
self.get_electric_savings_costs(),
self.component_name + \
": Project Capital Cost ($/year)":
self.get_capital_costs(),
self.component_name + \
": Total Cost Savings ($/year)":
self.get_total_savings_costs(),
self.component_name + \
": Net Benefit ($/year)":
self.get_net_benefit(),
}, years)
df["Community"] = self.cd['name']
ol = ["Community",
self.component_name + \
": Heating Fuel Consumption Baseline (gallons/year)",
self.component_name + \
": Heating Fuel Consumption Proposed (gallons/year)",
self.component_name + \
": Heating Fuel Consumption Savings (gallons/year)",
self.component_name + ": Heating Fuel Cost Baseline ($/year)",
self.component_name + ": Heating Fuel Cost Proposed ($/year)",
self.component_name + ": Heating Fuel Cost Savings ($/year)",
self.component_name + \
": Electricity Consumption Baseline (kWh/year)",
self.component_name + \
": Electricity Consumption Proposed (kWh/year)",
self.component_name + \
": Electricity Consumption Savings (kWh/year)",
self.component_name + ": Electricity Cost Basline ($/year)",
self.component_name + ": Electricity Cost Proposed ($/year)",
self.component_name + ": Electricity Cost Savings ($/year)",
self.component_name + ": Project Capital Cost ($/year)",
self.component_name + ": Total Cost Savings ($/year)",
self.component_name + ": Net Benefit ($/year)"]
fname = os.path.join(directory,
self.cd['name'] + '_' +\
self.component_name.lower().replace(' ','_').\
replace('&','and') +\
"_output.csv")
fname = fname.replace(" ","_")
# save to end of project(actual lifetime)
df[ol].ix[:self.actual_end_year].to_csv(fname, index_label="Year")
| 39.312069 | 87 | 0.564317 |
b6eca889b97e1ea666446d3fabab81ba29c6ee01 | 2,401 | py | Python | 5.analysis/logistic_classification.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | 5.analysis/logistic_classification.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | 5.analysis/logistic_classification.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import numpy as np
import tensorflow as tf
xy = np.loadtxt('news.target', unpack=True, dtype='float32')
x_data = np.transpose(xy[0:20])
y_data = np.transpose(xy[20:])
X = tf.placeholder("float", [None, 20])
Y = tf.placeholder("float", [None, 7])
W = tf.Variable(tf.zeros([20, 7]))
# matrix shapes: X=[None, 20], W=[20, 7] -> hypothesis=[None, 7]
hypothesis = tf.nn.softmax(tf.matmul(X, W))
learning_rate = 0.001
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(2001):
sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
if step % 200 == 0:
print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
print('--------------------')
print('global')
a = sess.run(hypothesis, feed_dict={X: [[0.005917159763313609, 0.0, 0.011834319526627219, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005917159763313609, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005917159763313609, 0.0, 0.0, 0.005917159763313609, 0.0]]})
print(a, sess.run(tf.argmax(a, 1)))
print('social')
b = sess.run(hypothesis, feed_dict={X: [[0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.0, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666, 0.016666666666666666]]})
print(b, sess.run(tf.argmax(b, 1)))
print('politics')
c = sess.run(hypothesis, feed_dict={X: [[0.045454545454545456, 0.045454545454545456, 0.0, 0.045454545454545456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.045454545454545456, 0.0, 0.0, 0.0, 0.045454545454545456, 0.0, 0.0, 0.0, 0.045454545454545456, 0.045454545454545456, 0.0]]})
print(c, sess.run(tf.argmax(c, 1)))
#all = sess.run(hypothesis, feed_dict={X: [[1, 11, 7], [1, 3, 4], [1, 1, 0]]})
#print(all, sess.run(tf.argmax(all, 1)))
| 46.173077 | 528 | 0.627655 |
28b8091855c00d8088e53caba2b1c7b3d9d612cd | 39,781 | py | Python | tests/test_datasets.py | ckkelvinchan/mmediting | 73ab4347380f8022b7dde6be65cecf910cbdfbd0 | [
"Apache-2.0"
] | 2 | 2021-04-20T11:31:37.000Z | 2021-05-27T13:04:40.000Z | tests/test_datasets.py | ckkelvinchan/mmediting | 73ab4347380f8022b7dde6be65cecf910cbdfbd0 | [
"Apache-2.0"
] | null | null | null | tests/test_datasets.py | ckkelvinchan/mmediting | 73ab4347380f8022b7dde6be65cecf910cbdfbd0 | [
"Apache-2.0"
] | 2 | 2021-04-22T12:10:14.000Z | 2021-05-19T02:09:48.000Z | import os.path as osp
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
from torch.utils.data import Dataset
# yapf: disable
from mmedit.datasets import (AdobeComp1kDataset, BaseGenerationDataset,
BaseSRDataset, GenerationPairedDataset,
GenerationUnpairedDataset, RepeatDataset,
SRAnnotationDataset, SRFolderDataset,
SRFolderGTDataset, SRFolderRefDataset,
SRLmdbDataset, SRREDSDataset,
SRREDSMultipleGTDataset, SRTestMultipleGTDataset,
SRVid4Dataset, SRVimeo90KDataset,
SRVimeo90KMultipleGTDataset)
# yapf: enable
def mock_open(*args, **kwargs):
"""unittest.mock_open wrapper.
unittest.mock_open doesn't support iteration. Wrap it to fix this bug.
Reference: https://stackoverflow.com/a/41656192
"""
import unittest
f_open = unittest.mock.mock_open(*args, **kwargs)
f_open.return_value.__iter__ = lambda self: iter(self.readline, '')
return f_open
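# Hypothetical usage of the wrapper above (file name and contents are illustrative):
#   with patch('builtins.open', mock_open(read_data='line1\nline2\n')):
#       lines = list(open('fake.txt'))   # iteration works thanks to the __iter__ patch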
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
class TestMattingDatasets:
@classmethod
def setup_class(cls):
# create para for creating a dataset.
cls.data_prefix = Path(__file__).parent / 'data'
cls.ann_file = osp.join(cls.data_prefix, 'test_list.json')
cls.pipeline = [
dict(type='LoadImageFromFile', key='alpha', flag='grayscale')
]
def test_comp1k_dataset(self):
comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
self.data_prefix)
first_data = comp1k_dataset[0]
assert 'alpha' in first_data
assert isinstance(first_data['alpha'], np.ndarray)
assert first_data['alpha'].shape == (552, 800)
def test_comp1k_evaluate(self):
comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
self.data_prefix)
with pytest.raises(TypeError):
comp1k_dataset.evaluate('Not a list object')
results = [{
'pred_alpha': None,
'eval_result': {
'SAD': 26,
'MSE': 0.006
}
}, {
'pred_alpha': None,
'eval_result': {
'SAD': 24,
'MSE': 0.004
}
}]
eval_result = comp1k_dataset.evaluate(results)
assert set(eval_result.keys()) == set(['SAD', 'MSE'])
assert eval_result['SAD'] == 25
assert eval_result['MSE'] == 0.005
class TestSRDatasets:
@classmethod
def setup_class(cls):
cls.data_prefix = Path(__file__).parent / 'data'
def test_base_super_resolution_dataset(self):
class ToyDataset(BaseSRDataset):
"""Toy dataset for testing SRDataset."""
def __init__(self, pipeline, test_mode=False):
super().__init__(pipeline, test_mode)
def load_annotations(self):
pass
def __len__(self):
return 2
toy_dataset = ToyDataset(pipeline=[])
file_paths = ['gt/baboon.png', 'lq/baboon_x4.png']
file_paths = [str(self.data_prefix / v) for v in file_paths]
result = toy_dataset.scan_folder(self.data_prefix)
assert check_keys_contain(result, file_paths)
result = toy_dataset.scan_folder(str(self.data_prefix))
assert check_keys_contain(result, file_paths)
with pytest.raises(TypeError):
toy_dataset.scan_folder(123)
# test evaluate function
results = [{
'eval_result': {
'PSNR': 20,
'SSIM': 0.6
}
}, {
'eval_result': {
'PSNR': 30,
'SSIM': 0.8
}
}]
with pytest.raises(TypeError):
# results must be a list
toy_dataset.evaluate(results=5)
with pytest.raises(AssertionError):
# The length of results should be equal to the dataset len
toy_dataset.evaluate(results=[results[0]])
eval_results = toy_dataset.evaluate(results=results)
assert eval_results == {'PSNR': 25, 'SSIM': 0.7}
with pytest.raises(AssertionError):
results = [{
'eval_result': {
'PSNR': 20,
'SSIM': 0.6
}
}, {
'eval_result': {
'PSNR': 30
}
}]
# Length of evaluation result should be the same as the dataset len
toy_dataset.evaluate(results=results)
def test_sr_annotation_dataset(self):
# setup
anno_file_path = self.data_prefix / 'train.txt'
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = [
'lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt',
'gt_ori_shape'
]
# input path is Path object
sr_annotation_dataset = SRAnnotationDataset(
lq_folder=self.data_prefix / 'lq',
gt_folder=self.data_prefix / 'gt',
ann_file=anno_file_path,
pipeline=sr_pipeline,
scale=4,
filename_tmpl='{}_x4')
data_infos = sr_annotation_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(self.data_prefix / 'lq' / 'baboon_x4.png'),
gt_path=str(self.data_prefix / 'gt' / 'baboon.png'))
]
result = sr_annotation_dataset[0]
assert (len(sr_annotation_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
# input path is str
sr_annotation_dataset = SRAnnotationDataset(
lq_folder=str(self.data_prefix / 'lq'),
gt_folder=str(self.data_prefix / 'gt'),
ann_file=str(anno_file_path),
pipeline=sr_pipeline,
scale=4,
filename_tmpl='{}_x4')
data_infos = sr_annotation_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(self.data_prefix / 'lq' / 'baboon_x4.png'),
gt_path=str(self.data_prefix / 'gt' / 'baboon.png'))
]
result = sr_annotation_dataset[0]
assert (len(sr_annotation_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
def test_sr_folder_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = ['lq_path', 'gt_path', 'scale', 'lq', 'gt']
lq_folder = self.data_prefix / 'lq'
gt_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_dataset = SRFolderDataset(
lq_folder=lq_folder,
gt_folder=gt_folder,
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'))
]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
# input path is str
sr_folder_dataset = SRFolderDataset(
lq_folder=str(lq_folder),
gt_folder=str(gt_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'))
]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
def test_sr_folder_gt_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='ImageToTensor', keys=['gt'])
]
target_keys = ['gt_path', 'gt']
gt_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_dataset = SRFolderGTDataset(
gt_folder=gt_folder,
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [dict(gt_path=str(gt_folder / 'baboon.png'))]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
# input path is str
sr_folder_dataset = SRFolderGTDataset(
gt_folder=str(gt_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [dict(gt_path=str(gt_folder / 'baboon.png'))]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
def test_sr_folder_ref_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='LoadImageFromFile', io_backend='disk', key='ref'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt', 'ref'])
]
target_keys = [
'lq_path', 'gt_path', 'ref_path', 'scale', 'lq', 'gt', 'ref'
]
lq_folder = self.data_prefix / 'lq'
gt_folder = self.data_prefix / 'gt'
ref_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=lq_folder,
gt_folder=gt_folder,
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
data_infos = sr_folder_ref_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'),
ref_path=str(ref_folder / 'baboon.png'))
]
result = sr_folder_ref_dataset[0]
assert len(sr_folder_ref_dataset) == 1
assert check_keys_contain(result.keys(), target_keys)
# input path is str
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
data_infos = sr_folder_ref_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'),
ref_path=str(ref_folder / 'baboon.png'))
]
result = sr_folder_ref_dataset[0]
assert len(sr_folder_ref_dataset) == 1
assert check_keys_contain(result.keys(), target_keys)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(self.data_prefix / 'image'), # fake gt_folder
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(self.data_prefix / 'image'), # fake lq_folder
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(self.data_prefix / 'bg'), # fake gt_folder
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(self.data_prefix / 'bg'), # fake lq_folder
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=None,
gt_folder=None,
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
def test_sr_lmdb_dataset(self):
# setup
lq_lmdb_folder = self.data_prefix / 'lq.lmdb'
sr_pipeline = [
dict(
type='LoadImageFromFile',
io_backend='lmdb',
key='lq',
db_path=lq_lmdb_folder),
dict(
type='LoadImageFromFile',
io_backend='lmdb',
key='gt',
db_path=lq_lmdb_folder),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = [
'lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt',
'gt_ori_shape'
]
# input path is Path object
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
data_infos = sr_lmdb_dataset.data_infos
assert data_infos == [dict(lq_path='baboon', gt_path='baboon')]
result = sr_lmdb_dataset[0]
assert (len(sr_lmdb_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
# input path is str
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=str(lq_lmdb_folder),
            gt_folder=str(lq_lmdb_folder), # fake gt_folder
pipeline=sr_pipeline,
scale=1)
data_infos = sr_lmdb_dataset.data_infos
assert data_infos == [dict(lq_path='baboon', gt_path='baboon')]
result = sr_lmdb_dataset[0]
assert (len(sr_lmdb_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=self.data_prefix, # normal folder
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=str(self.data_prefix), # normal folder
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=self.data_prefix, # normal folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=str(self.data_prefix), # normal folder
pipeline=sr_pipeline,
scale=1)
class TestGenerationDatasets:
@classmethod
def setup_class(cls):
cls.data_prefix = Path(__file__).parent / 'data'
def test_base_generation_dataset(self):
class ToyDataset(BaseGenerationDataset):
"""Toy dataset for testing Generation Dataset."""
def load_annotations(self):
pass
toy_dataset = ToyDataset(pipeline=[])
file_paths = [
'paired/test/3.jpg', 'paired/train/1.jpg', 'paired/train/2.jpg'
]
file_paths = [str(self.data_prefix / v) for v in file_paths]
# test scan_folder
result = toy_dataset.scan_folder(self.data_prefix)
assert check_keys_contain(result, file_paths)
result = toy_dataset.scan_folder(str(self.data_prefix))
assert check_keys_contain(result, file_paths)
with pytest.raises(TypeError):
toy_dataset.scan_folder(123)
# test evaluate
toy_dataset.data_infos = file_paths
with pytest.raises(TypeError):
_ = toy_dataset.evaluate(1)
test_results = [dict(saved_flag=True), dict(saved_flag=True)]
with pytest.raises(AssertionError):
_ = toy_dataset.evaluate(test_results)
test_results = [
dict(saved_flag=True),
dict(saved_flag=True),
dict(saved_flag=False)
]
eval_results = toy_dataset.evaluate(test_results)
assert eval_results['val_saved_number'] == 2
def test_generation_paired_dataset(self):
# setup
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
pipeline = [
dict(
type='LoadPairedImageFromFile',
io_backend='disk',
key='pair',
flag='color'),
dict(
type='Resize',
keys=['img_a', 'img_b'],
scale=(286, 286),
interpolation='bicubic'),
dict(
type='FixedCrop',
keys=['img_a', 'img_b'],
crop_size=(256, 256)),
dict(type='Flip', keys=['img_a', 'img_b'], direction='horizontal'),
dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
dict(
type='Normalize',
keys=['img_a', 'img_b'],
to_rgb=True,
**img_norm_cfg),
dict(type='ImageToTensor', keys=['img_a', 'img_b']),
dict(
type='Collect',
keys=['img_a', 'img_b'],
meta_keys=['img_a_path', 'img_b_path'])
]
target_keys = ['img_a', 'img_b', 'meta']
target_meta_keys = ['img_a_path', 'img_b_path']
pair_folder = self.data_prefix / 'paired'
# input path is Path object
        generation_paired_dataset = GenerationPairedDataset(
dataroot=pair_folder, pipeline=pipeline, test_mode=True)
        data_infos = generation_paired_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
]
        result = generation_paired_dataset[0]
        assert (len(generation_paired_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
'3.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
'3.jpg'))
# input path is str
        generation_paired_dataset = GenerationPairedDataset(
dataroot=str(pair_folder), pipeline=pipeline, test_mode=True)
        data_infos = generation_paired_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
]
        result = generation_paired_dataset[0]
        assert (len(generation_paired_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
'3.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
'3.jpg'))
# test_mode = False
        generation_paired_dataset = GenerationPairedDataset(
dataroot=str(pair_folder), pipeline=pipeline, test_mode=False)
        data_infos = generation_paired_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'train' / '1.jpg')),
dict(pair_path=str(pair_folder / 'train' / '2.jpg'))
]
        assert (len(generation_paired_dataset) == 2)
        result = generation_paired_dataset[0]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder /
'train' / '1.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder /
'train' / '1.jpg'))
        result = generation_paired_dataset[1]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder /
'train' / '2.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder /
'train' / '2.jpg'))
def test_generation_unpaired_dataset(self):
# setup
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
pipeline = [
dict(
type='LoadImageFromFile',
io_backend='disk',
key='img_a',
flag='color'),
dict(
type='LoadImageFromFile',
io_backend='disk',
key='img_b',
flag='color'),
dict(
type='Resize',
keys=['img_a', 'img_b'],
scale=(286, 286),
interpolation='bicubic'),
dict(
type='Crop',
keys=['img_a', 'img_b'],
crop_size=(256, 256),
random_crop=True),
dict(type='Flip', keys=['img_a'], direction='horizontal'),
dict(type='Flip', keys=['img_b'], direction='horizontal'),
dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
dict(
type='Normalize',
keys=['img_a', 'img_b'],
to_rgb=True,
**img_norm_cfg),
dict(type='ImageToTensor', keys=['img_a', 'img_b']),
dict(
type='Collect',
keys=['img_a', 'img_b'],
meta_keys=['img_a_path', 'img_b_path'])
]
target_keys = ['img_a', 'img_b', 'meta']
target_meta_keys = ['img_a_path', 'img_b_path']
unpair_folder = self.data_prefix / 'unpaired'
# input path is Path object
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=unpair_folder, pipeline=pipeline, test_mode=True)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'testA' / '5.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'testB' / '6.jpg'))
]
result = generation_unpaired_dataset[0]
assert (len(generation_unpaired_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'testA' / '5.jpg'))
assert (result['meta'].data['img_b_path'] == str(unpair_folder /
'testB' / '6.jpg'))
# input path is str
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=str(unpair_folder), pipeline=pipeline, test_mode=True)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'testA' / '5.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'testB' / '6.jpg'))
]
result = generation_unpaired_dataset[0]
assert (len(generation_unpaired_dataset) == 1)
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'testA' / '5.jpg'))
assert (result['meta'].data['img_b_path'] == str(unpair_folder /
'testB' / '6.jpg'))
# test_mode = False
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=str(unpair_folder), pipeline=pipeline, test_mode=False)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'trainA' / '1.jpg')),
dict(path=str(unpair_folder / 'trainA' / '2.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'trainB' / '3.jpg')),
dict(path=str(unpair_folder / 'trainB' / '4.jpg'))
]
assert (len(generation_unpaired_dataset) == 2)
img_b_paths = [
str(unpair_folder / 'trainB' / '3.jpg'),
str(unpair_folder / 'trainB' / '4.jpg')
]
result = generation_unpaired_dataset[0]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'trainA' / '1.jpg'))
assert result['meta'].data['img_b_path'] in img_b_paths
result = generation_unpaired_dataset[1]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['meta'].data.keys(), target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'trainA' / '2.jpg'))
assert result['meta'].data['img_b_path'] in img_b_paths
def test_repeat_dataset():
class ToyDataset(Dataset):
def __init__(self):
super().__init__()
self.members = [1, 2, 3, 4, 5]
def __len__(self):
return len(self.members)
def __getitem__(self, idx):
return self.members[idx % 5]
toy_dataset = ToyDataset()
repeat_dataset = RepeatDataset(toy_dataset, 2)
assert len(repeat_dataset) == 10
assert repeat_dataset[2] == 3
assert repeat_dataset[8] == 4
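    # With times=2 the repeated length doubles to 10, and an index such as 8
    # wraps back to members[8 % 5] == 4, which is what the assertions above check.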
def test_reds_dataset():
root_path = Path(__file__).parent / 'data'
txt_content = ('000/00000001.png (720, 1280, 3)\n'
'001/00000001.png (720, 1280, 3)\n'
'250/00000001.png (720, 1280, 3)\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
# official val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=False)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='000/00000001',
max_frame_num=100,
num_input_frames=5),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='001/00000001',
max_frame_num=100,
num_input_frames=5)
]
# REDS4 val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=False)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='001/00000001',
max_frame_num=100,
num_input_frames=5),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='250/00000001',
max_frame_num=100,
num_input_frames=5)
]
with pytest.raises(ValueError):
            # wrong val_partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
with pytest.raises(AssertionError):
# num_input_frames should be odd numbers
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
# test mode
# official val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=True)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='250/00000001',
max_frame_num=100,
num_input_frames=5)
]
# REDS4 val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=True)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='000/00000001',
max_frame_num=100,
num_input_frames=5)
]
def test_vimeo90k_dataset():
root_path = Path(__file__).parent / 'data'
txt_content = ('00001/0266 (256, 448, 3)\n00002/0268 (256, 448, 3)\n')
mocked_open_function = mock_open(read_data=txt_content)
lq_paths_1 = [
str(root_path / '00001' / '0266' / f'im{v}.png') for v in range(1, 8)
]
gt_paths_1 = [str(root_path / '00001' / '0266' / 'im4.png')]
lq_paths_2 = [
str(root_path / '00002' / '0268' / f'im{v}.png') for v in range(1, 8)
]
gt_paths_2 = [str(root_path / '00002' / '0268' / 'im4.png')]
with patch('builtins.open', mocked_open_function):
vimeo90k_dataset = SRVimeo90KDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=7,
pipeline=[],
scale=4,
test_mode=False)
assert vimeo90k_dataset.data_infos == [
dict(lq_path=lq_paths_1, gt_path=gt_paths_1, key='00001/0266'),
dict(lq_path=lq_paths_2, gt_path=gt_paths_2, key='00002/0268')
]
with pytest.raises(AssertionError):
# num_input_frames should be odd numbers
vimeo90k_dataset = SRVimeo90KDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
test_mode=False)
def test_vid4_dataset():
root_path = Path(__file__).parent / 'data'
txt_content = ('calendar 1 (320,480,3)\ncity 2 (320,480,3)\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
vid4_dataset = SRVid4Dataset(
lq_folder=root_path / 'lq',
gt_folder=root_path / 'gt',
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
test_mode=False,
metric_average_mode='clip',
filename_tmpl='{:08d}')
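        # The mocked ann file lists 'calendar 1' and 'city 2', so one entry is
        # generated per frame, with frame indices rendered by filename_tmpl
        # ('{:08d}') into keys such as 'calendar/00000000'.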
assert vid4_dataset.data_infos == [
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key='calendar/00000000',
num_input_frames=5,
max_frame_num=1),
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key='city/00000000',
num_input_frames=5,
max_frame_num=2),
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key='city/00000001',
num_input_frames=5,
max_frame_num=2),
]
with pytest.raises(AssertionError):
# num_input_frames should be odd numbers
SRVid4Dataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
test_mode=False)
with pytest.raises(ValueError):
# metric_average_mode can only be either 'clip' or 'all'
SRVid4Dataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
metric_average_mode='abc',
test_mode=False)
def test_sr_reds_multiple_gt_dataset():
root_path = Path(__file__).parent / 'data'
# official val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=15,
pipeline=[],
scale=4,
val_partition='official',
test_mode=False)
assert len(reds_dataset.data_infos) == 240 # 240 training clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='000',
sequence_length=100,
num_input_frames=15)
# REDS4 val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=20,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=False)
assert len(reds_dataset.data_infos) == 266 # 266 training clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='001',
sequence_length=100,
        num_input_frames=20) # 000 has been removed
with pytest.raises(ValueError):
        # wrong val_partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
# test mode
# official val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=True)
assert len(reds_dataset.data_infos) == 30 # 30 test clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='240',
sequence_length=100,
num_input_frames=5)
# REDS4 val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=True)
assert len(reds_dataset.data_infos) == 4 # 4 test clips
assert reds_dataset.data_infos[1] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='011',
sequence_length=100,
num_input_frames=5)
def test_sr_vimeo90k_multiple_gt_dataset():
root_path = Path(__file__).parent / 'data/vimeo90k'
txt_content = ('00001/0266 (256,448,3)\n')
mocked_open_function = mock_open(read_data=txt_content)
lq_paths = [
str(root_path / '00001' / '0266' / f'im{v}.png') for v in range(1, 8)
]
gt_paths = [
str(root_path / '00001' / '0266' / f'im{v}.png') for v in range(1, 8)
]
with patch('builtins.open', mocked_open_function):
vimeo90k_dataset = SRVimeo90KMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
pipeline=[],
scale=4,
test_mode=False)
assert vimeo90k_dataset.data_infos == [
dict(lq_path=lq_paths, gt_path=gt_paths, key='00001/0266')
]
def test_sr_test_multiple_gt_dataset():
root_path = Path(__file__).parent / 'data/test_multiple_gt'
test_dataset = SRTestMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
sequence_length=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_2',
sequence_length=1)
]
| 36.698339 | 79 | 0.554712 |
47af018e313c42d52665b96fb869f829aa6c5194 | 8,436 | py | Python | ginipaybusiness/src/doc/source/conf.py | gini/gini-health-sdk-android | 09361d0f1ead21d821992e31943477af138c6d62 | [
"Apache-2.0"
] | null | null | null | ginipaybusiness/src/doc/source/conf.py | gini/gini-health-sdk-android | 09361d0f1ead21d821992e31943477af138c6d62 | [
"Apache-2.0"
] | null | null | null | ginipaybusiness/src/doc/source/conf.py | gini/gini-health-sdk-android | 09361d0f1ead21d821992e31943477af138c6d62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Gini API documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 23 17:02:36 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import gini_sphinx_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gini Health SDK for Android'
copyright = u'2014-2021, Gini GmbH'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '2.0'
# The full version, including alpha/beta/rc tags.
#release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'gini_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"google_analytics_id": os.environ.get("GOOGLE_ANALYTICS_ID"),
"logo_path": "_static/logo_flat.png"
}
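# GOOGLE_ANALYTICS_ID is read from the environment at build time; when it is
# unset the option is simply None (how that case is handled is left to gini_sphinx_theme).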
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [gini_sphinx_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GiniAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GiniAPI.tex', u'Gini API Documentation',
u'Gini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'giniapi', u'Gini API Documentation',
[u'Gini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GiniAPI', u'Gini API Documentation',
u'Gini', 'GiniAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
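# Render the output of sphinx.ext.todo directives unless the TODO environment
# variable is set to something other than "1".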
todo_include_todos = os.environ.get("TODO", "1") == "1"
| 31.477612 | 79 | 0.717639 |
2c9a21427f66d749c15c14426cd3660d34d1c28f | 2,348 | py | Python | tests/testapp/tests/factories.py | joshuajonah/feincms-elephantblog | 57a1d60ffa72d7678e6d9f1ceb0ad31b9a44fcff | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/tests/factories.py | joshuajonah/feincms-elephantblog | 57a1d60ffa72d7678e6d9f1ceb0ad31b9a44fcff | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/tests/factories.py | joshuajonah/feincms-elephantblog | 57a1d60ffa72d7678e6d9f1ceb0ad31b9a44fcff | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import factory
from elephantblog.models import Entry, Category, CategoryTranslation
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
username = 'author'
password = 'elephant'
email = '[email protected]'
class EntryFactory(factory.DjangoModelFactory):
FACTORY_FOR = Entry
is_active = True
is_featured = False
def create_entries(factory):
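    """Create two published entries sharing the same test author.
    ``factory`` is expected to be an Entry factory class such as
    ``EntryFactory`` above; note that the argument shadows the imported
    ``factory`` module inside this function.
    """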
author = UserFactory()
entries = []
entries.append(factory.create(
pk=1,
author=author,
title='Entry 1',
published_on=datetime.datetime(2012, 8, 12, 11, 0, 0),
last_changed=datetime.datetime(2012, 8, 12, 15, 0, 0),
slug='entry-1',
language='en',
))
entries.append(factory.create(
pk=2,
author=author,
title='Eintrag 1',
published_on=datetime.datetime(2012, 10, 12, 11, 0, 0),
last_changed=datetime.datetime(2012, 10, 12, 15, 0, 0),
slug='eintrag-1',
language='en',
))
return entries
def create_chinese_entries(factory):
entries = create_entries(factory)
author = entries[0].author
factory.create(
pk=3,
author=author,
title='Entry 2 chinese traditional',
language='zh-cn',
translation_of=entries[0],
published_on=datetime.datetime(2012, 10, 12, 12, 0, 0),
last_changed=datetime.datetime(2012, 10, 12, 16, 0, 0),
slug='entry-2-cn'
)
factory.create(
pk=4,
author=author,
title='Entry 2 chinese simplified',
language='zh-tw',
translation_of=entries[0],
published_on=datetime.datetime(2012, 10, 12, 12, 0, 0),
last_changed=datetime.datetime(2012, 10, 12, 16, 0, 0),
slug='entry-2-tw'
)
class CategoryTranslationFactory(factory.DjangoModelFactory):
FACTORY_FOR = CategoryTranslation
class CategoryFactory(factory.DjangoModelFactory):
FACTORY_FOR = Category
def create_category(title):
category = CategoryFactory.create()
CategoryTranslationFactory.create(
parent=category,
title=title,
slug=slugify(title)
)
return category
| 25.247312 | 68 | 0.650341 |
b180d9a0aaee642a22b00ccb8a76c7b426b376bf | 4,225 | py | Python | benchmark/startQiskit3034.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3034.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3034.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
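# For example, bitwise_dot("111", "101") == "0" (1*1 + 1*0 + 1*1 = 2, then mod 2),
# while bitwise_xor XORs position by position and reverses the result, e.g.
# bitwise_xor("110", "011") == "101".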
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
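# build_oracle marks each input with f(x) == "1" by X-conjugation: the zero bits
# are flipped so a single multi-controlled Toffoli fires only on that pattern,
# then the X gates are undone, implementing |x>|y> -> |x>|y XOR f(x)>.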
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=14
prog.h(input_qubit[3]) # number=32
prog.cz(input_qubit[0],input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=34
prog.rx(-1.928937889304133,input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[3]) # number=36
prog.cz(input_qubit[2],input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=38
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=24
prog.cz(input_qubit[3],input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=29
prog.cx(input_qubit[0],input_qubit[2]) # number=39
prog.x(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=41
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=21
prog.x(input_qubit[3]) # number=27
prog.x(input_qubit[3]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3034.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.631148 | 140 | 0.649941 |
b96f4c361ecaa3a29ac1ceca551f23dc1144fec5 | 14,342 | py | Python | ami_md/ami_json.py | bturkus/ami-tools | d9ad73a3802055ac6c3a935ecb291fc96df6d156 | [
"MIT"
] | null | null | null | ami_md/ami_json.py | bturkus/ami-tools | d9ad73a3802055ac6c3a935ecb291fc96df6d156 | [
"MIT"
] | null | null | null | ami_md/ami_json.py | bturkus/ami-tools | d9ad73a3802055ac6c3a935ecb291fc96df6d156 | [
"MIT"
] | null | null | null | import os, json, re, logging
# data manipulation
from pandas.tslib import Timestamp
import numpy as np
import pandas as pd
# ami modules
import ami_files.ami_file as ami_file
import ami_md.ami_md_constants as ami_md_constants
FULL_TECHFN_RE = r"^[a-z]{3}_[a-z\d\-\*_]+_([vfrspt]\d{2})+_(pm|em|sc)$"
STUB_TECHFN_RE = r"^[a-z]{3}_[a-z\d\-\*_]+_([vfrspt]\d{2})+_(pm|em|sc)"
FULL_REFFN_RE = r"^[a-z]{3}_[a-z\d\-\*_]+_([vfrspt]\d{2})+_(pm|em|sc)\.(mov|wav|mkv|dv|mp4)$"
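# Hypothetical example of a name these patterns accept: "myd_mi123456_v01_pm"
# for FULL_TECHFN_RE, and "myd_mi123456_v01_pm.mov" for FULL_REFFN_RE.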
AUDIOFIELDS = ["filename", "extension", "fileFormat",
"fileSize", "dateCreated", "durationHuman", "durationMilli",
"audioCodec"]
VIDEOFIELDS = ["filename", "extension", "fileFormat",
"fileSize", "dateCreated", "durationHuman", "durationMilli",
"audioCodec", "videoCodec"]
LOGGER = logging.getLogger(__name__)
class AMIJSONError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class ami_json:
def __init__(self, filepath = None, load = True, flat_dict = None,
schema_version = "x.0.0", media_filepath = None):
"""
Initialize object as nested json
"""
if filepath:
self.path = filepath
self.filename = os.path.basename(filepath)
if load:
try:
with open(self.path, 'r', encoding = 'utf-8-sig') as f:
self.dict = json.load(f)
except:
self.raise_jsonerror('Not a JSON file')
else:
self.set_mediaformattype()
if flat_dict:
self.filename = os.path.splitext(flat_dict["asset.referenceFilename"])[0] + ".json"
nested_dict = {}
if "asset.schemaVersion" not in flat_dict.items():
flat_dict["asset.schemaVersion"] = schema_version
for key, value in flat_dict.items():
if value:
if pd.isnull(value):
continue
if type(value) == Timestamp:
value = value.strftime('%Y-%m-%d')
if isinstance(value, np.generic):
value = np.asscalar(value)
nested_dict = self.convert_dotKeyToNestedDict(
nested_dict, key, value)
self.dict = nested_dict
self.set_mediaformattype()
self.coerce_strings()
if media_filepath:
self.set_mediafilepath(media_filepath)
def set_mediaformattype(self):
    if not hasattr(self, 'dict'):
      self.raise_jsonerror('Cannot set format type, metadata dictionary not loaded.')
self.media_format_type = self.dict["source"]["object"]["type"][0:5]
def set_mediafilepath(self, media_filepath = None):
if not media_filepath:
LOGGER.info('Attempting to locate media file based on JSON file location.')
if hasattr(self, "path"):
try:
self.check_reffn()
except:
try:
self.check_techfn()
except:
raise_jsonerror("Cannot determine described media file based on filename metdata")
else:
media_filename = self.dict["technical"]["filename"] + '.' + self.dict["technical"]["extension"]
else:
media_filename = self.dict["asset"]["referenceFilename"]
media_filepath = os.path.join(os.path.split(self.path)[0], media_filename)
else:
raise_jsonerror("Cannot determine described media file location with json file location")
if os.path.isfile(media_filepath):
self.media_filepath = media_filepath
else:
self.raise_jsonerror("There is no media file found at {}".format(media_filepath))
def convert_dotKeyToNestedDict(self, tree, key, value):
"""
Recursive method that takes a dot-delimited header and returns a
nested dictionary.
Keyword arguments:
key -- dot-delimited header string
value -- value associated with header
"""
t = tree
if "." in key:
key, rest = key.split(".", 1)
if key not in tree:
t[key] = {}
self.convert_dotKeyToNestedDict(t[key], rest, value)
else:
t[key] = value
return t
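    # Example: self.convert_dotKeyToNestedDict({}, "technical.durationMilli.measure", 2000)
    # returns {"technical": {"durationMilli": {"measure": 2000}}}; repeated calls on the
    # same tree merge additional dot-delimited keys into that nested structure.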
def convert_nestedDictToDotKey(self, tree, separator = ".", prefix = ""):
"""
    Recursive method that flattens a nested dictionary into a single-level
    dictionary with dot-delimited keys.
    Keyword arguments:
    tree -- nested dictionary to flatten
    separator -- string used to join key segments (default ".")
    prefix -- key prefix carried through recursive calls
"""
new_tree = {}
for key, value in tree.items():
key = prefix + key
if isinstance(value, dict):
new_tree.update(self.convert_nestedDictToDotKey(value, separator, key + separator))
else:
new_tree[key] = value
return new_tree
def coerce_strings(self):
for key, item in self.dict["bibliographic"].items():
self.dict["bibliographic"][key] = str(item).split('.')[0]
for key, item in self.dict["digitizer"]["organization"]["address"].items():
self.dict["digitizer"]["organization"]["address"][key] = str(item).split('.')[0]
def validate_json(self):
"""
Check the metadata values for common errors.
"""
valid = True
LOGGER.info("Checking: {}".format(os.path.basename(self.filename)))
#Check for a sheet that should have preservation metadata data
try:
self.check_techfn()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
try:
self.check_reffn()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
try:
self.compare_techfn_reffn()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
try:
self.check_techmd_fields()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
if hasattr(self, 'media_filepath'):
try:
self.compare_techfn_media_filename()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
try:
self.check_techmd_values()
except AMIJSONError as e:
LOGGER.error("Error in JSON metadata: {0}".format(e.message))
valid = False
else:
LOGGER.warning('Cannot check technical metadata values against media file without location of the described media file.')
return valid
def check_techmd_fields(self):
self.valid_techmd_fields = False
found_fields = set(list(self.dict["technical"].keys()))
if self.media_format_type == "audio":
expected_fields = set(AUDIOFIELDS)
elif self.media_format_type == "video":
expected_fields = set(VIDEOFIELDS)
if not found_fields >= expected_fields:
self.raise_jsonerror("Metadata is missing the following fields: {}".format(
expected_fields - found_fields))
self.valid_techmd_fields = True
return True
def set_media_file(self):
if not hasattr(self, 'media_filepath'):
self.set_mediafilepath()
self.media_file = ami_file.ami_file(self.media_filepath)
def check_techmd_values(self):
if not hasattr(self, 'valid_techmd_fields'):
self.check_techmd_fields()
if not hasattr(self, 'media_file'):
self.set_media_file()
if self.media_format_type == "audio":
field_mapping = ami_md_constants.JSON_TO_AUDIO_FILE_MAPPING
elif self.media_format_type == "video":
field_mapping = ami_md_constants.JSON_TO_VIDEO_FILE_MAPPING
errors = []
for key, value in field_mapping.items():
try:
self.check_md_value(key, value)
except AMIJSONError as e:
errors.append(e.message)
if errors:
self.raise_jsonerror(' '.join(errors))
return True
def check_md_value(self, field, mapped_field, separator = '.'):
try:
file_value = getattr(self.media_file, mapped_field)
except AttributeError:
self.raise_jsonerror("File does not have expected attribute: {}".format(
mapped_field
))
md_value = self.dict["technical"]
if separator in field:
field_parts = field.split(separator)
for part in field_parts:
md_value = md_value[part]
else:
md_value = md_value[field]
if md_value != file_value:
if field == 'dateCreated':
LOGGER.warning('{0} in JSON and from file disagree. JSON: {1}, From file: {2}.'.format(
field, md_value, file_value
))
else:
self.raise_jsonerror("Incorrect value for {0}. Expected: {1}, Found: {2}.".format(
field, md_value, file_value
))
return True
def repair_techmd(self):
if not hasattr(self, 'media_file'):
self.set_media_file()
LOGGER.info("Rewriting technical md for {}".format(os.path.basename(self.filename)))
self.dict["technical"]["filename"] = self.media_file.base_filename
self.dict["technical"]["extension"] = self.media_file.extension
self.dict["technical"]["fileFormat"] = self.media_file.format
if "fileSize" not in self.dict["technical"].keys():
self.dict["technical"]["fileSize"] = {}
self.dict["technical"]["fileSize"]["measure"] = self.media_file.size
self.dict["technical"]["fileSize"]["unit"] = "B"
#retain original dates
if not "dateCreated" in self.dict["technical"].keys():
self.dict["technical"]["dateCreated"] = self.media_file.date_created
self.dict["technical"]["durationHuman"] = self.media_file.duration_human
if "durationMilli" not in self.dict["technical"].keys():
self.dict["technical"]["durationMilli"] = {}
self.dict["technical"]["durationMilli"]["measure"] = self.media_file.duration_milli
self.dict["technical"]["durationMilli"]["unit"] = "ms"
self.dict["technical"]["audioCodec"] = self.media_file.audio_codec
if self.media_file.type == "Video":
self.dict["technical"]["videoCodec"] = self.media_file.video_codec
def check_techfn(self):
if not "filename" in self.dict["technical"].keys():
self.raise_jsonerror("Key missing for technical.filename")
if not re.match(FULL_TECHFN_RE, self.dict["technical"]["filename"]):
self.raise_jsonerror("Value for technical.filename does not meet expectations: {}"
.format(self.dict["technical"]["filename"]))
return True
def repair_techfn(self):
correct_techfn = re.match(STUB_TECHFN_RE, self.dict["technical"]["filename"])
if correct_techfn:
if hasattr(self, 'media_filepath'):
try:
self.compare_techfn_media_filename()
except:
LOGGER.error('Extracted technical filename does not match provide media filename.')
return False
try:
self.compare_techfn_reffn()
except:
LOGGER.warning('Extracted technical filename does not match referenceFilename value.')
self.dict["technical"]["filename"] = correct_techfn[0]
LOGGER.info("{} technical.filename updated to: {}".format(
self.filename, self.dict["technical"]["filename"]))
return True
else:
LOGGER.error("Valid technical.filename could not be extracted from {}".format(
self.dict["technical"]["filename"]))
return False
def check_reffn(self):
if not "referenceFilename" in self.dict["asset"].keys():
self.raise_jsonerror("Key missing for asset.referenceFilename")
if not re.match(FULL_REFFN_RE, self.dict["asset"]["referenceFilename"]):
self.raise_jsonerror("Value for asset.referenceFilename does not meet expectations: {}"
.format(self.dict["asset"]["referenceFilename"]))
return True
def repair_reffn(self):
try:
self.check_techfn()
except AMIJSONError as e:
LOGGER.error("Valid asset.referenceFilename cannot be created from technical fields: {}, {}".format(
self.dict["technical"]["filename"], self.dict["technical"]["extension"]))
return False
else:
replacement_value = self.dict["technical"]["filename"] + '.' + self.dict["technical"]["extension"]
self.dict["asset"]["referenceFilename"] = replacement_value
LOGGER.info("{} asset.referenceFilename updated to: {}".format(self.filename, self.dict["asset"]["referenceFilename"]))
return True
def compare_techfn_reffn(self):
if not ("filename" in self.dict["technical"].keys() and
"extension" in self.dict["technical"].keys() and
"referenceFilename" in self.dict["asset"].keys()):
self.raise_jsonerror("Key or keys related to filenames missing")
if self.dict["asset"]["referenceFilename"] != self.dict["technical"]["filename"] + '.' + self.dict["technical"]["extension"]:
self.raise_jsonerror("Value for asset.referenceFilename should equal technical.filename + technical.extension: {} != {}.{}"
.format(self.dict["asset"]["referenceFilename"],
self.dict["technical"]["filename"], self.dict["technical"]["extension"]))
return True
def compare_techfn_media_filename(self):
expected_media_filename = self.dict["technical"]["filename"] + '.' + self.dict["technical"]["extension"]
provided_media_filename = os.path.basename(self.media_filepath)
if expected_media_filename != provided_media_filename:
self.raise_jsonerror("Value for technical.filename + technical.extension should equal media filename: {} != {}"
.format(expected_media_filename, provided_media_filename))
return True
def write_json(self, output_directory, indent = None):
if not os.path.exists(output_directory):
self.raise_jsonerror('output directory does not exist')
else:
json_directory = output_directory
if ('technical' in self.dict.keys() and
'filename' in self.dict['technical'].keys()):
filename = self.dict['technical']['filename']
elif ('asset' in self.dict.keys() and
'referenceFilename' in self.dict['asset'].keys()):
filename = self.dict['asset']['referenceFilename'].split('.')[0]
else:
self.raise_jsonerror('Metadata requires asset.referenceFilename or technical.filename to be saved.')
json_filename = "{0}/{1}.json".format(
json_directory,
filename)
with open(json_filename, 'w') as f:
json.dump(self.dict, f, indent = indent)
LOGGER.info("{} written".format(json_filename))
def raise_jsonerror(self, msg):
"""
lazy error reporting
"""
    logging.error(msg + '\n')
    raise AMIJSONError(msg)
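# Typical usage (sketch only; the JSON path below is hypothetical and assumes the
# sidecar's asset.referenceFilename points at a media file in the same directory):
#   md = ami_json(filepath='/path/to/myd_mi123456_v01_pm.json')
#   md.set_mediafilepath()
#   if md.validate_json():
#     md.write_json('/path/to/output', indent=2)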
| 32.229213 | 129 | 0.6573 |
356fe041b5f84d6497a41eedfa3fec370db6a21b | 9,299 | py | Python | mac-platform-tools/systrace/catapult/devil/devil/android/logcat_monitor_test.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | mac-platform-tools/systrace/catapult/devil/devil/android/logcat_monitor_test.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | mac-platform-tools/systrace/catapult/devil/devil/android/logcat_monitor_test.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import itertools
import threading
import unittest
import six
from devil import devil_env
from devil.android import logcat_monitor
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
def _CreateTestLog(raw_logcat=None):
test_adb = adb_wrapper.AdbWrapper('0123456789abcdef')
test_adb.Logcat = mock.Mock(return_value=(l for l in raw_logcat))
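  # Logcat is mocked to yield the canned raw_logcat lines, so the monitor under
  # test consumes this fake stream instead of output from a real device.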
test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
return test_log
def zip_longest(expected, actual):
# pylint: disable=no-member
if six.PY2:
return itertools.izip_longest(expected, actual)
else:
return itertools.zip_longest(expected, actual)
class LogcatMonitorTest(unittest.TestCase):
_TEST_THREADTIME_LOGCAT_DATA = [
'01-01 01:02:03.456 7890 0987 V LogcatMonitorTest: '
'verbose logcat monitor test message 1',
'01-01 01:02:03.457 8901 1098 D LogcatMonitorTest: '
'debug logcat monitor test message 2',
'01-01 01:02:03.458 9012 2109 I LogcatMonitorTest: '
'info logcat monitor test message 3',
'01-01 01:02:03.459 0123 3210 W LogcatMonitorTest: '
'warning logcat monitor test message 4',
'01-01 01:02:03.460 1234 4321 E LogcatMonitorTest: '
'error logcat monitor test message 5',
'01-01 01:02:03.461 2345 5432 F LogcatMonitorTest: '
'fatal logcat monitor test message 6',
'01-01 01:02:03.462 3456 6543 D LogcatMonitorTest: '
'last line'
]
def assertIterEqual(self, expected_iter, actual_iter):
for expected, actual in zip_longest(expected_iter, actual_iter):
self.assertIsNotNone(
expected,
msg='actual has unexpected elements starting with %s' % str(actual))
self.assertIsNotNone(
actual,
msg='actual is missing elements starting with %s' % str(expected))
self.assertEqual(actual.group('proc_id'), expected[0])
self.assertEqual(actual.group('thread_id'), expected[1])
self.assertEqual(actual.group('log_level'), expected[2])
self.assertEqual(actual.group('component'), expected[3])
self.assertEqual(actual.group('message'), expected[4])
with self.assertRaises(StopIteration):
next(actual_iter)
with self.assertRaises(StopIteration):
next(expected_iter)
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_success(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
actual_match = test_log.WaitFor(r'.*(fatal|error) logcat monitor.*', None)
self.assertTrue(actual_match)
self.assertEqual(
'01-01 01:02:03.460 1234 4321 E LogcatMonitorTest: '
'error logcat monitor test message 5', actual_match.group(0))
self.assertEqual('error', actual_match.group(1))
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_failure(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
actual_match = test_log.WaitFor(r'.*My Success Regex.*',
r'.*(fatal|error) logcat monitor.*')
self.assertIsNone(actual_match)
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_buffering(self):
# Simulate an adb log stream which does not complete until the test tells it
# to. This checks that the log matcher can receive individual lines from the
# log reader thread even if adb is not producing enough output to fill an
# entire file io buffer.
finished_lock = threading.Lock()
finished_lock.acquire()
def LogGenerator():
for line in type(self)._TEST_THREADTIME_LOGCAT_DATA:
yield line
finished_lock.acquire()
test_adb = adb_wrapper.AdbWrapper('0123456789abcdef')
test_adb.Logcat = mock.Mock(return_value=LogGenerator())
test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
test_log.Start()
actual_match = test_log.WaitFor(r'.*last line.*', None)
finished_lock.release()
self.assertTrue(actual_match)
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_defaults(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
expected_results = [('7890', '0987', 'V', 'LogcatMonitorTest',
'verbose logcat monitor test message 1'),
('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('9012', '2109', 'I', 'LogcatMonitorTest',
'info logcat monitor test message 3'),
('0123', '3210', 'W', 'LogcatMonitorTest',
'warning logcat monitor test message 4'),
('1234', '4321', 'E', 'LogcatMonitorTest',
'error logcat monitor test message 5'),
('2345', '5432', 'F', 'LogcatMonitorTest',
'fatal logcat monitor test message 6')]
actual_results = test_log.FindAll(r'\S* logcat monitor test message \d')
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_defaults_miss(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
expected_results = []
actual_results = test_log.FindAll(r'\S* nothing should match this \d')
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterProcId(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', proc_id=1234)
expected_results = [('1234', '4321', 'E', 'LogcatMonitorTest',
'error logcat monitor test message 5')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterThreadId(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', thread_id=2109)
expected_results = [('9012', '2109', 'I', 'LogcatMonitorTest',
'info logcat monitor test message 3')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterLogLevel(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', log_level=r'[DW]')
expected_results = [('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('0123', '3210', 'W', 'LogcatMonitorTest',
'warning logcat monitor test message 4')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterComponent(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(r'.*', component='LogcatMonitorTest')
expected_results = [('7890', '0987', 'V', 'LogcatMonitorTest',
'verbose logcat monitor test message 1'),
('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('9012', '2109', 'I', 'LogcatMonitorTest',
'info logcat monitor test message 3'),
('0123', '3210', 'W', 'LogcatMonitorTest',
'warning logcat monitor test message 4'),
('1234', '4321', 'E', 'LogcatMonitorTest',
'error logcat monitor test message 5'),
('2345', '5432', 'F', 'LogcatMonitorTest',
'fatal logcat monitor test message 6'),
('3456', '6543', 'D', 'LogcatMonitorTest', 'last line')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
if __name__ == '__main__':
unittest.main(verbosity=2)
| 40.430435 | 80 | 0.647597 |
9d6962508612d479bf00b382ede9298a33c10f30 | 3,071 | py | Python | python/ngraph/__init__.py | free25zer/ngraph | a58d3bc255b092d901b54aa744dc0e5945d24fb6 | ["Apache-2.0"] | null | null | null | python/ngraph/__init__.py | free25zer/ngraph | a58d3bc255b092d901b54aa744dc0e5945d24fb6 | ["Apache-2.0"] | null | null | null | python/ngraph/__init__.py | free25zer/ngraph | a58d3bc255b092d901b54aa744dc0e5945d24fb6 | ["Apache-2.0"] | null | null | null |
# ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""ngraph module namespace, exposing factory functions for all ops and other classes."""
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('ngraph-core').version
except DistributionNotFound:
__version__ = '0.0.0-dev'
from ngraph.ops import absolute
from ngraph.ops import absolute as abs
from ngraph.ops import acos
from ngraph.ops import add
from ngraph.ops import argmax
from ngraph.ops import argmin
from ngraph.ops import asin
from ngraph.ops import atan
from ngraph.ops import avg_pool
from ngraph.ops import batch_norm
from ngraph.ops import broadcast
from ngraph.ops import broadcast_to
from ngraph.ops import ceiling
from ngraph.ops import ceiling as ceil
from ngraph.ops import concat
from ngraph.ops import constant
from ngraph.ops import convert
from ngraph.ops import convolution
from ngraph.ops import convolution_backprop_data
from ngraph.ops import cos
from ngraph.ops import cosh
from ngraph.ops import divide
from ngraph.ops import dot
from ngraph.ops import equal
from ngraph.ops import exp
from ngraph.ops import floor
from ngraph.ops import get_output_element
from ngraph.ops import greater
from ngraph.ops import greater_eq
from ngraph.ops import less
from ngraph.ops import less_eq
from ngraph.ops import log
from ngraph.ops import logical_and
from ngraph.ops import logical_or
from ngraph.ops import logical_not
from ngraph.ops import lrn
from ngraph.ops import max
from ngraph.ops import max_pool
from ngraph.ops import maximum
from ngraph.ops import min
from ngraph.ops import minimum
from ngraph.ops import multiply
from ngraph.ops import negative
from ngraph.ops import not_equal
from ngraph.ops import one_hot
from ngraph.ops import pad
from ngraph.ops import parameter
from ngraph.ops import power
from ngraph.ops import prod
from ngraph.ops import relu
from ngraph.ops import replace_slice
from ngraph.ops import reshape
from ngraph.ops import reverse
from ngraph.ops import select
from ngraph.ops import sign
from ngraph.ops import sin
from ngraph.ops import sinh
from ngraph.ops import slice
from ngraph.ops import softmax
from ngraph.ops import sqrt
from ngraph.ops import subtract
from ngraph.ops import sum
from ngraph.ops import tan
from ngraph.ops import tanh
from ngraph.ops import topk
from ngraph.runtime import runtime
| 33.380435 | 88 | 0.773689 |
db409b215a78d2e5f470f077baabf89008987b12 | 19,482 | py | Python | pandas/core/computation/pytables.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | 2 | 2019-04-16T21:03:23.000Z | 2021-05-08T13:25:44.000Z | pandas/core/computation/pytables.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | 1 | 2016-08-15T12:35:16.000Z | 2016-08-15T12:35:16.000Z | pandas/core/computation/pytables.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | 2 | 2017-05-27T03:25:12.000Z | 2021-09-21T21:51:12.000Z |
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
from pandas.compat import DeepChainMap, string_types, u
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {name!r} is not defined'
.format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{lhs}] -> [{op}]"
.format(lhs=self.filter[0], op=self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError("passing a filterable condition to a non-table "
"indexer [{slf}]".format(slf=self))
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{cond}]]"
.format(cond=self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "({cond})".format(cond=' | '.join(vs))
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition,
op=self.op,
rhs=self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(self, 'visit_{node}'.format(node=bin_node),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {value!r} with "
"{slobj!r}".format(value=value, slobj=slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {name}"
.format(name=ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
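# --- Editor's note (not part of the original pandas module): a minimal, hedged
# --- sketch of how the Expr/ExprVisitor machinery above is exercised through the
# --- public pandas HDF API. The file name and column names are made up, and the
# --- example assumes the optional PyTables (`tables`) dependency is installed.
if __name__ == '__main__':
    demo = pd.DataFrame({'A': range(5), 'B': list('abcde')})
    demo.to_hdf('pytables_demo.h5', 'demo', format='table', data_columns=True)
    # The `where` string is parsed into Terms by Expr and pruned into a numexpr
    # condition and/or a post-read filter by ConditionBinOp/FilterBinOp.
    print(pd.read_hdf('pytables_demo.h5', 'demo', where='A > 2'))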
| 32.148515 | 79 | 0.555949 |
1c1195d740c673c4fbbe9bca08ba0a97d7c2e035 | 17,646 | py | Python | backend/tests/test_study.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | ["MIT"] | null | null | null | backend/tests/test_study.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | ["MIT"] | 368 | 2018-12-18T14:43:20.000Z | 2022-03-02T02:54:18.000Z | backend/tests/test_study.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | ["MIT"] | 2 | 2019-10-02T03:06:06.000Z | 2020-10-05T16:53:48.000Z |
import unittest
from flask import json
from tests.base_test import BaseTest
from app import db
from app.email_service import TEST_MESSAGES
from app.model.email_log import EmailLog
from app.model.investigator import Investigator
from app.model.participant import Relationship
from app.model.questionnaires.identification_questionnaire import IdentificationQuestionnaire
from app.model.questionnaires.contact_questionnaire import ContactQuestionnaire
from app.model.study import Study, Status
from app.model.study_category import StudyCategory
from app.model.study_investigator import StudyInvestigator
class TestStudy(BaseTest, unittest.TestCase):
def test_study_basics(self):
self.construct_study()
s = db.session.query(Study).first()
self.assertIsNotNone(s)
s_id = s.id
rv = self.app.get('/api/study/%i' % s_id,
follow_redirects=True,
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response["id"], s_id)
self.assertEqual(response["title"], 'Fantastic Study')
self.assertEqual(response["description"], 'A study that will go down in history')
self.assertNotIn('study_users', response, "Never include info about other users in a non-protected endpoint")
self.assertNotIn('users', response, "Never include info about other users in a non-protected endpoint")
def test_modify_study_basics(self):
self.construct_study()
s = db.session.query(Study).first()
self.assertIsNotNone(s)
s_id = s.id
rv = self.app.get('/api/study/%i' % s_id, content_type="application/json")
response = json.loads(rv.get_data(as_text=True))
response['title'] = 'Edwarardos Lemonade and Oil Change'
response['description'] = 'Better fluids for you and your car.'
response['benefit_description'] = 'Better fluids for you and your car, Duh.'
response["short_title"] = 'Edwardos'
response["short_description"] = 'Better fluids yada yada.'
response["image_url"] = '/some/url'
response["coordinator_email"] = '[email protected]'
orig_date = response['last_updated']
rv = self.app.put('/api/study/%i' % s_id, data=self.jsonify(response), content_type="application/json",
follow_redirects=True)
self.assert_success(rv)
rv = self.app.get('/api/study/%i' % s_id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['title'], 'Edwarardos Lemonade and Oil Change')
self.assertEqual(response['description'], 'Better fluids for you and your car.')
self.assertEqual(response['benefit_description'], 'Better fluids for you and your car, Duh.')
self.assertEqual(response["short_title"], 'Edwardos')
self.assertEqual(response["short_description"], 'Better fluids yada yada.')
self.assertEqual(response["image_url"], '/some/url')
self.assertEqual(response["coordinator_email"], '[email protected]')
self.assertNotEqual(orig_date, response['last_updated'])
def test_delete_study(self):
s = self.construct_study()
s_id = s.id
rv = self.app.get('api/study/%i' % s_id, content_type="application/json")
self.assert_success(rv)
rv = self.app.delete('api/study/%i' % s_id, content_type="application/json")
self.assert_success(rv)
rv = self.app.get('api/study/%i' % s_id, content_type="application/json")
self.assertEqual(404, rv.status_code)
def test_create_study(self):
study = {'title': "Study of Studies", 'benefit_description': "This study will change your life.",
'organization_name': "Study Org"}
rv = self.app.post('api/study', data=self.jsonify(study), content_type="application/json",
follow_redirects=True)
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['title'], 'Study of Studies')
self.assertEqual(response['benefit_description'], 'This study will change your life.')
self.assertIsNotNone(response['id'])
def test_get_study_by_category(self):
c = self.construct_category()
s = self.construct_study()
cs = StudyCategory(study=s, category=c)
db.session.add(cs)
db.session.commit()
rv = self.app.get(
'/api/category/%i/study' % c.id,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
self.assertEqual(s.id, response[0]["study_id"])
self.assertEqual(s.description, response[0]["study"]["description"])
def test_get_study_by_category_includes_category_details(self):
c = self.construct_category(name="c1")
c2 = self.construct_category(name="c2")
s = self.construct_study()
cs = StudyCategory(study=s, category=c)
cs2 = StudyCategory(study=s, category=c2)
db.session.add_all([cs, cs2])
db.session.commit()
rv = self.app.get(
'/api/category/%i/study' % c.id,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(s.id, response[0]["study_id"])
self.assertEqual(2,
len(response[0]["study"]["study_categories"]))
self.assertEqual(
"c1", response[0]["study"]["study_categories"][0]["category"]
["name"])
def test_category_study_count(self):
c = self.construct_category()
s = self.construct_study()
cs = StudyCategory(study=s, category=c)
db.session.add(cs)
db.session.commit()
rv = self.app.get(
'/api/category/%i' % c.id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, response["study_count"])
def test_get_category_by_study(self):
c = self.construct_category()
s = self.construct_study()
cs = StudyCategory(study=s, category=c)
db.session.add(cs)
db.session.commit()
rv = self.app.get(
'/api/study/%i/category' % s.id,
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
self.assertEqual(c.id, response[0]["id"])
self.assertEqual(c.name, response[0]["category"]["name"])
def test_add_category_to_study(self):
c = self.construct_category()
s = self.construct_study()
sc_data = {"study_id": s.id, "category_id": c.id}
rv = self.app.post(
'/api/study_category',
data=self.jsonify(sc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(c.id, response["category_id"])
self.assertEqual(s.id, response["study_id"])
def test_set_all_categories_on_study(self):
c1 = self.construct_category(name="c1")
c2 = self.construct_category(name="c2")
c3 = self.construct_category(name="c3")
s = self.construct_study()
sc_data = [
{
"category_id": c1.id
},
{
"category_id": c2.id
},
{
"category_id": c3.id
},
]
rv = self.app.post(
'/api/study/%i/category' % s.id,
data=self.jsonify(sc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(3, len(response))
sc_data = [{"category_id": c1.id}]
rv = self.app.post(
'/api/study/%i/category' % s.id,
data=self.jsonify(sc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
def test_remove_category_from_study(self):
self.test_add_category_to_study()
rv = self.app.delete('/api/study_category/%i' % 1)
self.assert_success(rv)
rv = self.app.get(
'/api/study/%i/category' % 1, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(0, len(response))
def test_add_investigator_to_study(self):
i = self.construct_investigator()
s = self.construct_study()
si_data = {"study_id": s.id, "investigator_id": i.id}
rv = self.app.post(
'/api/study_investigator',
data=self.jsonify(si_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(i.id, response["investigator_id"])
self.assertEqual(s.id, response["study_id"])
def test_set_all_investigators_on_study(self):
i1 = self.construct_investigator(name="person1")
i2 = self.construct_investigator(name="person2")
i3 = self.construct_investigator(name="person3")
s = self.construct_study()
si_data = [
{"investigator_id": i1.id},
{"investigator_id": i2.id},
{"investigator_id": i3.id},
]
rv = self.app.post(
'/api/study/%i/investigator' % s.id,
data=self.jsonify(si_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(3, len(response))
si_data = [{"investigator_id": i1.id}]
rv = self.app.post(
'/api/study/%i/investigator' % s.id,
data=self.jsonify(si_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
def test_remove_investigator_from_study(self):
self.test_add_investigator_to_study()
rv = self.app.delete('/api/study_investigator/%i' % 1)
self.assert_success(rv)
rv = self.app.get(
'/api/study/%i/investigator' % 1, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(0, len(response))
def test_study_inquiry_sends_email(self):
message_count = len(TEST_MESSAGES)
s = self.construct_study(title="The Best Study")
u = self.construct_user()
guardian = self.construct_participant(user=u, relationship=Relationship.self_guardian)
dependent1 = self.construct_participant(user=u, relationship=Relationship.dependent)
self.construct_contact_questionnaire(user=u, participant=guardian, phone="540-669-8855")
self.construct_identification_questionnaire(user=u, participant=guardian, first_name="Fred")
self.construct_identification_questionnaire(user=u, participant=dependent1, first_name="Fred", is_first_name_preferred=False, nickname="Zorba")
data = {'user_id': u.id, 'study_id': s.id}
rv = self.app.post('/api/study_inquiry',
data=self.jsonify(data),
follow_redirects=True,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
self.assertGreater(len(TEST_MESSAGES), message_count)
self.assertEqual("Autism DRIVE: Study Inquiry Email",
self.decode(TEST_MESSAGES[-1]['subject']))
logs = EmailLog.query.all()
self.assertIsNotNone(logs[-1].tracking_code)
def test_study_inquiry_creates_StudyUser(self):
s = self.construct_study(title="The Best Study")
u = self.construct_user()
        self.assertEqual(0, len(s.study_users))
guardian = self.construct_participant(user=u, relationship=Relationship.self_guardian)
self.construct_contact_questionnaire(user=u, participant=guardian, phone="540-669-8855")
self.construct_identification_questionnaire(user=u, participant=guardian, first_name="Fred")
data = {'user_id': u.id, 'study_id': s.id}
rv = self.app.post('/api/study_inquiry',
data=self.jsonify(data),
follow_redirects=True,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
        self.assertEqual(1, len(s.study_users))
def test_study_inquiry_fails_without_valid_study_or_user(self):
s = self.construct_study(title="The Best Study")
u = self.construct_user()
guardian = self.construct_participant(user=u, relationship=Relationship.self_guardian)
self.construct_contact_questionnaire(user=u, participant=guardian, phone="540-669-8855")
self.construct_identification_questionnaire(user=u, participant=guardian, first_name="Fred")
data = {'user_id': u.id, 'study_id': 456}
rv = self.app.post('/api/study_inquiry',
data=self.jsonify(data),
follow_redirects=True,
content_type="application/json",
headers=self.logged_in_headers())
self.assertEqual(400, rv.status_code)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['message'], 'Error in finding correct user and study to complete study inquiry')
data = {'user_id': 456, 'study_id': s.id}
rv = self.app.post('/api/study_inquiry',
data=self.jsonify(data),
follow_redirects=True,
content_type="application/json",
headers=self.logged_in_headers())
self.assertEqual(400, rv.status_code)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['message'], 'Error in finding correct user and study to complete study inquiry')
def construct_identification_questionnaire(self, relationship_to_participant='adoptFather', first_name='Karl',
is_first_name_preferred=True, nickname=None, participant=None, user=None):
iq = IdentificationQuestionnaire(relationship_to_participant=relationship_to_participant, first_name=first_name,
is_first_name_preferred=is_first_name_preferred, nickname=nickname)
if user is None:
u = self.construct_user(email='[email protected]')
iq.user_id = u.id
else:
u = user
iq.user_id = u.id
if participant is None:
iq.participant_id = self.construct_participant(user=u, relationship=Relationship.dependent).id
else:
iq.participant_id = participant.id
db.session.add(iq)
db.session.commit()
db_iq = db.session.query(IdentificationQuestionnaire).filter_by(participant_id=iq.participant_id).first()
self.assertEqual(db_iq.nickname, iq.nickname)
return db_iq
def construct_contact_questionnaire(self, phone="123-456-7890", can_leave_voicemail=True, contact_times="whenever",
email='[email protected]', participant=None, user=None):
cq = ContactQuestionnaire(phone=phone, can_leave_voicemail=can_leave_voicemail, contact_times=contact_times,
email=email)
if user is None:
u = self.construct_user(email='[email protected]')
cq.user_id = u.id
else:
u = user
cq.user_id = u.id
if participant is None:
cq.participant_id = self.construct_participant(user=u, relationship=Relationship.dependent).id
else:
cq.participant_id = participant.id
db.session.add(cq)
db.session.commit()
db_cq = db.session.query(ContactQuestionnaire).filter_by(zip=cq.zip).first()
self.assertEqual(db_cq.phone, cq.phone)
return db_cq
def test_delete_study_deletes_relationship(self):
i = self.construct_investigator()
s = self.construct_study()
si = StudyInvestigator(investigator_id=i.id, study_id=s.id)
db.session.add(si)
db.session.commit()
si_id = si.id
rv = self.app.get('api/study_investigator/%i' % si_id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.delete('api/study/%i' % s.id, content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('api/study_investigator/%i' % si_id, content_type="application/json", headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
| 44.225564 | 151 | 0.629208 |
2c49a1d6d4b4f9487afa74be01a9ab641d6bd449 | 1,847 | py | Python | Krakatau-master/Krakatau/Krakatau/assembler/token_regexes.py | orneryhippo/saturdays | 525ce086452e96a01d1762418c79d4c84fd605b5 | ["Apache-2.0"] | null | null | null | Krakatau-master/Krakatau/Krakatau/assembler/token_regexes.py | orneryhippo/saturdays | 525ce086452e96a01d1762418c79d4c84fd605b5 | ["Apache-2.0"] | null | null | null | Krakatau-master/Krakatau/Krakatau/assembler/token_regexes.py | orneryhippo/saturdays | 525ce086452e96a01d1762418c79d4c84fd605b5 | ["Apache-2.0"] | null | null | null |
DIRECTIVE = r'\.[a-z]+'
WORD = r'(?:[a-zA-Z_$\(<]|\[[A-Z\[])[\w$;\/\[\(\)<>*+-]*'
FOLLOWED_BY_WHITESPACE = r'(?=\s|\Z)'
REF = r'\[[a-z0-9_:]+\]'
LABEL_DEF = r'L\w+:'
COMMENT = r';.*'
# Match optional comment and at least one newline, followed by any number of empty/whitespace lines
NEWLINES = r'(?:{})?\n\s*'.format(COMMENT)
HEX_DIGIT = r'[0-9a-fA-F]'
ESCAPE_SEQUENCE = r'''\\(?:U00(?:10|0{hd}){hd}{{4}}|u{hd}{{4}}|x{hd}{{2}}|[btnfr'"\\0-7])'''.format(hd=HEX_DIGIT)
# See http://stackoverflow.com/questions/430759/regex-for-managing-escaped-characters-for-items-like-string-literals/5455705#5455705
STRING_LITERAL = r'''
[bB]?(?:
"
[^"\n\\]* # any number of unescaped characters
(?:{es}[^"\n\\]* # escape sequence followed by 0 or more unescaped
)*
"
|
'
[^'\n\\]* # any number of unescaped characters
(?:{es}[^'\n\\]* # escape sequence followed by 0 or more unescaped
)*
'
)'''.format(es=ESCAPE_SEQUENCE)
# For error detection
STRING_START = r'''[bB]?(?:"(?:[^"\\\n]|{es})*|'(?:[^'\\\n]|{es})*)'''.format(es=ESCAPE_SEQUENCE)
# Careful here: | is not greedy so hex must come first
INT_LITERAL = r'[+-]?(?:0[xX]{hd}+|[1-9][0-9]*|0)[lL]?'.format(hd=HEX_DIGIT)
FLOAT_LITERAL = r'''(?:
(?:
[-+][Ii][Nn][Ff][Ii][Nn][Ii][Tt][Yy]| # Nan and Inf both have mandatory sign
[-+][Nn][Aa][Nn]
(?:<0[xX]{hd}+>)? # Optional suffix for nonstandard NaNs
)|
[-+]?(?:
\d+\.\d+(?:[eE][+-]?\d+)?| # decimal float
\d+[eE][+-]?\d+| # decimal float with no fraction (exponent mandatory)
        0[xX]{hd}+(?:\.{hd}+)?[pP][+-]?\d+ # hexadecimal float
)
)[fF]?
'''.format(hd=HEX_DIGIT)
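# --- Editor's note (not part of the original Krakatau module): a small, hedged
# --- self-check of the patterns above. STRING_LITERAL and FLOAT_LITERAL contain
# --- verbose-mode whitespace and comments, so they must be compiled/matched with
# --- re.VERBOSE; the sample literals are made up for the demo.
if __name__ == '__main__':
    import re
    assert re.fullmatch(INT_LITERAL, '-0x1Fl')                          # hex int with long suffix
    assert re.fullmatch(FLOAT_LITERAL, '0x1.8p-3f', re.VERBOSE)         # hexadecimal float
    assert re.fullmatch(STRING_LITERAL, r'"hello\nworld"', re.VERBOSE)  # escaped string
    assert re.fullmatch(LABEL_DEF, 'LSTART:')                           # label definition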
| 40.152174 | 133 | 0.493774 |
62ba4ee5ae4668e2001ba14c394b7034f35704b8 | 239 | py | Python | scaffoldgraph/utils/__init__.py | UCLCheminformatics/ScaffoldGraph | 0443ce118110290a99601d65b2d000ac8bc7a1e9 | ["MIT"] | 121 | 2019-12-12T15:30:16.000Z | 2022-02-28T02:00:54.000Z | scaffoldgraph/utils/__init__.py | UCLCheminformatics/ScaffoldGraph | 0443ce118110290a99601d65b2d000ac8bc7a1e9 | ["MIT"] | 8 | 2020-04-04T15:37:26.000Z | 2021-11-17T07:30:31.000Z | scaffoldgraph/utils/__init__.py | UCLCheminformatics/ScaffoldGraph | 0443ce118110290a99601d65b2d000ac8bc7a1e9 | ["MIT"] | 28 | 2019-12-16T11:58:53.000Z | 2021-11-19T09:57:46.000Z |
"""
scaffoldgraph.utils
"""
from .misc import canonize_smiles, summary
from .aggregate import aggregate
from .logging import suppress_rdlogger
__all__ = [
'canonize_smiles',
'aggregate',
'summary',
'suppress_rdlogger',
]
| 15.933333 | 42 | 0.715481 |
48bae455bbdef6dae8f929f65944f96b8f97e3c1 | 1,951 | py | Python | manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | ["Apache-2.0"] | null | null | null | manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | ["Apache-2.0"] | null | null | null | manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from manila.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
"""Filter out previously attempted hosts
A host passes this filter if it has not already been attempted for
scheduling. The scheduler needs to add previously attempted hosts
to the 'retry' key of filter_properties in order for this to work
correctly. For example::
{
'retry': {
'hosts': ['host1', 'host2'],
'num_attempts': 3,
}
}
"""
def host_passes(self, host_state, filter_properties):
"""Skip nodes that have already been attempted."""
attempted = filter_properties.get('retry')
if not attempted:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled.")
return True
hosts = attempted.get('hosts', [])
host = host_state.host
passes = host not in hosts
pass_msg = "passes" if passes else "fails"
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s" % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
return passes
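# --- Editor's note (not part of the original manila module): a minimal, hedged
# --- sketch of the filter's behaviour. SimpleNamespace stands in for the real
# --- scheduler HostState object; only the `host` attribute is needed here.
if __name__ == '__main__':
    from types import SimpleNamespace
    flt = IgnoreAttemptedHostsFilter()
    host1 = SimpleNamespace(host='host1')
    print(flt.host_passes(host1, {}))  # True: no retry info, re-scheduling disabled
    print(flt.host_passes(host1, {'retry': {'hosts': ['host1'], 'num_attempts': 2}}))  # False: already attempted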
| 33.637931 | 78 | 0.631984 |
10fc16abb288a774bc34237207448be852316a7a | 30,577 | py | Python | google/cloud/data_fusion_v1/services/data_fusion/async_client.py | renovate-bot/python-data-fusion | cdc2d510bca87bda0599f4081f54a41aa23006ad | ["Apache-2.0"] | 2 | 2021-07-07T11:58:49.000Z | 2021-12-30T02:19:36.000Z | google/cloud/data_fusion_v1/services/data_fusion/async_client.py | renovate-bot/python-data-fusion | cdc2d510bca87bda0599f4081f54a41aa23006ad | ["Apache-2.0"] | 30 | 2021-07-07T13:17:40.000Z | 2022-03-29T00:02:01.000Z | google/cloud/data_fusion_v1/services/data_fusion/async_client.py | renovate-bot/python-data-fusion | cdc2d510bca87bda0599f4081f54a41aa23006ad | ["Apache-2.0"] | 2 | 2021-07-07T11:00:37.000Z | 2022-01-29T08:09:18.000Z |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.data_fusion_v1.services.data_fusion import pagers
from google.cloud.data_fusion_v1.types import datafusion
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DataFusionTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DataFusionGrpcAsyncIOTransport
from .client import DataFusionClient
class DataFusionAsyncClient:
"""Service for creating and managing Data Fusion instances.
Data Fusion enables ETL developers to build code-free, data
integration pipelines via a point-and-click UI.
"""
_client: DataFusionClient
DEFAULT_ENDPOINT = DataFusionClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DataFusionClient.DEFAULT_MTLS_ENDPOINT
crypto_key_path = staticmethod(DataFusionClient.crypto_key_path)
parse_crypto_key_path = staticmethod(DataFusionClient.parse_crypto_key_path)
instance_path = staticmethod(DataFusionClient.instance_path)
parse_instance_path = staticmethod(DataFusionClient.parse_instance_path)
common_billing_account_path = staticmethod(
DataFusionClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DataFusionClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(DataFusionClient.common_folder_path)
parse_common_folder_path = staticmethod(DataFusionClient.parse_common_folder_path)
common_organization_path = staticmethod(DataFusionClient.common_organization_path)
parse_common_organization_path = staticmethod(
DataFusionClient.parse_common_organization_path
)
common_project_path = staticmethod(DataFusionClient.common_project_path)
parse_common_project_path = staticmethod(DataFusionClient.parse_common_project_path)
common_location_path = staticmethod(DataFusionClient.common_location_path)
parse_common_location_path = staticmethod(
DataFusionClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataFusionAsyncClient: The constructed client.
"""
return DataFusionClient.from_service_account_info.__func__(DataFusionAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataFusionAsyncClient: The constructed client.
"""
return DataFusionClient.from_service_account_file.__func__(DataFusionAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> DataFusionTransport:
"""Returns the transport used by the client instance.
Returns:
DataFusionTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DataFusionClient).get_transport_class, type(DataFusionClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, DataFusionTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the data fusion client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DataFusionTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DataFusionClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_available_versions(
self,
request: Union[datafusion.ListAvailableVersionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAvailableVersionsAsyncPager:
r"""Lists possible versions for Data Fusion instances in
the specified project and location.
Args:
request (Union[google.cloud.data_fusion_v1.types.ListAvailableVersionsRequest, dict]):
The request object. Request message for the list
available versions request.
parent (:class:`str`):
Required. The project and location
for which to retrieve instance
information in the format
projects/{project}/locations/{location}.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.data_fusion_v1.services.data_fusion.pagers.ListAvailableVersionsAsyncPager:
Response message for the list
available versions request.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datafusion.ListAvailableVersionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_available_versions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListAvailableVersionsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
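    # --- Editor's note (not part of the generated client): a hedged usage sketch
    # --- kept as comments because executable code cannot live inside the class
    # --- body. The credentials file, project, and location are placeholders.
    #
    #     client = DataFusionAsyncClient.from_service_account_file("sa-key.json")
    #     pager = await client.list_available_versions(
    #         parent="projects/my-project/locations/us-central1")
    #     async for version in pager:   # pages are fetched transparently
    #         print(version)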
async def list_instances(
self,
request: Union[datafusion.ListInstancesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListInstancesAsyncPager:
r"""Lists Data Fusion instances in the specified project
and location.
Args:
request (Union[google.cloud.data_fusion_v1.types.ListInstancesRequest, dict]):
The request object. Request message for listing Data
Fusion instances.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.data_fusion_v1.services.data_fusion.pagers.ListInstancesAsyncPager:
Response message for the list
instance request.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
request = datafusion.ListInstancesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_instances,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListInstancesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_instance(
self,
request: Union[datafusion.GetInstanceRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datafusion.Instance:
r"""Gets details of a single Data Fusion instance.
Args:
request (Union[google.cloud.data_fusion_v1.types.GetInstanceRequest, dict]):
The request object. Request message for getting details
about a Data Fusion instance.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.data_fusion_v1.types.Instance:
Represents a Data Fusion instance.
"""
# Create or coerce a protobuf request object.
request = datafusion.GetInstanceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_instance,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_instance(
self,
request: Union[datafusion.CreateInstanceRequest, dict] = None,
*,
parent: str = None,
instance: datafusion.Instance = None,
instance_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Data Fusion instance in the specified
project and location.
Args:
request (Union[google.cloud.data_fusion_v1.types.CreateInstanceRequest, dict]):
The request object. Request message for creating a Data
Fusion instance.
parent (:class:`str`):
Required. The instance's project and
location in the format
projects/{project}/locations/{location}.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
instance (:class:`google.cloud.data_fusion_v1.types.Instance`):
An instance resource.
This corresponds to the ``instance`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
instance_id (:class:`str`):
Required. The name of the instance to
create.
This corresponds to the ``instance_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.data_fusion_v1.types.Instance`
Represents a Data Fusion instance.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, instance, instance_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datafusion.CreateInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if instance is not None:
request.instance = instance
if instance_id is not None:
request.instance_id = instance_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_instance,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
datafusion.Instance,
metadata_type=datafusion.OperationMetadata,
)
# Done; return the response.
return response
async def delete_instance(
self,
request: Union[datafusion.DeleteInstanceRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single Date Fusion instance.
Args:
request (Union[google.cloud.data_fusion_v1.types.DeleteInstanceRequest, dict]):
The request object. Request message for deleting a Data
Fusion instance.
name (:class:`str`):
Required. The instance resource name
in the format
projects/{project}/locations/{location}/instances/{instance}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datafusion.DeleteInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_instance,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=datafusion.OperationMetadata,
)
# Done; return the response.
return response
async def update_instance(
self,
request: Union[datafusion.UpdateInstanceRequest, dict] = None,
*,
instance: datafusion.Instance = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates a single Data Fusion instance.
Args:
request (Union[google.cloud.data_fusion_v1.types.UpdateInstanceRequest, dict]):
The request object. Request message for updating a Data
Fusion instance. Data Fusion allows updating the labels,
options, and stack driver settings. This is also used
for CDF version upgrade.
instance (:class:`google.cloud.data_fusion_v1.types.Instance`):
Required. The instance resource that
replaces the resource on the server.
Currently, Data Fusion only allows
replacing labels, options, and stack
driver settings. All other fields will
be ignored.
This corresponds to the ``instance`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Field mask is used to specify the fields that the update
will overwrite in an instance resource. The fields
specified in the update_mask are relative to the
resource, not the full request. A field will be
overwritten if it is in the mask. If the user does not
provide a mask, all the supported fields (labels,
options, and version currently) will be overwritten.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.data_fusion_v1.types.Instance`
Represents a Data Fusion instance.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datafusion.UpdateInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if instance is not None:
request.instance = instance
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_instance,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("instance.name", request.instance.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
datafusion.Instance,
metadata_type=datafusion.OperationMetadata,
)
# Done; return the response.
return response
async def restart_instance(
self,
request: Union[datafusion.RestartInstanceRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Restart a single Data Fusion instance.
        At the end of the operation the instance is fully restarted.
Args:
request (Union[google.cloud.data_fusion_v1.types.RestartInstanceRequest, dict]):
The request object. Request message for restarting a
Data Fusion instance.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.data_fusion_v1.types.Instance`
Represents a Data Fusion instance.
"""
# Create or coerce a protobuf request object.
request = datafusion.RestartInstanceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.restart_instance,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
datafusion.Instance,
metadata_type=datafusion.OperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
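# Hedged usage sketch for the async context-manager support above (assuming a
# client constructed elsewhere with default credentials; names are
# illustrative):
#
#   async with DataFusionAsyncClient() as client:
#       op = await client.restart_instance(
#           request={"name": "projects/p/locations/l/instances/i"}
#       )
#       await op.result()
#
# Leaving the ``async with`` block closes the underlying transport via
# ``__aexit__``.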
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-data-fusion",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DataFusionAsyncClient",)
| 41.264507 | 171 | 0.636132 |
dbd0f8e468a298fccd3eb6d5c36f0a44b215852c | 657 | py | Python | migrations/versions/27630d92b728_.py | VikTymZ/pegelinux | bd274aebf8606da5217e0ee7653cf0dd8db4c10a | [
"MIT"
] | 11 | 2018-06-29T01:48:05.000Z | 2021-01-13T19:56:02.000Z | migrations/versions/27630d92b728_.py | VikTymZ/pegelinux | bd274aebf8606da5217e0ee7653cf0dd8db4c10a | [
"MIT"
] | 237 | 2018-07-01T05:18:03.000Z | 2022-02-12T10:32:50.000Z | migrations/versions/27630d92b728_.py | VikTymZ/pegelinux | bd274aebf8606da5217e0ee7653cf0dd8db4c10a | [
"MIT"
] | 9 | 2018-06-30T12:10:28.000Z | 2019-10-22T10:42:43.000Z | """empty message
Revision ID: 27630d92b728
Revises: a4702859796a
Create Date: 2018-06-27 10:13:29.576559
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "27630d92b728"
down_revision = "a4702859796a"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("post", sa.Column("domain", sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("post", "domain")
# ### end Alembic commands ###
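# Hedged note (not part of the autogenerated file): with a configured
# alembic.ini, this revision would typically be applied or rolled back with
#   alembic upgrade 27630d92b728
#   alembic downgrade a4702859796a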
| 22.655172 | 84 | 0.69102 |
51fb8451b0bf5d8804f4b5850b386bfdeb80481f | 3,404 | py | Python | molo/globalsite/migrations/0001_initial.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | null | null | null | molo/globalsite/migrations/0001_initial.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | 7 | 2018-05-10T14:36:41.000Z | 2018-10-18T08:27:51.000Z | molo/globalsite/migrations/0001_initial.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-05-15 20:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
('wagtailimages', '0018_remove_rendition_filter'),
]
operations = [
migrations.CreateModel(
name='CountrySite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name=b'Country Name')),
('code', models.CharField(help_text=b'eg. ZA', max_length=6, verbose_name=b'Country Code')),
('site_url', models.CharField(help_text=b'Link to the country site. eg. http://www.zm.sitename.org/', max_length=128)),
('flag', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'verbose_name': 'Country site',
},
),
migrations.CreateModel(
name='GlobalSiteSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_globalsite', models.BooleanField(default=False, help_text=b'When activated it will set the current site as the global site.', verbose_name=b'Activate Global Site')),
('autoredirect', models.BooleanField(default=False, help_text=b'When activated it will automatically redirect the users to the country of their choice when accessing the global site.', verbose_name=b'Activate Auto Redirect')),
('geolocation', models.BooleanField(default=False, help_text=b'When activated it will detect users country and redirect them to the supported country site. If the detected country is not available it will display the available country sites.', verbose_name=b'Activate Geolocation')),
('description', models.TextField(blank=True, help_text=b'This description will be displayed on the homepage of the global site', null=True)),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name=b'Region')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'verbose_name': 'Country Region',
},
),
migrations.AddField(
model_name='countrysite',
name='region',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='country_sites', to='globalsite.Region', verbose_name=b'Country Region'),
),
]
| 54.031746 | 299 | 0.635135 |
658371dcd734d69d0c4f4eae9e882256b8ca859e | 158 | py | Python | backend/__init__.py | luannguyenkhoa/tododemo-django-docker | 2f71498dc5884199e4b38211ba892273824ae5bf | [
"MIT"
] | null | null | null | backend/__init__.py | luannguyenkhoa/tododemo-django-docker | 2f71498dc5884199e4b38211ba892273824ae5bf | [
"MIT"
] | null | null | null | backend/__init__.py | luannguyenkhoa/tododemo-django-docker | 2f71498dc5884199e4b38211ba892273824ae5bf | [
"MIT"
] | 1 | 2019-10-16T08:11:07.000Z | 2019-10-16T08:11:07.000Z | """Initialize."""
__version__ = '0.1.0'
__version_info__ = tuple([int(num) if num.isdigit() else num for num in __version__.replace('-', '.', 1).split('.')])
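# Worked example of the expression above (illustrative values):
#   '0.1.0'      -> (0, 1, 0)
#   '0.2.0-rc.1' -> '0.2.0.rc.1' -> (0, 2, 0, 'rc', 1)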
| 39.5 | 117 | 0.64557 |
38af581913af0e5e5a6e8e1f3b539d92d54555a3 | 5,527 | py | Python | main.py | hiwonjoon/cycle-gan-tf | 1dea58036d75029559c8f0aaf83e307919b0742b | [
"MIT"
] | 79 | 2017-04-11T14:59:21.000Z | 2019-07-08T04:14:02.000Z | main.py | naru-hiyoko/cycle-gan-tf | 1dea58036d75029559c8f0aaf83e307919b0742b | [
"MIT"
] | 3 | 2017-05-11T17:33:32.000Z | 2018-02-18T12:54:04.000Z | main.py | naru-hiyoko/cycle-gan-tf | 1dea58036d75029559c8f0aaf83e307919b0742b | [
"MIT"
] | 28 | 2017-04-11T20:45:15.000Z | 2019-07-05T11:24:16.000Z | import numpy as np
import random
from ops import *
from model import *
import tensorflow as tf
import dataset
tf.set_random_seed(123)
np.random.seed(123)
random.seed(123)
TARGET = 'horse2zebra'
LOG_DIR = './log/'+TARGET
A_DIR = './datasets/'+TARGET+'/trainA/*'
B_DIR = './datasets/'+TARGET+'/trainB/*'
LEARNING_RATE = 0.0001
BETA_1 = 0.5
BETA_2 = 0.9
LAMBDA = 10
LAMBDA_CYCLE = 10
BATCH_SIZE = 8
MAX_ITERATION = 1000000
SAVE_PERIOD = 10000
SUMMARY_PERIOD = 50
NUM_CRITIC_TRAIN = 4
#############################################3
# Define Network
#############################################3
_, a = dataset.get_image_batch(A_DIR,BATCH_SIZE,300,256)
_, b = dataset.get_image_batch(B_DIR,BATCH_SIZE,300,256)
with tf.variable_scope('gen_a_to_b') as a_to_b_scope :
b_gen = build_enc_dec(a)
with tf.variable_scope('gen_b_to_a') as b_to_a_scope :
a_gen = build_enc_dec(b)
with tf.variable_scope('gen_b_to_a',reuse=True) :
a_identity = build_enc_dec(b_gen,True)
with tf.variable_scope('gen_a_to_b',reuse=True) :
b_identity = build_enc_dec(a_gen,True)
with tf.variable_scope('c_a') as scope:
alpha = tf.random_uniform(shape=[BATCH_SIZE,1,1,1], minval=0.,maxval=1.)
a_hat = alpha * a+ (1.0-alpha) * a_gen
v_a_real = build_critic(a)
scope.reuse_variables()
v_a_gen = build_critic(a_gen)
v_a_hat = build_critic(a_hat)
with tf.variable_scope('c_b') as scope:
alpha = tf.random_uniform(shape=[BATCH_SIZE,1,1,1], minval=0.,maxval=1.)
b_hat = alpha * b+ (1.0-alpha) * b_gen
v_b_real = build_critic(b)
scope.reuse_variables()
v_b_gen = build_critic(b_gen)
v_b_hat = build_critic(b_hat)
c_vars = [v for v in tf.trainable_variables() if v.name.startswith('c_')]
g_vars = [v for v in tf.trainable_variables() if v.name.startswith('gen_')]
#for v in c_vars : print v
#print('----------------------')
#for v in g_vars : print v
##################################
# Define Loss
##################################
c_optimizer = tf.train.AdamOptimizer(LEARNING_RATE,BETA_1,BETA_2)
g_optimizer = tf.train.AdamOptimizer(LEARNING_RATE,BETA_1,BETA_2)
# Training ops
W_a = tf.reduce_mean(v_a_real) - tf.reduce_mean(v_a_gen)
W_b = tf.reduce_mean(v_b_real) - tf.reduce_mean(v_b_gen)
W = W_a + W_b
GP_a = tf.reduce_mean(
(tf.sqrt(tf.reduce_sum(tf.gradients(v_a_hat,a_hat)[0]**2,reduction_indices=[1,2,3]))-1.0)**2
)
GP_b = tf.reduce_mean(
(tf.sqrt(tf.reduce_sum(tf.gradients(v_b_hat,b_hat)[0]**2,reduction_indices=[1,2,3]))-1.0)**2
)
GP = GP_a + GP_b
loss_c = -1.0*W + LAMBDA*GP
with tf.variable_scope('c_train') :
gvs = c_optimizer.compute_gradients(loss_c,var_list=c_vars)
train_c_op = c_optimizer.apply_gradients(gvs)
loss_g_a = -1.0 * tf.reduce_mean(v_a_gen)
loss_g_b = -1.0 * tf.reduce_mean(v_b_gen)
loss_g = loss_g_a + loss_g_b
loss_cycle_a = tf.reduce_mean(
tf.reduce_mean(tf.abs(a - a_identity),reduction_indices=[1,2,3])) # following the paper implementation.(divide by #pixels)
loss_cycle_b = tf.reduce_mean(
tf.reduce_mean(tf.abs(b - b_identity),reduction_indices=[1,2,3])) # following the paper implementation.(divide by #pixels)
loss_cycle = loss_cycle_a + loss_cycle_b
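# Hedged summary of the objective assembled above (the standard WGAN-GP plus
# cycle-consistency formulation; expectations are over the sampled batches):
#   critic loss:    -(E[D(real)] - E[D(fake)]) + LAMBDA * E[(||grad_xhat D(xhat)||_2 - 1)^2]
#   generator loss: -E[D(fake)] + LAMBDA_CYCLE * E[|x - G_ba(G_ab(x))|]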
with tf.variable_scope('g_train') :
gvs = g_optimizer.compute_gradients(loss_g+LAMBDA_CYCLE*loss_cycle,var_list=g_vars)
train_g_op = g_optimizer.apply_gradients(gvs)
#################################
# Miscellaneous (summary, init, etc.)
#################################
tf.summary.image('real_a',tf.transpose(a,perm=[0,2,3,1]),max_outputs=10)
tf.summary.image('fake_a',tf.transpose(a_gen,perm=[0,2,3,1]),max_outputs=10)
tf.summary.image('identity_a',tf.transpose(a_identity,perm=[0,2,3,1]),max_outputs=10)
tf.summary.image('real_b',tf.transpose(b,perm=[0,2,3,1]),max_outputs=10)
tf.summary.image('fake_b',tf.transpose(b_gen,perm=[0,2,3,1]),max_outputs=10)
tf.summary.image('identity_b',tf.transpose(b_identity,perm=[0,2,3,1]),max_outputs=10)
tf.summary.scalar('Estimated W',W)
tf.summary.scalar('gradient_penalty',GP)
tf.summary.scalar('loss_g', loss_g)
tf.summary.scalar('loss_cycle', loss_cycle)
# Summary Operations
summary_op = tf.summary.merge_all()
# Init operation
init_op = tf.global_variables_initializer()
#################################
# Train!
#################################
# Saver & Summary Writer
saver = tf.train.Saver(max_to_keep = 5)
# Queue ,Threads and Summary Writer
sess = tf.Session()
sess.run([init_op])
# if model exist, restore
"""
#if model exist :
# saver.restore(sess,"path_to_model")
"""
try:
summary_writer = tf.summary.FileWriter(LOG_DIR,sess.graph)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
for step in xrange(MAX_ITERATION+1) :
if coord.should_stop() :
break
for _ in xrange(NUM_CRITIC_TRAIN) :
_ = sess.run(train_c_op)
W_eval, GP_eval, loss_g_eval, loss_cycle_eval, _ = sess.run([W,GP,loss_g,loss_cycle,train_g_op])
print('%7d : W : %1.6f, GP : %1.6f, Loss G : %1.6f, Loss Cycle : %1.6f'%(
step,W_eval,GP_eval,loss_g_eval,loss_cycle_eval))
if( step % SUMMARY_PERIOD == 0 ) :
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str,step)
if( step % SAVE_PERIOD == 0 ):
saver.save(sess,LOG_DIR+'/model.ckpt',global_step=step)
except Exception, e:
coord.request_stop(e)
finally :
coord.request_stop()
coord.join(threads)
sess.close()
| 31.050562 | 126 | 0.667089 |
3cc3019314fa41644a2b1914638a13ead45f04f7 | 5,170 | py | Python | rl_algorithms/qlearning/DQN.py | blurry-mood/RL-algorithms | a3869943059579ef3a28f803bfe00f4fd58baeab | [
"MIT"
] | 4 | 2021-12-12T16:55:55.000Z | 2022-01-16T18:22:18.000Z | rl_algorithms/qlearning/DQN.py | blurry-mood/RL-algorithms | a3869943059579ef3a28f803bfe00f4fd58baeab | [
"MIT"
] | null | null | null | rl_algorithms/qlearning/DQN.py | blurry-mood/RL-algorithms | a3869943059579ef3a28f803bfe00f4fd58baeab | [
"MIT"
] | null | null | null | import collections
from copy import deepcopy
from typing import List
import torch
from ..Agent import Agent
import numpy as np
from torch import nn
class DQN(Agent):
def __init__(self, network: nn.Module, actions: List, alpha: float, gamma: float, eps: float, c: int = 128, t: int = 1024, capacity: int = 1024, bs: int = 32, device='cpu'):
super().__init__()
self.actions = {i: action for i, action in enumerate(actions)}
self.alpha = alpha
self.gamma = gamma
self.eps = eps
self.bs = bs
self.c = c
self.t = t
self.device = device
self.buffer = ExperienceReplay(capacity, device)
self.Q = network.to(device)
self.Q_prime = deepcopy(self.Q).to(device).eval()
self.loss = nn.MSELoss()
self.opt = torch.optim.SGD(self.Q.parameters(), lr=self.alpha)
self.i = 0 # counter used to trigger the update of Q_prime with Q
self.prev_state = None
self.prev_action = None
def _action_value(self, state, action=None, clone: bool = False):
""" If clone is False, the `self.Q` network is used, otherwise, `self.Q_prime` is used. """
Q = self.Q if not clone else self.Q_prime
n = state.shape[0]
state = state.to(self.device)
if action is not None:
value = Q(state)[list(range(n)), action]
else:
value = Q(state)
return value
def _get_action(self, state, eps):
""" Return an eps-greedy action to be taken from this state. """
with torch.no_grad():
if np.random.rand() < eps: # * 0.5*(np.cos(2 * np.pi * self.i/self.t)+1):
return torch.from_numpy(np.random.choice(list(self.actions.keys()), size=(state.shape[0],)))
actions = self._action_value(state=state, clone=True).argmax(dim=1)
return actions
def update(self, state:torch.Tensor, reward:float):
""" Update state-action value of previous (state, action).
Args:
state (Any): The new state representation.
reward (float): Reward received upon the transaction to `state`.
Note:
- The parameter ``state`` should be a tensor with the leading batch dimension.
"""
state = self.decode_state(state).cpu()
# register history
self.buffer.append((self.prev_state, self.prev_action, torch.tensor(reward).unsqueeze(0).float(), state))
# sample batch_size
states, actions, rewards, next_states = self.buffer.sample(self.bs)
gt = rewards + self.gamma * self._action_value(next_states, clone=True).max(dim=1)[0]
pred = self._action_value(states, actions, clone=False)
loss = self.loss(pred, gt)
# update Q
self.opt.zero_grad()
loss.backward()
self.opt.step()
if self.i == self.c:
# update Q_prim
self.i = 0
self.Q_prime = deepcopy(self.Q).eval()
self.i += 1
try:
return loss.item()
except:
return None
def take_action(self, state):
""" Choose an eps-greedy action to be taken from this state.
Args:
state (Any): The current state representation. After fed to ``decode_state``, the output should be eligible to be a network input.
"""
state = self.decode_state(state)
assert state.shape[0] ==1
action = self._get_action(state, self.eps).cpu()
self.prev_action = action
self.prev_state = state
return self.actions[action.item()]
def save(self, path: str):
""" Save state-action value table in `path`.npy
Args:
path (str): The location of where to store the state-action value table.
"""
super().save(path)
torch.save(self.Q.state_dict(), path + '.pth')
def load(self, path):
""" Load state-action value table.
If it doesn't exist, a randomly-initialized table is used.
Args:
path (str): The location of where the state-action value table resides.
"""
try:
self.Q.load_state_dict(torch.load( path + '.pth'))
self.Q = self.Q.to(self.device)
self.Q_prime = deepcopy(self.Q).to(self.device).eval()
except:
print("No file is found in:", path)
class ExperienceReplay:
def __init__(self, capacity, device):
self.buffer = collections.deque(maxlen=capacity)
self.device = device
def __len__(self):
return len(self.buffer)
def append(self, experience):
self.buffer.append(experience)
def sample(self, batch_size):
try:
indices = np.random.choice(
len(self.buffer), batch_size, replace=False)
except:
indices = np.random.choice(
len(self.buffer), batch_size, replace=True)
states, actions, rewards, next_states = map(lambda x: torch.cat(x, dim=0).to(self.device), zip(*(self.buffer[idx] for idx in indices)))
return states, actions, rewards, next_states | 32.515723 | 177 | 0.589942 |
eabbba6d63a9f509c8dec2c871ddcec2855f336e | 388 | py | Python | kroml/iostrategies/input_text/loader_text.py | kroML/kroml | cfa5e505aaac01c31c1a4811e27ce70c41e0b1ae | [
"MIT"
] | null | null | null | kroml/iostrategies/input_text/loader_text.py | kroML/kroml | cfa5e505aaac01c31c1a4811e27ce70c41e0b1ae | [
"MIT"
] | 4 | 2020-09-26T01:10:21.000Z | 2022-02-10T02:00:18.000Z | kroml/iostrategies/input_text/loader_text.py | kroML/kroml | cfa5e505aaac01c31c1a4811e27ce70c41e0b1ae | [
"MIT"
] | null | null | null | import abc
from utils.logger import Logger
logger = Logger(__name__)
@logger.for_all_methods(in_args=False,
skip_func=['__init__'])
class Loader(metaclass=abc.ABCMeta):
def __init__(self, config, variables):
self.config = config
self.variables = variables
@abc.abstractmethod
def load(self, input_file: dict) -> None:
pass
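# Hedged illustration of a concrete strategy (hypothetical class, not part of
# the package):
#
# class CsvLoader(Loader):
#     def load(self, input_file: dict) -> None:
#         # read input_file['path'] and store the parsed result on
#         # self.variables for downstream modules
#         ...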
| 21.555556 | 47 | 0.654639 |
3acadcf71646a67465b0c1cf26ce6a3a3ac1139d | 15,022 | py | Python | flask_appbuilder/security/sqla/manager.py | astahlman/Flask-AppBuilder | ebd543db9fc787ca9a1c5ee06809ae0e92359968 | [
"BSD-3-Clause"
] | null | null | null | flask_appbuilder/security/sqla/manager.py | astahlman/Flask-AppBuilder | ebd543db9fc787ca9a1c5ee06809ae0e92359968 | [
"BSD-3-Clause"
] | 3 | 2021-06-08T23:39:54.000Z | 2022-03-12T00:50:13.000Z | flask_appbuilder/security/sqla/manager.py | jameslo1212/Flask-AppBuilder | b71789d85b632935eca79c0b53fb77f20bf17fe6 | [
"BSD-3-Clause"
] | null | null | null | import logging
import uuid
from sqlalchemy import func
from sqlalchemy.engine.reflection import Inspector
from werkzeug.security import generate_password_hash
from .models import User, Permission, PermissionView, RegisterUser, ViewMenu, Role
from ..manager import BaseSecurityManager
from ...models.sqla.interface import SQLAInterface
from ...models.sqla import Base
from ... import const as c
log = logging.getLogger(__name__)
class SecurityManager(BaseSecurityManager):
"""
Responsible for authentication, registering security views,
role and permission auto management
If you want to change anything just inherit and override, then
pass your own security manager to AppBuilder.
"""
user_model = User
""" Override to set your own User Model """
role_model = Role
""" Override to set your own Role Model """
permission_model = Permission
viewmenu_model = ViewMenu
permissionview_model = PermissionView
registeruser_model = RegisterUser
def __init__(self, appbuilder):
"""
            SecurityManager constructor
            :param appbuilder:
F.A.B AppBuilder main object
"""
super(SecurityManager, self).__init__(appbuilder)
user_datamodel = SQLAInterface(self.user_model)
if self.auth_type == c.AUTH_DB:
self.userdbmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_LDAP:
self.userldapmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OID:
self.useroidmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OAUTH:
self.useroauthmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_REMOTE_USER:
self.userremoteusermodelview.datamodel = user_datamodel
self.userstatschartview.datamodel = user_datamodel
if self.auth_user_registration:
self.registerusermodelview.datamodel = SQLAInterface(self.registeruser_model)
self.rolemodelview.datamodel = SQLAInterface(self.role_model)
self.permissionmodelview.datamodel = SQLAInterface(self.permission_model)
self.viewmenumodelview.datamodel = SQLAInterface(self.viewmenu_model)
self.permissionviewmodelview.datamodel = SQLAInterface(self.permissionview_model)
self.create_db()
@property
def get_session(self):
return self.appbuilder.get_session
def register_views(self):
super(SecurityManager, self).register_views()
def create_db(self):
try:
engine = self.get_session.get_bind(mapper=None, clause=None)
inspector = Inspector.from_engine(engine)
if 'ab_user' not in inspector.get_table_names():
log.info(c.LOGMSG_INF_SEC_NO_DB)
Base.metadata.create_all(engine)
log.info(c.LOGMSG_INF_SEC_ADD_DB)
super(SecurityManager, self).create_db()
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_CREATE_DB.format(str(e)))
exit(1)
def find_register_user(self, registration_hash):
return self.get_session.query(self.registeruser_model).filter(
self.registeruser_model.registration_hash == registration_hash).scalar()
def add_register_user(self, username, first_name, last_name, email,
password='', hashed_password=''):
"""
Add a registration request for the user.
:rtype : RegisterUser
"""
register_user = self.registeruser_model()
register_user.username = username
register_user.email = email
register_user.first_name = first_name
register_user.last_name = last_name
if hashed_password:
register_user.password = hashed_password
else:
register_user.password = generate_password_hash(password)
register_user.registration_hash = str(uuid.uuid1())
try:
self.get_session.add(register_user)
self.get_session.commit()
return register_user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_REGISTER_USER.format(str(e)))
self.appbuilder.get_session.rollback()
return None
def del_register_user(self, register_user):
"""
Deletes registration object from database
:param register_user: RegisterUser object to delete
"""
try:
self.get_session.delete(register_user)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e)))
self.get_session.rollback()
return False
def find_user(self, username=None, email=None):
"""
Finds user by username or email
"""
if username:
return self.get_session.query(self.user_model).filter(func.lower(self.user_model.username) == func.lower(username)).first()
elif email:
return self.get_session.query(self.user_model).filter_by(email=email).first()
def get_all_users(self):
return self.get_session.query(self.user_model).all()
def add_user(self, username, first_name, last_name, email, role, password='', hashed_password=''):
"""
Generic function to create user
"""
try:
user = self.user_model()
user.first_name = first_name
user.last_name = last_name
user.username = username
user.email = email
user.active = True
user.roles.append(role)
if hashed_password:
user.password = hashed_password
else:
user.password = generate_password_hash(password)
self.get_session.add(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_USER.format(username))
return user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_USER.format(str(e)))
self.get_session.rollback()
return False
def count_users(self):
return self.get_session.query(func.count('*')).select_from(self.user_model).scalar()
def update_user(self, user):
try:
self.get_session.merge(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_UPD_USER.format(user))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_UPD_USER.format(str(e)))
self.get_session.rollback()
return False
def get_user_by_id(self, pk):
return self.get_session.query(self.user_model).get(pk)
"""
-----------------------
PERMISSION MANAGEMENT
-----------------------
"""
def add_role(self, name):
role = self.find_role(name)
if role is None:
try:
role = self.role_model()
role.name = name
self.get_session.add(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_ROLE.format(name))
return role
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_ROLE.format(str(e)))
self.get_session.rollback()
return role
def find_role(self, name):
return self.get_session.query(self.role_model).filter_by(name=name).first()
def get_all_roles(self):
return self.get_session.query(self.role_model).all()
def get_public_permissions(self):
role = self.get_session.query(self.role_model).filter_by(name=self.auth_role_public).first()
return role.permissions
def find_permission(self, name):
"""
Finds and returns a Permission by name
"""
return self.get_session.query(self.permission_model).filter_by(name=name).first()
def add_permission(self, name):
"""
Adds a permission to the backend, model permission
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
perm = self.find_permission(name)
if perm is None:
try:
perm = self.permission_model()
perm.name = name
self.get_session.add(perm)
self.get_session.commit()
return perm
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMISSION.format(str(e)))
self.get_session.rollback()
return perm
def del_permission(self, name):
"""
Deletes a permission from the backend, model permission
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
perm = self.find_permission(name)
if perm:
try:
self.get_session.delete(perm)
self.get_session.commit()
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
"""
----------------------
PRIMITIVES VIEW MENU
----------------------
"""
def find_view_menu(self, name):
"""
Finds and returns a ViewMenu by name
"""
return self.get_session.query(self.viewmenu_model).filter_by(name=name).first()
def get_all_view_menu(self):
return self.get_session.query(self.viewmenu_model).all()
def add_view_menu(self, name):
"""
Adds a view or menu to the backend, model view_menu
param name:
name of the view menu to add
"""
view_menu = self.find_view_menu(name)
if view_menu is None:
try:
view_menu = self.viewmenu_model()
view_menu.name = name
self.get_session.add(view_menu)
self.get_session.commit()
return view_menu
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_VIEWMENU.format(str(e)))
self.get_session.rollback()
return view_menu
def del_view_menu(self, name):
"""
Deletes a ViewMenu from the backend
:param name:
name of the ViewMenu
"""
obj = self.find_view_menu(name)
if obj:
try:
self.get_session.delete(obj)
self.get_session.commit()
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
"""
----------------------
PERMISSION VIEW MENU
----------------------
"""
def find_permission_view_menu(self, permission_name, view_menu_name):
"""
Finds and returns a PermissionView by names
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
return self.get_session.query(self.permissionview_model).filter_by(permission=permission, view_menu=view_menu).first()
def find_permissions_view_menu(self, view_menu):
"""
Finds all permissions from ViewMenu, returns list of PermissionView
:param view_menu: ViewMenu object
:return: list of PermissionView objects
"""
return self.get_session.query(self.permissionview_model).filter_by(view_menu_id=view_menu.id).all()
def add_permission_view_menu(self, permission_name, view_menu_name):
"""
Adds a permission on a view or menu to the backend
:param permission_name:
name of the permission to add: 'can_add','can_edit' etc...
:param view_menu_name:
name of the view menu to add
"""
vm = self.add_view_menu(view_menu_name)
perm = self.add_permission(permission_name)
pv = self.permissionview_model()
pv.view_menu_id, pv.permission_id = vm.id, perm.id
try:
self.get_session.add(pv)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMVIEW.format(str(pv)))
return pv
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMVIEW.format(str(e)))
self.get_session.rollback()
def del_permission_view_menu(self, permission_name, view_menu_name):
try:
pv = self.find_permission_view_menu(permission_name, view_menu_name)
# delete permission on view
self.get_session.delete(pv)
self.get_session.commit()
# if no more permission on permission view, delete permission
if not self.get_session.query(self.permissionview_model).filter_by(permission=pv.permission).all():
self.del_permission(pv.permission.name)
log.info(c.LOGMSG_INF_SEC_DEL_PERMVIEW.format(permission_name, view_menu_name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMVIEW.format(str(e)))
self.get_session.rollback()
def exist_permission_on_views(self, lst, item):
for i in lst:
if i.permission and i.permission.name == item:
return True
return False
def exist_permission_on_view(self, lst, permission, view_menu):
for i in lst:
if i.permission.name == permission and i.view_menu.name == view_menu:
return True
return False
def add_permission_role(self, role, perm_view):
"""
Add permission-ViewMenu object to Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
"""
if perm_view not in role.permissions:
try:
role.permissions.append(perm_view)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(perm_view), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e)))
self.get_session.rollback()
def del_permission_role(self, role, perm_view):
"""
        Remove a permission-ViewMenu object from a Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
"""
if perm_view in role.permissions:
try:
role.permissions.remove(perm_view)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(perm_view), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e)))
self.get_session.rollback()
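# Hedged customization sketch for the override pattern described in the class
# docstring (``MyUser`` and ``MySecurityManager`` are hypothetical names):
#
# class MySecurityManager(SecurityManager):
#     user_model = MyUser  # custom user model with extra columns
#
# appbuilder = AppBuilder(app, db.session,
#                         security_manager_class=MySecurityManager)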
| 36.818627 | 135 | 0.607975 |
7f596934619918187d0f3ea8a92a3adf403f59fd | 3,277 | py | Python | topic_model/train_lda_model_and_save.py | krikyn/Strong-Paraphrase-Generation-2020 | 3d5b6f4fd0d4b4f96ed6bdd91b7000d3d80fc901 | [
"MIT"
] | 10 | 2020-04-29T18:28:34.000Z | 2021-11-26T07:41:19.000Z | topic_model/train_lda_model_and_save.py | krikyn/Strong-Paraphrase-Generation-2020 | 3d5b6f4fd0d4b4f96ed6bdd91b7000d3d80fc901 | [
"MIT"
] | null | null | null | topic_model/train_lda_model_and_save.py | krikyn/Strong-Paraphrase-Generation-2020 | 3d5b6f4fd0d4b4f96ed6bdd91b7000d3d80fc901 | [
"MIT"
] | 9 | 2020-07-02T08:31:55.000Z | 2021-11-15T20:57:54.000Z | import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
import logging
import warnings
from lxml import etree
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
warnings.filterwarnings("ignore", category=DeprecationWarning)
data_words = []
root = etree.parse(r'C:\Users\kiva0319\PycharmProjects\Diploma2020\processed\paraphrases.xml')
root = root.getroot()
count = 0
bad_paragraphs = 0
for element in root[1]:
print(count, bad_paragraphs)
count += 1
element_paragraphs_1 = element[14]
element_paragraphs_2 = element[15]
for paragraph in element_paragraphs_1:
if int(paragraph.attrib.get("words")) >= 5:
words = []
for word in paragraph.text.split(";"):
if word.isalpha():
words.append(word)
data_words.append(words)
else:
print("bad paragraph")
bad_paragraphs += 1
for paragraph in element_paragraphs_2:
if int(paragraph.attrib.get("words")) >= 5:
words = []
for word in paragraph.text.split(";"):
if word.isalpha():
words.append(word)
data_words.append(words)
else:
print("bad paragraph")
bad_paragraphs += 1
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
data_lemmatized = make_bigrams(data_words)
print(data_lemmatized[:1])
id2word = corpora.Dictionary(data_lemmatized)
texts = data_lemmatized
corpus = [id2word.doc2bow(text) for text in texts]
print(corpus[:1])
# lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
# id2word=id2word,
# num_topics=10,
# random_state=100,
# update_every=1,
# chunksize=100,
# passes=10,
# alpha='auto',
# per_word_topics=True,
# max_iterations=50)
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=100,
chunksize=100)
lda_model.save('lda_model_full3')
# pprint(lda_model.print_topics())
# doc_lda = lda_model[corpus]
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word, mds='mmds')
pyLDAvis.save_html(vis, 'LDA_Visualization.html')
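# Hedged follow-up sketch (not in the original script): reloading the saved
# model and scoring a new tokenised paragraph with the same dictionary.
#
# loaded = gensim.models.ldamodel.LdaModel.load('lda_model_full3')
# bow = id2word.doc2bow(['token_one', 'token_two'])
# print(loaded.get_document_topics(bow))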
| 30.915094 | 94 | 0.588648 |
6e2224888f00bc45654f9a1c6cbd4b3c03016c3a | 81 | py | Python | scraper/eckersleys.py | hector-moya/my-new-chroma | be61a9e8a5221118c16f1ed517f6c914c533e354 | [
"MIT"
] | null | null | null | scraper/eckersleys.py | hector-moya/my-new-chroma | be61a9e8a5221118c16f1ed517f6c914c533e354 | [
"MIT"
] | 5 | 2021-03-09T23:46:12.000Z | 2022-03-02T06:24:52.000Z | scraper/eckersleys.py | hector-moya/my-new-chroma | be61a9e8a5221118c16f1ed517f6c914c533e354 | [
"MIT"
] | null | null | null | from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup | 40.5 | 43 | 0.839506 |
93684b9dac1b63822ee0f3a4ee9e3a1e4242514c | 4,141 | py | Python | binlin/utils/visualize.py | UKPLab/inlg2019-revisiting-binlin | 250196403ee4050cac78c547add90087ea04243f | [
"Apache-2.0"
] | 1 | 2021-12-15T08:44:35.000Z | 2021-12-15T08:44:35.000Z | binlin/utils/visualize.py | UKPLab/inlg2019-revisiting-binlin | 250196403ee4050cac78c547add90087ea04243f | [
"Apache-2.0"
] | 3 | 2021-03-19T04:07:44.000Z | 2022-01-13T01:40:50.000Z | binlin/utils/visualize.py | UKPLab/inlg2019-revisiting-binlin | 250196403ee4050cac78c547add90087ea04243f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
from typing import List
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import torch
from matplotlib.backends.backend_pdf import PdfPages
from torch.nn.modules.module import _addindent
from binlin.utils.timing import create_progress_bar
# needed on machines w/o DISPLAY var set
plt.switch_backend('agg')
logger = logging.getLogger('main')
# Training procedure
def torch_summarize(model, show_weights=True, show_parameters=True):
"""
Summarizes torch model by showing trainable parameters and weights.
Taken from:
https://stackoverflow.com/questions/42480111/model-summary-in-pytorch#42616812
"""
tmpstr = model.__class__.__name__ + ' (\n'
for key, module in model._modules.items():
# if it contains layers let call it recursively to get params and weights
if type(module) in [
torch.nn.modules.container.Container,
torch.nn.modules.container.Sequential
]:
modstr = torch_summarize(module)
else:
modstr = module.__repr__()
modstr = _addindent(modstr, 2)
params = sum([np.prod(p.size()) for p in module.parameters()])
weights = tuple([tuple(p.size()) for p in module.parameters()])
tmpstr += ' (' + key + '): ' + modstr
if show_weights:
tmpstr += ', weights={}'.format(weights)
if show_parameters:
tmpstr += ', parameters={}'.format(params)
tmpstr += '\n'
tmpstr = tmpstr + ')'
return tmpstr
def plot_lcurves(train_losses: List, dev_losses: List, img_title: str, fname: str, show: bool):
plt.plot(train_losses)
plt.plot(dev_losses)
plt.title(img_title)
plt.legend(labels=["TrainLoss", "DevLoss"], loc='best', fancybox=True, framealpha=0.5)
if fname is not None:
logger.debug("Saving the learning curve plot --> %s" % fname)
plt.savefig(fname)
if show:
plt.show()
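# Hedged usage example for plot_lcurves (illustrative values):
#   plot_lcurves([1.2, 0.8, 0.6], [1.3, 0.9, 0.7],
#                "Training vs. dev loss", "lcurves.png", show=False)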
# Graph visualization
def draw_graph(g, label, layout='dot', fname='depgraph.png', node_mapping=None):
# mapping = dict([(n, '%s_%s' % (n, n_attr['PRED_FORM'])) for n, n_attr in g.nodes(data=True)])
if node_mapping:
g = nx.relabel_nodes(g, node_mapping)
ag = nx.nx_agraph.to_agraph(g)
ag.graph_attr['label'] = label
ag.layout(prog=layout)
ag.draw(fname)
def hilight_disagreement(gold_g, pred_g):
gedges = gold_g.edges()
pedges = pred_g.edges()
for ge in gedges:
if ge not in pedges:
pred_g.add_edge(*ge, color='red', style='setlinewidth(3)')
for pe in pedges:
if pe not in gedges:
nx.set_edge_attributes(pred_g, 'color', values={pe: 'blue'})
nx.set_edge_attributes(pred_g, 'style', values={pe: 'setlinewidth(3)'})
# Data analysis
def hist_l(l, xlabel, ylabel, title):
n, bins, patches = plt.hist(l, 50, facecolor='g', alpha=0.75)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.grid(True)
plt.show()
def hist_from_countd(counts_dict):
val, weight = zip(*[(k, v) for k, v in counts_dict.items()])
plt.hist(val, weights=weight)
def plot_len_hist(itemlist, xlabel, ylabel, title, fname):
# Aux function to plot a histogram of the distribution of itmes' lengths.
# Creating a Pandas DataFrame from a list of lengths
lens_df = pd.DataFrame(itemlist)
# Retrieving stats from Pandas DF
mean = float(lens_df.mean())
std = float(lens_df.std())
min_len = int(lens_df.min())
max_len = int(lens_df.max())
pp = PdfPages(fname)
# plot the histogram of the length distribution
n, bins, patches = plt.hist(itemlist, 20, facecolor='b', alpha=0.55)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
# plt.yscale('log')
plt.axis([min_len, max_len, 0, 10000])
plt.text(40, 7500, r'$mean={:.2f},\ std={:.2f}$'.format(mean, std))
plt.text(40, 6800, r'$min={},\ max={}$'.format(min_len, max_len))
plt.grid(True)
plt.tight_layout()
plt.show()
pp.savefig()
pp.close() | 28.756944 | 99 | 0.645013 |
d667b996a7269ff29fa2a08dbc7f5c906aedb67b | 1,373 | py | Python | lego/settings/lego.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 45 | 2017-10-24T12:09:06.000Z | 2021-11-03T21:21:03.000Z | lego/settings/lego.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 980 | 2017-10-24T12:29:07.000Z | 2022-03-31T04:04:31.000Z | lego/settings/lego.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 23 | 2018-04-11T16:34:22.000Z | 2021-11-23T12:28:30.000Z | from datetime import timedelta
SITE = {
"name": "LEGO",
"slogan": "LEGO Er Ganske Oppdelt",
"contact_email": "[email protected]",
"documentation_url": "/docs/",
"domain": "abakus.no",
"owner": "Abakus",
}
API_VERSION = "v1"
LOGIN_REDIRECT_URL = f"/api/{API_VERSION}/"
EMAIL_SUBJECT_PREFIX = "[Abakus] "
ADMINS = (("Webkom", "[email protected]"),)
MANAGERS = ADMINS
PENALTY_DURATION = timedelta(days=20)
# Tuples for ignored (month, day) intervals
PENALTY_IGNORE_SUMMER = ((6, 1), (8, 15))
PENALTY_IGNORE_WINTER = ((12, 1), (1, 10))
REGISTRATION_CONFIRMATION_TIMEOUT = 60 * 60 * 24
STUDENT_CONFIRMATION_TIMEOUT = 60 * 60 * 24
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24
HEALTH_CHECK_REMOTE_IPS = ["10.", "127.0.0."]
LDAP_GROUPS = [
"Ababand",
"Fondsstyret",
"Abakus-leder",
"Hovedstyret",
"Komiteledere",
"itDAGENE",
"Jubileum",
"Kasserere",
"Ordenen",
"PR-ansvarlige",
"RevyStyret",
"xcom-data",
"xcom-komtek",
]
SMTP_HOST = "0.0.0.0"
SMTP_PORT = 8025
RESTRICTED_ADDRESS = "restricted"
RESTRICTED_DOMAIN = "abakus.no"
RESTRICTED_FROM = "Abakus <[email protected]>"
RESTRICTED_ALLOW_ORIGINAL_SENDER = False
GSUITE_DOMAIN = "abakus.no"
GSUITE_GROUPS = []
# External users in GSuite not managed by lego. (Don't suspend these.)
GSUITE_EXTERNAL_USERS = ["[email protected]", "[email protected]"]
| 22.145161 | 71 | 0.670066 |
7896d86a3d11f4a7f73d57d5bfba3a679bdfbc51 | 7,244 | py | Python | beerkeg.py | huntrar/ChooseMyBeer | 02dad14baf064ce8d3107ff6f4586bf2ec0d3118 | [
"MIT"
] | 4 | 2015-07-22T05:51:18.000Z | 2017-04-30T03:01:39.000Z | beerkeg.py | huntrar/ChooseMyBeer | 02dad14baf064ce8d3107ff6f4586bf2ec0d3118 | [
"MIT"
] | null | null | null | beerkeg.py | huntrar/ChooseMyBeer | 02dad14baf064ce8d3107ff6f4586bf2ec0d3118 | [
"MIT"
] | null | null | null |
import re
import webbrowser
from urlparse import urlparse
from utils import get_text, get_html, is_num, unique
class BeerKeg(object):
''' Beer Keg class '''
def __init__(self, url, num_attempts, verbose=False):
''' url must be a string containing the url for a single BevMo keg '''
self.url = url
''' Turn printing on or off '''
self.verbose = verbose
''' Prevent parsing more than once '''
self.parsed = False
''' The ratio of gallons of alcohol per dollar '''
self.ratio = None
''' Number of attempts to find ABV '''
self.num_attempts = num_attempts
def open(self):
webbrowser.open(self.url)
def parse(self):
''' retrieves the page and parses the contents into the following fields
self.name (May include brewery/brand and/or beer)
self.price (USD)
self.volume (Gallons)
self.num_avail (Kegs)
self.desc (Keg description)
'''
if self.parsed:
return
self.parsed = True
html = get_html(self.url)
''' Attempt to get name and volume '''
try:
self.name = html.xpath('//h1/text()')[0].strip()
if '(' in self.name and ')' in self.name:
split_name = self.name.split('(')
self.name = split_name[0].strip()
volume = filter(lambda x: is_num(x) if '.' not in x \
else x, split_name[1].strip(')').strip())
if is_num(volume):
self.volume = float(volume)
else:
self.volume = 0.0
else:
self.volume = 0.0
except Exception:
self.name = ''
self.volume = 0.0
''' Attempt to get price '''
try:
self.price = float(html.xpath('//span[@class="ProductDetailItemPric\
e"]/text()')[0].strip().strip('$'))
except Exception:
self.price = 0.0
''' Attempt to get number of available kegs '''
try:
self.num_avail = int(html.xpath('//em/text()\
')[0].strip().split()[0])
except Exception:
self.num_avail = 0
''' Attempt to get description '''
try:
self.desc = html.xpath('//td[@class="ProductDetailCell"]/p/text()\
')[0].strip()
except Exception:
self.desc = ''
def get_abv(self):
''' Attempts to find percentage of alcohol by volume using Bing '''
abv = ''
found_abv = ''
''' A ceiling for ABV content for validation
We can assume BevMo does not offer kegs with this high of an ABV
'''
max_abv = 20.0
if not self.parsed:
self.parse()
search_url = 'https://www.bing.com/search?q={0}+alcohol+content\
'.format('+'.join(self.name.split()))
search_links = get_html(search_url).xpath('//a/@href')
new_search_links = search_links[search_links.index('javascript:'):][1:]
results = [x for x in new_search_links if x != '#' and 'site:' not in x]
''' Max number of links to search for alcohol by volume (ABV) '''
num_attempts = self.num_attempts
''' Filter links with same domain to improve chances of matching '''
searched_domains = set()
        ''' Add the top page results that are unique; r_it indexes into results '''
top_results = []
r_it = 0
result_link = ''
while len(top_results) < num_attempts and r_it < len(results):
result_link = results[r_it]
domain = '{url.netloc}'.format(url=urlparse(result_link))
if '.' in domain:
if domain.count('.') > 1:
domain = domain.split('.')[1]
else:
domain = domain.split('.')[0]
''' Avoid already searched domains '''
if domain in searched_domains:
r_it += 1
else:
top_results.append(result_link)
r_it += 1
searched_domains.add(domain)
for i in xrange(min(num_attempts, len(top_results))):
if self.verbose:
print('Searching {}'.format(top_results[i]))
try:
search_text = ''.join(get_text(get_html(top_results[i])))
except Exception:
continue
''' Retrieves partial string containing the words ABV and a % '''
            abv = re.search('(?<=[Aa][Bb][Vv])[^\d]*(\d+[.]?\d*)(?=%)'
                            '|(?<=%)[^\d]*(\d+[.]?\d*)[^\d]*(?=[Aa][Bb][Vv])',
                            search_text)
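            # Hedged examples of strings the pattern above is meant to match
            # (illustrative): 'ABV: 5.9%' -> '5.9', and '5.9% ABV' -> '5.9'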
if abv:
abv = abv.group()
''' Filters for a number with or without a decimal pt '''
abv = float(re.search('(\d+[.]?\d*)', abv).group())
''' If new ABV is 0.0, return previously found ABV if any
otherwise, move onto the next link
'''
if abv == 0.0:
if found_abv:
if self.verbose:
print('ABV for {} is {}'.format(self.name, abv))
else:
continue
if abv < max_abv:
if abv < max_abv / 2:
if self.verbose:
print('ABV for {} is {}'.format(self.name, abv))
return abv
''' Replace the new ABV only if the next is lower '''
if found_abv:
if abv < found_abv:
if self.verbose:
print('ABV for {} is {}'.format(self.name, abv))
return abv
else:
if self.verbose:
print('ABV for {} is {}\
'.format(self.name, found_abv))
return found_abv
''' Sets the new ABV to the found ABV '''
found_abv = abv
else:
if found_abv:
if self.verbose:
print('ABV for {} is {}'.format(self.name, found_abv))
return found_abv
''' No ABV was found by this point '''
if self.verbose:
print('ABV not found for {}'.format(self.name))
return None
def get_ratio(self):
''' Returns the ratio of gallons of alcohol per USD '''
alcohol_pct = self.get_abv()
if alcohol_pct is not None:
try:
ratio = (alcohol_pct * .1 * self.volume) / self.price
except Exception:
return None
if self.verbose:
print('\tRatio: {}'.format(str(ratio)))
self.ratio = ratio
return ratio
else:
return None
| 32.778281 | 80 | 0.464384 |
38f1635e2190dd8c678402adf55ebf12edf16bd7 | 3,751 | py | Python | gui/plugins/timeline_view_test.py | strcrzy/grr | 60d113069b8016c3eba8bc735a3acbc5c738fda4 | [
"Apache-2.0"
] | 6 | 2015-04-03T02:25:28.000Z | 2021-11-17T21:42:59.000Z | gui/plugins/timeline_view_test.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z | gui/plugins/timeline_view_test.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
"""Tests for the Timeline viewer flow."""
from grr.client import vfs
from grr.gui import runtests_test
from grr.lib import access_control
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestTimelineView(test_lib.GRRSeleniumTest):
"""Test the timeline view."""
def CreateTimelineFixture(self):
"""Creates a new timeline fixture we can play with."""
# Create a client for testing
client_id = rdfvalue.ClientURN("C.0000000000000001")
token = access_control.ACLToken(username="test", reason="fixture")
fd = aff4.FACTORY.Create(client_id, "VFSGRRClient", token=token)
cert = self.ClientCertFromPrivateKey(
config_lib.CONFIG["Client.private_key"])
client_cert = rdfvalue.RDFX509Cert(cert.as_pem())
fd.Set(fd.Schema.CERT(client_cert))
fd.Close()
# Install the mock
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
client_mock = action_mocks.ActionMock("ListDirectory")
output_path = "analysis/Timeline/MAC"
for _ in test_lib.TestFlowHelper(
"RecursiveListDirectory", client_mock, client_id=client_id,
pathspec=rdfvalue.PathSpec(
path="/", pathtype=rdfvalue.PathSpec.PathType.OS),
token=token):
pass
# Now make a timeline
for _ in test_lib.TestFlowHelper(
"MACTimes", client_mock, client_id=client_id, token=token,
path="/", output=output_path):
pass
def setUp(self):
test_lib.GRRSeleniumTest.setUp(self)
# Create a new collection
with self.ACLChecksDisabled():
self.CreateTimelineFixture()
self.GrantClientApproval("C.0000000000000001")
def testTimelineViewer(self):
# Open the main page
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# Go to Browse VFS
self.Click("css=a:contains('Browse Virtual Filesystem')")
# Navigate to the analysis directory
self.Click("css=#_analysis ins.jstree-icon")
self.Click("link=Timeline")
self.Click("css=span[type=subject]:contains(\"MAC\")")
self.WaitUntil(self.IsElementPresent, "css=td:contains(\"TIMELINE\")")
self.assert_("View details" in self.GetText("css=td div.default_view a"))
self.Click("css=a:contains(\"View details\")")
self.WaitUntil(self.IsElementPresent, "container_query")
self.Type("css=input#container_query",
"subject contains bash and timestamp > 2010")
# Use the hidden submit button to issue the query. Ordinarily users have to
# press enter here as they do not see the submit button. But pressing enter
# does not work with chrome.
self.Click("css=#toolbar_main form[name=query_form] button[type=submit]")
self.WaitUntilContains("2011-03-07 12:50:20",
self.GetText, "css=tbody tr:first")
self.Click("css=tbody tr:first td")
self.WaitUntilContains("2011-03-07 12:50:20", self.GetText,
"css=.tab-content h3")
# Check that the embedded stat proto is properly presented
self.assertTrue("2011-03-07 12:50:20" in self.GetText(
"css=td.proto_value tr:contains('St atime') td.proto_value"))
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 30.495935 | 79 | 0.688083 |
65c2192b08e4b1e1d4cdee725461069b57cfc99f | 1,562 | py | Python | comparison_qrs_detectors/comparison_qrs_detectors/config.py | Andrew1021/Comparison-QRS-Detectors | 6f77b7aa6b48fc79c5aa62f2546cbfb0a794ab9d | [
"BSD-3-Clause"
] | null | null | null | comparison_qrs_detectors/comparison_qrs_detectors/config.py | Andrew1021/Comparison-QRS-Detectors | 6f77b7aa6b48fc79c5aa62f2546cbfb0a794ab9d | [
"BSD-3-Clause"
] | 2 | 2021-09-02T08:31:27.000Z | 2021-09-06T09:28:13.000Z | comparison_qrs_detectors/comparison_qrs_detectors/config.py | Andrew1021/Comparison-QRS-Detectors | 6f77b7aa6b48fc79c5aa62f2546cbfb0a794ab9d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
comparison_qrs_detectors
-------
A toolbox for comparison of the Pan-Tompkins, Engelse-Zeelenberg and Christov QRS detector written in Python.
:copyright: (c) 2021 by Open Innovation Lab
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# 3rd party
import pathlib
from pathlib import Path
# directories
file_path = pathlib.Path(__file__).resolve()
package_dir = file_path.parent
setup_dir = package_dir.parent
project_dir = package_dir.parent.parent
data_dir = package_dir.parent.parent / Path('data')
notebook_dir = package_dir.parent.parent / Path('notebooks')
# or
#import inspect, os.path
#filename = inspect.getframeinfo(inspect.currentframe()).filename
#path = os.path.dirname(os.path.abspath(filename))
# raw data paths
mit_bih = data_dir / 'raw/mit-bih-arrhythmia-database-1.0.0/mit-bih-arrhythmia-database-1.0.0'
ludb = data_dir / 'raw/lobachevsky-university-electrocardiography-database-1.0.1/lobachevsky-university-electrocardiography-database-1.0.1'
mit_raw_data_path = data_dir / 'raw/mit_bit_ecg.h5'
ludb_raw_data_path = data_dir / 'raw/ludb_ecg.h5'
# processed data paths
mit_processed_data_path = data_dir / 'processed/mit_bit_ecg.h5'
ludb_processed_data_path = data_dir / 'processed/ludb_ecg.h5'
# cleaned data paths
mit_cleand_data_path = data_dir / 'cleaned/mit_bit_ecg.h5'
ludb_cleand_data_path = data_dir / 'cleaned/ludb_ecg.h5'
# URL
mit_db_url = 'https://physionet.org/files/mitdb/1.0.0/'
ludb_url = 'https://physionet.org/files/ludb/1.0.1/'
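# --- Illustrative helper (not part of the original config module) ---
# A minimal sketch of how the directory constants above might be consumed by
# the rest of the package; the ensure_data_dirs() helper is hypothetical.
def ensure_data_dirs():
    """Create the expected data sub-directories if they are missing."""
    for sub in ('raw', 'processed', 'cleaned'):
        (data_dir / sub).mkdir(parents=True, exist_ok=True)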
| 34.711111 | 140 | 0.75096 |
a0eecfbecc7ffd1c439329f03fb7b5b7173d9c3c | 20,669 | py | Python | modules/tests/smoke/broken_links.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | modules/tests/smoke/broken_links.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | modules/tests/smoke/broken_links.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | """ Sahana Eden Test Framework
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from time import time
from types import MethodType
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import sys
import socket
from tests.web2unittest import Web2UnitTest
from gluon import current
class BrokenLinkTest(Web2UnitTest):
""" Smoke Test, visit every link it can find and report on the outcome """
def __init__(self):
Web2UnitTest.__init__(self)
self.clearRecord()
# This string must exist in the URL for it to be followed
# Useful to avoid going to linked sites
self.homeURL = self.url
# Link used to identify a URL to a ticket
self.url_ticket = "/admin/default/ticket/"
# Tuple of strings that if in the URL will be ignored
# Useful to avoid dynamic URLs that trigger the same functionality
self.include_ignore = ("_language=",
"logout",
"appadmin",
"admin",
"delete",
)
# tuple of strings that should be removed from the URL before storing
# Typically this will be some variables passed in via the URL
self.strip_url = ("?_next=",
)
self.reportOnly = False
self.maxDepth = 16 # sanity check
self.setThreshold(10)
self.setUser("[email protected]/eden")
self.total_visited = 0
self.broken_links_count = 0
def clearRecord(self):
# the total url links visited
self.totalLinks = 0
# The number of unique urls found at depth i, where i is the index
self.linkDepth = []
# Dictionary of the parent for each URL
self.urlParentList = {}
# dictionary of ReportData objects indexed on the url
self.results = {}
def setAgent(self, agentAcronym):
# Decide on the agent that will be used to power the smoke test
if agentAcronym == "g":
self.agent = "Ghost"
try:
from ghost import Ghost
self.ghost = Ghost(wait_timeout = 360)
except ImportError:
raise NameError("Ghost not installed")
from using_ghost import login, visit
else:
self.agent = "Twill"
try:
from twill import get_browser
from twill import set_output
except ImportError:
raise NameError("Twill not installed")
try:
import mechanize
except ImportError:
raise NameError("Mechanize not installed")
self.b = get_browser()
self.b_data = StringIO()
set_output(self.b_data)
from using_twill import login, visit
self.visit = MethodType(visit, self)
self.login = MethodType(login, self)
def setReportOnly(self, action):
self.reportOnly = action
def setDepth(self, depth):
self.maxDepth = depth
def setUser(self, user):
self.credentials = user.split(",")
def setThreshold(self, value):
value = float(value)
self.threshold = value
# socket.setdefaulttimeout(value*2)
def addResults2Current(self):
'''
            Store the link counts in gluon.current to be used by HTMLTestRunner for better reporting
'''
smoke_results = {}
smoke_results['working_links'] = self.total_visited - self.broken_links_count
smoke_results['broken_links_count'] = self.broken_links_count
current.data['smoke_results'] = smoke_results
def runTest(self):
"""
Test to find all exposed links and check the http code returned.
This test doesn't run any javascript so some false positives
will be found.
            The test can also display a histogram depicting the number of
links found at each depth.
            Whether failure or success is shown in the report is determined in
            addSuccess in the TestResult class
"""
self.reporter("Running the smoke tests using %s" % self.agent)
for user in self.credentials:
self.clearRecord()
if self.login(user):
self.reporter("Smoke Test for user %s" % self.user)
self.visitLinks()
self.report()
self.addResults2Current()
else:
raise Exception("Login Failed")
def visitLinks(self):
url = self.homeURL + "/default/index"
to_visit = [url]
start = time()
self.total_visited = 0
if not self.reportOnly:
for depth in range(self.maxDepth):
if len(to_visit) == 0:
break
self.linkDepth.append(len(to_visit))
self.totalLinks += len(to_visit)
visit_start = time()
url_visited = "%d urls" % len(to_visit)
self.total_visited += len(to_visit)
to_visit = self.visit(to_visit, depth)
msg = "%.2d Visited %s in %.3f seconds, %d more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
self.reporter(msg)
if self.config.verbose >= 2:
if self.config.verbose >= 3:
print >> self.stdout
if self.stdout.isatty(): # terminal should support colour
msg = "%.2d Visited \033[1;32m%s\033[0m in %.3f seconds, \033[1;31m%d\033[0m more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
print >> self.stdout, msg
if len(to_visit) > 0:
self.linkDepth.append(len(to_visit))
finish = time()
self.reporter("Finished took %.3f seconds" % (finish - start))
def report(self):
self.reporter("%d URLs visited" % self.totalLinks)
self.brokenReport()
self.timeReport()
if self.config.record_timings:
if not self.reportOnly:
self.record_timings()
self.scatterplot()
self.depthReport()
def record_timings(self):
import_error = ""
try:
import xlrd
except:
import_error += "ERROR: the xlrd modules is needed to record timings\n"
try:
import xlwt
except:
import_error += "ERROR: the xlwt modules is needed to record timings\n"
if import_error != "":
print >> self.stderr, import_error
return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
workbook = None
summary = {}
if workbook:
summary = self.read_timings_sheet(workbook)
if len(summary["date"]) > 100:
# Need to rotate the file
# 1) make a summary and save this
self.report_timings_summary(summary, rec_time_filename)
# 2) archive the file
from zipfile import ZipFile
import os
zip_filename = os.path.join(self.config.path, "rec_time.zip")
archive = ZipFile(zip_filename, "a")
arc_name = "%s-%s.xls" % (rec_time_filename[len(self.config.path):-4],
current.request.now.date()
)
archive.write(rec_time_filename,arc_name)
archive.close()
# 3) clear the current file
os.unlink(rec_time_filename)
summary = {}
if "date" not in summary:
last_col = 0
summary["date"] = [current.request.now.date()]
else:
last_col = len(summary["date"])
summary["date"].append(current.request.now.date())
for (url, rd_obj) in self.results.items():
if url not in summary:
summary[url] = []
# ensure that the row is as long as the number of dates
shortage = last_col - len(summary[url])
if shortage > 0:
summary[url] = summary[url] + ['']*shortage
summary[url].append((rd_obj.get_duration(), rd_obj.is_broken()))
self.write_timings_sheet(summary, rec_time_filename)
def read_timings_sheet(self, workbook):
"""
This will extract all the details from the xls sheet
"""
sheet = workbook.sheet_by_name("Timings")
summary = {}
RED = 0x0A
num_cells = sheet.ncols
summary["date"] = []
for col in range(1, num_cells):
summary["date"].append(sheet.cell_value(0, col))
for row in range(1,sheet.nrows):
url = sheet.cell_value(row, 0)
summary[url] = []
for col in range(1, num_cells):
duration = sheet.cell_value(row, col)
xf = sheet.cell_xf_index(row, col)
bg = workbook.xf_list[xf].background
broken = (bg.pattern_colour_index == RED)
summary[url].append((duration, broken))
return summary
def write_timings_sheet(self, summary, filename=None):
import xlwt
RED = 0x0A
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Timings")
stylebroken = xlwt.XFStyle()
stylebroken.pattern.pattern = stylebroken.pattern.SOLID_PATTERN
stylebroken.pattern.pattern_fore_colour = RED
col = 1
for date in summary["date"]:
sheet.write(0,col,str(date))
col += 1
row = 1
for (url, results) in summary.items():
if url == "date":
continue
sheet.write(row,0,url)
col = 1
for data in results:
if len(data) == 2 and data[1]:
sheet.write(row,col,data[0],stylebroken)
elif len(data) > 0:
sheet.write(row,col,data[0])
col += 1
row += 1
if filename:
book.save(filename)
return book
def report_timings_summary(self,
summary,
summary_file_name = None,
mean_threshold = 1):
"""
This will extract the details from the sheet and optionally save
them to a summary file
summary: the summary details returned from the spreadsheet (read_timings_sheet)
summary_file_name: name of the file to record the summary details (if required)
mean_threshold: The minimum number of values required to include
the mean in the regression calculations
"""
import numpy
import datetime
good_values = []
other_values = []
total_values = []
for date in summary["date"]:
good_values.append([])
other_values.append([])
total_values.append([])
for (url,results) in summary.items():
if url == "date":
continue
else:
cnt = 0
for (duration, broken) in results:
if duration != "":
total_values[cnt].append(duration)
if broken:
other_values[cnt].append(duration)
else:
good_values[cnt].append(duration)
cnt += 1
# get the number of days each entry is after the first date
# and calculate the average, if the average is NAN then ignore both
date_summary = []
gv_mean = []
gv_std = []
gv_date = []
cnt = 0
start = datetime.datetime.strptime(summary["date"][0],"%Y-%m-%d")
for list in good_values:
if len(list) > mean_threshold:
mean = numpy.mean(list)
std = numpy.std(list)
if not numpy.isnan(mean):
this_date = datetime.datetime.strptime(summary["date"][cnt],"%Y-%m-%d")
date_summary.append((this_date - start).days)
gv_mean.append(mean)
gv_std.append(std)
gv_date.append(summary["date"][cnt])
cnt += 1
# calculate the regression line
if len(gv_mean) > 2:
(m,b) = numpy.polyfit(date_summary, gv_mean, 1)
else:
m = b = 0
if summary_file_name != None:
book = self.write_timings_sheet(summary)
sheet = book.add_sheet("summary")
row = 0
for date in gv_date:
sheet.write(row,0,str(date))
sheet.write(row,1,gv_mean[row])
row += 1
sheet.write(row,0,"Trend")
sheet.write(row,1,m)
# Save the details to the summary file
book.save(summary_file_name)
return (date_summary, gv_mean, gv_std, m, b)
def report_model_url(self):
print "Report breakdown by module"
for (model, value) in self.model_url.items():
print model
for ud in value:
url = ud[0]
depth = ud[1]
parent = ud[2]
tabs = "\t" * depth
print "%s %s-%s (parent url - %s)" % (tabs, depth, url, parent)
def brokenReport(self):
self.reporter("Broken Links")
as_html = current.test_config.html
self.broken_links_count = 0
for (url, rd_obj) in self.results.items():
if as_html:
print_url = "<a href=%s%s target=\"_blank\">%s</a>" % (self.homeURL, url, url)
else:
print_url = url
if rd_obj.is_broken():
if rd_obj.threw_exception():
msg = "(Exception) %s" % print_url
else:
http_code = rd_obj.return_http_code()
ticket = rd_obj.the_ticket(as_html)
try:
parent = self.urlParentList[url]
if as_html:
parent = "<a href=%s%s target=\"_blank\">Parent</a>" % (self.homeURL, parent)
except:
parent = "unknown"
msg = "%3d. (%s - %s) %s called from %s" % (self.broken_links_count + 1,
http_code,
ticket,
print_url,
parent
)
self.reporter(msg)
self.broken_links_count += 1
def timeReport(self):
from operator import itemgetter
import numpy
thresholdLink = {}
linktimes = []
for (url, rd_obj) in self.results.items():
duration = rd_obj.get_duration()
linktimes.append(duration)
if duration > self.threshold:
thresholdLink[url] = duration
self.reporter("Time Analysis - Links beyond threshold")
for (visited_url, duration) in sorted(thresholdLink.iteritems(),
key=itemgetter(1),
reverse=True):
self.reporter( "%s took %.3f seconds" % (visited_url, duration))
self.reporter("Time Analysis - summary")
total = len(linktimes)
average = numpy.mean(linktimes)
std = numpy.std(linktimes)
msg = "%s links visited with an average time of %.3f and standard deviation of %.3f" % (total, average, std)
self.reporter(msg)
def scatterplot(self):
"""
Method to draw a scatterplot of the average time to download links
against time. Add a regression line to show the trend over time.
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
import numpy
except ImportError:
return
        try:
            import xlrd
        except:
            # xlrd is needed to read the recorded timings; without it there is
            # nothing to plot, so report the problem and return instead of
            # raising a NameError on the undefined import_error variable.
            print >> self.stderr, "ERROR: the xlrd module is needed to record timings"
            return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
return
import numpy
# Only include the mean in the regression values if there are at least 10 URL timings
summary = self.read_timings_sheet(workbook)
(date_summary, gv_mean, gv_std, m, b) = self.report_timings_summary(summary, mean_threshold=10)
if len(gv_mean) <= 2:
return
fig = Figure(figsize=(5, 2.5))
canvas = self.FigureCanvas(fig)
ax = fig.add_subplot(111)
linear = numpy.poly1d([m,b])
        # gv_std is a plain Python list, so convert it to an array before the
        # element-wise division used to scale the scatter marker sizes
        denom = numpy.max(gv_std)/50
        size = numpy.array(gv_std)/denom
ax.scatter(date_summary, gv_mean, marker="d", s=size)
ax.plot(date_summary, linear(date_summary), '--r')
chart = StringIO()
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter("Scatterplot of average link times per successful run")
self.reporter(image)
self.reporter("The trend line has a current slope of %s" % m)
self.reporter("The y-intercept is %s seconds" % b)
def depthReport(self):
"""
Method to draw a histogram of the number of new links
discovered at each depth.
(i.e. show how many links are required to reach a link)
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
from numpy import arange
except ImportError:
return
self.reporter("Analysis of link depth")
fig = Figure(figsize=(4, 2.5))
# Draw a histogram
width = 0.9
rect = [0.12, 0.08, 0.9, 0.85]
ax = fig.add_axes(rect)
left = arange(len(self.linkDepth))
plot = ax.bar(left, self.linkDepth, width=width)
# Add the x axis labels
ax.set_xticks(left+(width*0.5))
ax.set_xticklabels(left)
chart = StringIO()
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter(image)
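# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how BrokenLinkTest is typically driven; the real wiring
# is done by the Sahana Eden test runner, so treat this only as an outline.
def run_smoke_test():
    smoke = BrokenLinkTest()
    smoke.setAgent("t")            # "t" selects Twill, "g" selects Ghost
    smoke.setDepth(3)              # how many levels of links to follow
    smoke.setThreshold(10)         # seconds before a link is reported as slow
    smoke.setUser("[email protected]/eden")  # comma-separated "email/password" pairs
    smoke.runTest()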
| 39.220114 | 175 | 0.544632 |
0d4294dd7e1387a470a4641ca2286099b860c6d0 | 4,992 | py | Python | dds_web/security/tokens.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | [
"BSD-3-Clause"
] | null | null | null | dds_web/security/tokens.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | [
"BSD-3-Clause"
] | 47 | 2020-02-04T15:20:12.000Z | 2020-06-01T06:25:21.000Z | dds_web/security/tokens.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | [
"BSD-3-Clause"
] | 3 | 2019-09-20T08:45:04.000Z | 2019-09-24T08:51:00.000Z | ####################################################################################################
# IMPORTS ################################################################################ IMPORTS #
####################################################################################################
# Standard library
import datetime
import secrets
# Installed
import flask
from jwcrypto import jwk, jwt
# Own modules
import dds_web.utils
import dds_web.forms
# Functions ############################################################################ FUNCTIONS #
def encrypted_jwt_token(
username,
sensitive_content,
expires_in=datetime.timedelta(hours=168),
additional_claims=None,
fully_authenticated=False,
):
"""
Encrypts a signed JWT token. This is to be used for any encrypted token regardless of the sensitive content.
:param str username: Username must be obtained through authentication
:param str or None sensitive_content: This is the content that must be protected by encryption.
Can be set to None for protecting the signed token.
    :param timedelta expires_in: This is the maximum allowed age of the token. (default 7 days)
:param Dict or None additional_claims: Any additional token claims can be added. e.g., {"iss": "DDS"}
:param Boolean fully_authenticated: set to True only after successful 2fa which means that all authentication
steps have succeeded and this final token can be used for normal operation by the cli (default False)
"""
jwe_protected_header = {
"alg": "A256KW",
"enc": "A256GCM",
}
if fully_authenticated:
# exp claim in this (integrity) protected JWE header is provided only to let the
# cli know the precise expiration time of the encrypted token. It has no impact
# on the actual enforcement of the expiration of the token.
# This time is in iso format in contrast to the actual exp claim in timestamp,
# because timestamp translates to a wrong time in local date time
jwe_protected_header["exp"] = (dds_web.utils.current_time() + expires_in).isoformat()
token = jwt.JWT(
header=jwe_protected_header,
claims=__signed_jwt_token(
username=username,
sensitive_content=sensitive_content,
expires_in=expires_in,
additional_claims=additional_claims,
),
)
key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
token.make_encrypted_token(key)
return token.serialize()
def update_token_with_mfa(token_claims):
expires_in = (
datetime.datetime.fromtimestamp(token_claims.get("exp")) - dds_web.utils.current_time()
)
return encrypted_jwt_token(
username=token_claims.get("sub"),
sensitive_content=token_claims.get("sen_con"),
expires_in=expires_in,
additional_claims={"mfa_auth_time": dds_web.utils.current_time().timestamp()},
fully_authenticated=True,
)
def __signed_jwt_token(
username,
sensitive_content=None,
expires_in=datetime.timedelta(hours=168),
additional_claims=None,
):
"""
Generic signed JWT token. This is to be used by both signed-only and signed-encrypted tokens.
:param str username: Username must be obtained through authentication
:param str or None sensitive_content: This is the content that must be protected by encryption. (default None)
    :param timedelta expires_in: This is the maximum allowed age of the token. (default 7 days)
:param Dict or None additional_claims: Any additional token claims can be added. e.g., {"iss": "DDS"}
"""
expiration_time = dds_web.utils.current_time() + expires_in
# exp claim has to be in timestamp, otherwise jwcrypto cannot verify the exp claim
# and so raises an exception for it. This does not cause any timezone issues as it
# is only issued and verified on the api side.
data = {"sub": username, "exp": expiration_time.timestamp(), "nonce": secrets.token_hex(32)}
if additional_claims:
data.update(additional_claims)
if sensitive_content:
data["sen_con"] = sensitive_content
key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
token = jwt.JWT(header={"alg": "HS256"}, claims=data, algs=["HS256"])
token.make_signed_token(key)
return token.serialize()
def jwt_token(username, expires_in=datetime.timedelta(hours=168), additional_claims=None):
"""
Generates a signed JWT token. This is to be used for general purpose signed token.
:param str username: Username must be obtained through authentication
    :param timedelta expires_in: This is the maximum allowed age of the token. (default 7 days)
:param Dict or None additional_claims: Any additional token claims can be added. e.g., {"iss": "DDS"}
"""
return __signed_jwt_token(
username=username, expires_in=expires_in, additional_claims=additional_claims
)
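# --- Illustrative usage (not part of the original module) ---
# A rough sketch of how a token produced by encrypted_jwt_token() could be
# decrypted and its claims read back with jwcrypto: first undo the outer JWE
# layer, then verify the inner signed JWT. A Flask app context with the same
# SECRET_KEY is assumed, and error handling is omitted.
def _decode_encrypted_token_example(serialized_token):
    import json
    key = jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY"))
    decrypted = jwt.JWT(key=key, jwt=serialized_token)  # outer JWE layer
    signed = jwt.JWT(key=key, jwt=decrypted.claims)     # inner signed JWT
    return json.loads(signed.claims)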
| 42.666667 | 114 | 0.66266 |
b29447bc2f0835101a524c0fbbd9fc8cc07d80d5 | 1,596 | py | Python | src/ioSettings/urls.py | IOwebapps/sigtest | 4440d08fa187b5287ff98cb0263cfb5cfd819e2e | [
"MIT"
] | 1 | 2022-03-29T15:09:03.000Z | 2022-03-29T15:09:03.000Z | src/ioSettings/urls.py | IOwebapps/sigtest | 4440d08fa187b5287ff98cb0263cfb5cfd819e2e | [
"MIT"
] | null | null | null | src/ioSettings/urls.py | IOwebapps/sigtest | 4440d08fa187b5287ff98cb0263cfb5cfd819e2e | [
"MIT"
] | null | null | null | """ URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.urls import path, include
from django.views.generic import TemplateView
from apps.home import views
from apps.home.views import HomeServe
from django.conf import settings
### Main URL serving
if settings.DEBUG==True:
urlpatterns = [
url(r'^adminapp/', admin.site.urls),
# url(r'^$', HomeServe.as_view()),
path('', include('apps.home.urls')),
]+ static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
else:
urlpatterns = [
path('', include('apps.home.urls')),
]+ static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
handler404 = 'apps.home.views.error_404_view'
# urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
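# --- Illustrative companion (not part of this settings package) ---
# include('apps.home.urls') above expects a urlpatterns list in that app; a
# minimal sketch of what such a module might contain (names are assumptions):
#
#   from django.urls import path
#   from apps.home.views import HomeServe
#
#   urlpatterns = [
#       path('', HomeServe.as_view(), name='home'),
#   ]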
| 33.957447 | 80 | 0.712406 |
c3f090a7abfcc876891555a356671fc3978ae791 | 38,263 | py | Python | samples/python/console/speech_sample.py | jolguk/cognitive-services-speech-sdk | 13173866ff72138f767254fddbf8a98c92669f5e | [
"MIT"
] | null | null | null | samples/python/console/speech_sample.py | jolguk/cognitive-services-speech-sdk | 13173866ff72138f767254fddbf8a98c92669f5e | [
"MIT"
] | null | null | null | samples/python/console/speech_sample.py | jolguk/cognitive-services-speech-sdk | 13173866ff72138f767254fddbf8a98c92669f5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
"""
Speech recognition samples for the Microsoft Cognitive Services Speech SDK
"""
import time
import wave
import string
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
print("""
Importing the Speech SDK for Python failed.
Refer to
https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-python for
installation instructions.
""")
import sys
sys.exit(1)
# Set up the subscription info for the Speech Service:
# Replace with your own subscription key and service region (e.g., "westus").
speech_key, service_region = "YourSubscriptionKey", "YourServiceRegion"
# Specify the path to an audio file containing speech (mono WAV / PCM with a sampling rate of 16
# kHz).
weatherfilename = "whatstheweatherlike.wav"
weatherfilenamemp3 = "whatstheweatherlike.mp3"
def speech_recognize_once_from_mic():
"""performs one-shot speech recognition from the default microphone"""
# <SpeechRecognitionWithMicrophone>
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Creates a speech recognizer using microphone as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. It returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
# </SpeechRecognitionWithMicrophone>
def speech_recognize_once_from_file():
"""performs one-shot speech recognition with input from an audio file"""
# <SpeechRecognitionWithFile>
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
# Creates a speech recognizer using a file as audio input, also specify the speech language
speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=speech_config, language="de-DE", audio_config=audio_config)
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. It returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
# </SpeechRecognitionWithFile>
def speech_recognize_once_compressed_input():
"""performs one-shot speech recognition with compressed input from an audio file"""
# <SpeechRecognitionWithCompressedFile>
class BinaryFileReaderCallback(speechsdk.audio.PullAudioInputStreamCallback):
def __init__(self, filename: str):
super().__init__()
self._file_h = open(filename, "rb")
def read(self, buffer: memoryview) -> int:
try:
size = buffer.nbytes
frames = self._file_h.read(size)
buffer[:len(frames)] = frames
return len(frames)
except Exception as ex:
print('Exception in `read`: {}'.format(ex))
raise
def close(self) -> None:
print('closing file')
try:
self._file_h.close()
except Exception as ex:
print('Exception in `close`: {}'.format(ex))
raise
# Creates an audio stream format. For an example we are using MP3 compressed file here
compressed_format = speechsdk.audio.AudioStreamFormat(compressed_stream_format=speechsdk.AudioStreamContainerFormat.MP3)
callback = BinaryFileReaderCallback(filename=weatherfilenamemp3)
stream = speechsdk.audio.PullAudioInputStream(stream_format=compressed_format, pull_stream_callback=callback)
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(stream=stream)
# Creates a speech recognizer using a file as audio input, also specify the speech language
speech_recognizer = speechsdk.SpeechRecognizer(speech_config, audio_config)
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. It returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
# </SpeechRecognitionWithCompressedFile>
def speech_recognize_once_from_file_with_customized_model():
"""performs one-shot speech recognition with input from an audio file, specifying a custom
model"""
# <SpeechRecognitionUsingCustomizedModel>
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Create source language configuration with the speech language and the endpoint ID of your customized model
# Replace with your speech language and CRIS endpoint ID.
source_language_config = speechsdk.languageconfig.SourceLanguageConfig("zh-CN", "YourEndpointId")
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
# Creates a speech recognizer using a file as audio input and specify the source language config
speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=speech_config, source_language_config=source_language_config, audio_config=audio_config)
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. It returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
# </SpeechRecognitionUsingCustomizedModel>
def speech_recognize_once_from_file_with_custom_endpoint_parameters():
"""performs one-shot speech recognition with input from an audio file, specifying an
endpoint with custom parameters"""
initial_silence_timeout_ms = 15 * 1e3
template = "wss://{}.stt.speech.microsoft.com/speech/recognition" \
"/conversation/cognitiveservices/v1?initialSilenceTimeoutMs={:d}"
speech_config = speechsdk.SpeechConfig(subscription=speech_key,
endpoint=template.format(service_region, int(initial_silence_timeout_ms)))
print("Using endpoint", speech_config.get_property(speechsdk.PropertyId.SpeechServiceConnection_Endpoint))
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
# Creates a speech recognizer using a file as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. It returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
def speech_recognize_async_from_file():
"""performs one-shot speech recognition asynchronously with input from an audio file"""
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
# Creates a speech recognizer using a file as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
# Perform recognition. `recognize_async` does not block until recognition is complete,
# so other tasks can be performed while recognition is running.
    # However, recognition stops when the first utterance has been recognized.
# For long-running recognition, use continuous recognitions instead.
result_future = speech_recognizer.recognize_once_async()
print('recognition is running....')
# Other tasks can be performed here...
# Retrieve the recognition result. This blocks until recognition is complete.
result = result_future.get()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
def speech_recognize_continuous_from_file():
"""performs continuous speech recognition with input from an audio file"""
# <SpeechContinuousRecognitionWithFile>
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
done = False
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
speech_recognizer.stop_continuous_recognition()
# </SpeechContinuousRecognitionWithFile>
def speech_recognize_keyword_from_microphone():
"""performs keyword-triggered speech recognition with input microphone"""
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Creates an instance of a keyword recognition model. Update this to
# point to the location of your keyword recognition model.
model = speechsdk.KeywordRecognitionModel("YourKeywordRecognitionModelFile.table")
# The phrase your keyword recognition model triggers on.
keyword = "YourKeyword"
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
done = False
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
def recognizing_cb(evt):
"""callback for recognizing event"""
if evt.result.reason == speechsdk.ResultReason.RecognizingKeyword:
print('RECOGNIZING KEYWORD: {}'.format(evt))
elif evt.result.reason == speechsdk.ResultReason.RecognizingSpeech:
print('RECOGNIZING: {}'.format(evt))
def recognized_cb(evt):
"""callback for recognized event"""
if evt.result.reason == speechsdk.ResultReason.RecognizedKeyword:
print('RECOGNIZED KEYWORD: {}'.format(evt))
elif evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:
print('RECOGNIZED: {}'.format(evt))
elif evt.result.reason == speechsdk.ResultReason.NoMatch:
print('NOMATCH: {}'.format(evt))
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(recognizing_cb)
speech_recognizer.recognized.connect(recognized_cb)
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start keyword recognition
speech_recognizer.start_keyword_recognition(model)
print('Say something starting with "{}" followed by whatever you want...'.format(keyword))
while not done:
time.sleep(.5)
speech_recognizer.stop_keyword_recognition()
def speech_recognition_with_pull_stream():
"""gives an example how to use a pull audio stream to recognize speech from a custom audio
source"""
class WavFileReaderCallback(speechsdk.audio.PullAudioInputStreamCallback):
"""Example class that implements the Pull Audio Stream interface to recognize speech from
an audio file"""
def __init__(self, filename: str):
super().__init__()
self._file_h = wave.open(filename, mode=None)
self.sample_width = self._file_h.getsampwidth()
assert self._file_h.getnchannels() == 1
assert self._file_h.getsampwidth() == 2
assert self._file_h.getframerate() == 16000
assert self._file_h.getcomptype() == 'NONE'
def read(self, buffer: memoryview) -> int:
"""read callback function"""
size = buffer.nbytes
frames = self._file_h.readframes(size // self.sample_width)
buffer[:len(frames)] = frames
return len(frames)
def close(self):
"""close callback function"""
self._file_h.close()
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# specify the audio format
wave_format = speechsdk.audio.AudioStreamFormat(samples_per_second=16000, bits_per_sample=16,
channels=1)
# setup the audio stream
callback = WavFileReaderCallback(weatherfilename)
stream = speechsdk.audio.PullAudioInputStream(callback, wave_format)
audio_config = speechsdk.audio.AudioConfig(stream=stream)
# instantiate the speech recognizer with pull stream input
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
done = False
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
speech_recognizer.stop_continuous_recognition()
def speech_recognition_with_push_stream():
"""gives an example how to use a push audio stream to recognize speech from a custom audio
source"""
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# setup the audio stream
stream = speechsdk.audio.PushAudioInputStream()
audio_config = speechsdk.audio.AudioConfig(stream=stream)
# instantiate the speech recognizer with push stream input
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
    # The number of bytes to push per buffer: 3200 bytes is 1600 samples,
    # i.e. 100 ms of 16 kHz, 16-bit, mono audio (matching the 0.1 s sleep below)
    n_bytes = 3200
wav_fh = wave.open(weatherfilename)
# start continuous speech recognition
speech_recognizer.start_continuous_recognition()
# start pushing data until all data has been read from the file
try:
while(True):
frames = wav_fh.readframes(n_bytes // 2)
print('read {} bytes'.format(len(frames)))
if not frames:
break
stream.write(frames)
time.sleep(.1)
finally:
# stop recognition and clean up
wav_fh.close()
stream.close()
speech_recognizer.stop_continuous_recognition()
def speech_recognize_once_with_auto_language_detection_from_mic():
"""performs one-shot speech recognition from the default microphone with auto language detection"""
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# create the auto detection language configuration with the potential source language candidates
auto_detect_source_language_config = \
speechsdk.languageconfig.AutoDetectSourceLanguageConfig(languages=["de-DE", "en-US"])
speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=speech_config, auto_detect_source_language_config=auto_detect_source_language_config)
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
auto_detect_source_language_result = speechsdk.AutoDetectSourceLanguageResult(result)
print("Recognized: {} in language {}".format(result.text, auto_detect_source_language_result.language))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
def speech_recognize_with_auto_language_detection_UsingCustomizedModel():
"""performs speech recognition from the audio file with auto language detection, using customized model"""
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
# Replace the languages with your languages in BCP-47 format, e.g. fr-FR.
# Please see https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
# for all supported languages
en_language_config = speechsdk.languageconfig.SourceLanguageConfig("en-US")
# Replace the languages with your languages in BCP-47 format, e.g. zh-CN.
# Set the endpoint ID of your customized mode that will be used for fr-FR.
# Replace with your own CRIS endpoint ID.
fr_language_config = speechsdk.languageconfig.SourceLanguageConfig("fr-FR", "myendpointId")
# create the auto detection language configuration with the source language configurations
auto_detect_source_language_config = speechsdk.languageconfig.AutoDetectSourceLanguageConfig(
sourceLanguageConfigs=[en_language_config, fr_language_config])
speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=speech_config,
auto_detect_source_language_config=auto_detect_source_language_config,
audio_config=audio_config)
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
auto_detect_source_language_result = speechsdk.AutoDetectSourceLanguageResult(result)
print("Recognized: {} in language {}".format(result.text, auto_detect_source_language_result.language))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
def speech_recognize_keyword_locally_from_microphone():
"""runs keyword spotting locally, with direct access to the result audio"""
# Creates an instance of a keyword recognition model. Update this to
# point to the location of your keyword recognition model.
model = speechsdk.KeywordRecognitionModel("YourKeywordRecognitionModelFile.table")
# The phrase your keyword recognition model triggers on.
keyword = "YourKeyword"
# Create a local keyword recognizer with the default microphone device for input.
keyword_recognizer = speechsdk.KeywordRecognizer()
done = False
def recognized_cb(evt):
# Only a keyword phrase is recognized. The result cannot be 'NoMatch'
# and there is no timeout. The recognizer runs until a keyword phrase
# is detected or recognition is canceled (by stop_recognition_async()
# or due to the end of an input file or stream).
result = evt.result
if result.reason == speechsdk.ResultReason.RecognizedKeyword:
print("RECOGNIZED KEYWORD: {}".format(result.text))
nonlocal done
done = True
def canceled_cb(evt):
result = evt.result
if result.reason == speechsdk.ResultReason.Canceled:
print('CANCELED: {}'.format(result.cancellation_details.reason))
nonlocal done
done = True
# Connect callbacks to the events fired by the keyword recognizer.
keyword_recognizer.recognized.connect(recognized_cb)
keyword_recognizer.canceled.connect(canceled_cb)
# Start keyword recognition.
result_future = keyword_recognizer.recognize_once_async(model)
print('Say something starting with "{}" followed by whatever you want...'.format(keyword))
result = result_future.get()
# Read result audio (incl. the keyword).
if result.reason == speechsdk.ResultReason.RecognizedKeyword:
time.sleep(2) # give some time so the stream is filled
result_stream = speechsdk.AudioDataStream(result)
result_stream.detach_input() # stop any more data from input getting to the stream
save_future = result_stream.save_to_wav_file_async("AudioFromRecognizedKeyword.wav")
print('Saving file...')
saved = save_future.get()
# If active keyword recognition needs to be stopped before results, it can be done with
#
# stop_future = keyword_recognizer.stop_recognition_async()
# print('Stopping...')
# stopped = stop_future.get()
def pronunciation_assessment_from_microphone():
""""performs one-shot pronunciation assessment asynchronously with input from microphone."""
# Creates an instance of a speech config with specified subscription key and service region.
# Replace with your own subscription key and service region (e.g., "westus").
# Note: The pronunciation assessment feature is currently only available on en-US language.
config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# The pronunciation assessment service has a longer default end silence timeout (5 seconds) than normal STT
# as the pronunciation assessment is widely used in education scenario where kids have longer break in reading.
# You can adjust the end silence timeout based on your real scenario.
config.set_property(speechsdk.PropertyId.SpeechServiceConnection_EndSilenceTimeoutMs, "3000")
reference_text = ""
# create pronunciation assessment config, set grading system, granularity and if enable miscue based on your requirement.
pronunciation_config = speechsdk.PronunciationAssessmentConfig(reference_text=reference_text,
grading_system=speechsdk.PronunciationAssessmentGradingSystem.HundredMark,
granularity=speechsdk.PronunciationAssessmentGranularity.Phoneme,
enable_miscue=True)
recognizer = speechsdk.SpeechRecognizer(speech_config=config)
while True:
# Receives reference text from console input.
print('Enter reference text you want to assess, or enter empty text to exit.')
print('> ')
try:
reference_text = input()
except EOFError:
break
pronunciation_config.reference_text = reference_text
pronunciation_config.apply_to(recognizer)
# Starts recognizing.
print('Read out "{}" for pronunciation assessment ...'.format(reference_text))
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot evaluation.
# For long-running multi-utterance pronunciation evaluation, use start_continuous_recognition() instead.
result = recognizer.recognize_once_async().get()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print('Recognized: {}'.format(result.text))
print(' Pronunciation Assessment Result:')
pronunciation_result = speechsdk.PronunciationAssessmentResult(result)
print(' Accuracy score: {}, Pronunciation score: {}, Completeness score : {}, FluencyScore: {}'.format(
pronunciation_result.accuracy_score, pronunciation_result.pronunciation_score,
pronunciation_result.completeness_score, pronunciation_result.fluency_score
))
print(' Word-level details:')
for idx, word in enumerate(pronunciation_result.words):
print(' {}: word: {}, accuracy score: {}, error type: {};'.format(
idx + 1, word.word, word.accuracy_score, word.error_type
))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
def pronunciation_assessment_continuous_from_file():
"""performs continuous speech recognition asynchronously with input from an audio file"""
import difflib
import json
# Creates an instance of a speech config with specified subscription key and service region.
# Replace with your own subscription key and service region (e.g., "westus").
# Note: The pronunciation assessment feature is currently only available on en-US language.
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename)
reference_text = "What's the weather like?"
# create pronunciation assessment config, set grading system, granularity and if enable miscue based on your requirement.
enable_miscue = True
pronunciation_config = speechsdk.PronunciationAssessmentConfig(reference_text=reference_text,
grading_system=speechsdk.PronunciationAssessmentGradingSystem.HundredMark,
granularity=speechsdk.PronunciationAssessmentGranularity.Phoneme,
enable_miscue=enable_miscue)
# Creates a speech recognizer using a file as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
# apply pronunciation assessment config to speech recognizer
pronunciation_config.apply_to(speech_recognizer)
done = False
recognized_words = []
accuracy_scores = []
fluency_scores = []
durations = []
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
def recognized(evt):
print('pronunciation assessment for: {}'.format(evt.result.text))
pronunciation_result = speechsdk.PronunciationAssessmentResult(evt.result)
print(' Accuracy score: {}, pronunciation score: {}, completeness score : {}, fluency score: {}'.format(
pronunciation_result.accuracy_score, pronunciation_result.pronunciation_score,
pronunciation_result.completeness_score, pronunciation_result.fluency_score
))
nonlocal recognized_words, accuracy_scores, fluency_scores, durations
recognized_words += pronunciation_result.words
accuracy_scores.append(pronunciation_result.accuracy_score)
fluency_scores.append(pronunciation_result.fluency_score)
json_result = evt.result.properties.get(speechsdk.PropertyId.SpeechServiceResponse_JsonResult)
jo = json.loads(json_result)
nb = jo['NBest'][0]
durations.append(sum([int(w['Duration']) for w in nb['Words']]))
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognized.connect(recognized)
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous pronunciation assessment
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
speech_recognizer.stop_continuous_recognition()
# We can calculate whole accuracy and fluency scores by duration weighted averaging
accuracy_score = sum(i[0] * i[1] for i in zip(accuracy_scores, durations)) / sum(durations)
fluency_score = sum(i[0] * i[1] for i in zip(fluency_scores, durations)) / sum(durations)
# we need to convert the reference text to lower case, and split to words, then remove the punctuations.
reference_words = [w.strip(string.punctuation) for w in reference_text.lower().split()]
# For continuous pronunciation assessment mode, the service won't return the words with `Insertion` or `Omission`
# even if miscue is enabled.
# We need to compare with the reference text after received all recognized words to get these error words.
if enable_miscue:
diff = difflib.SequenceMatcher(None, reference_words, [x.word for x in recognized_words])
final_words = []
for tag, i1, i2, j1, j2 in diff.get_opcodes():
if tag == 'insert':
for word in recognized_words[j1:j2]:
if word.error_type == 'None':
word._error_type = 'Insertion'
final_words.append(word)
elif tag == 'delete':
for word_text in reference_words[i1:i2]:
word = speechsdk.PronunciationAssessmentWordResult({
'Word': word_text,
'PronunciationAssessment': {
'ErrorType': 'Omission',
}
})
final_words.append(word)
else:
final_words += recognized_words[j1:j2]
else:
final_words = recognized_words
# Calculate whole completeness score
completeness_score = len([w for w in final_words if w.error_type == 'None']) / len(reference_words) * 100
print(' Paragraph accuracy score: {}, completeness score: {}, fluency score: {}'.format(
accuracy_score, completeness_score, fluency_score
))
for idx, word in enumerate(final_words):
print(' {}: word: {}\taccuracy score: {}\terror type: {};'.format(
idx + 1, word.word, word.accuracy_score, word.error_type
))
| 50.545575 | 141 | 0.721219 |
4cc955c2b035c6d18c2ceebe24d70da017ef3bca | 4,846 | py | Python | jina/peapods/runtimes/base.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 2 | 2021-01-22T07:34:35.000Z | 2021-01-23T04:36:41.000Z | jina/peapods/runtimes/base.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 1 | 2021-02-27T05:56:45.000Z | 2021-02-27T05:57:03.000Z | jina/peapods/runtimes/base.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | null | null | null | import argparse
from ...logging import JinaLogger
class BaseRuntime:
"""A Jina Runtime is a procedure that blocks the main process once running (i.e. :meth:`run_forever`),
therefore must be put into a separated thread/process. Any program/library/package/module that blocks the main
process, can be formulated into a :class:`BaseRuntime` class and then be used in :class:`BasePea`.
In what follows, we refer to the main process/thread as ``M`` and to the process/thread blocked by the :class:`Runtime` as ``S``.
In Jina, a :class:`BasePea` object is used to manage a :class:`Runtime` object's lifecycle. A :class:`BasePea`
is a subclass of :class:`multiprocessing.Process` or :class:`threading.Thread`, it starts from ``M`` and once the
``S`` is spawned, it calls :class:`Runtime` methods in the following order:
0. :meth:`__init__` in ``M``
1. :meth:`setup` in ``S``
2. :meth:`run_forever` in ``S``. Note that this will block ``S``; step 3 won't be
reached until it is unblocked by :meth:`cancel`
3. :meth:`teardown` in ``S``. Note that ``S`` is blocked by
:meth:`run_forever`; this step won't be reached until step 2 is unblocked by :meth:`cancel`
The :meth:`setup` and :meth:`teardown` methods pair together, defining instructions that will be executed before
and after :meth:`run_forever`. In subclasses, they are optional.
The :meth:`run_forever` and :meth:`cancel` pair together, which introduces blocking to ``S`` and then
unblocking from it. They are mandatory for all subclasses.
Note that there is no "exclusive" relation between :meth:`run_forever` and :meth:`teardown`; :meth:`teardown`
is not about "cancelling", it is about "cleaning".
Unlike the other three methods, which are invoked inside ``S``, :meth:`cancel` is invoked in ``M`` to unblock ``S``.
Therefore, :meth:`cancel` usually requires some special communication between ``M`` and ``S``, e.g.
- Use :class:`threading.Event` or `multiprocessing.Event`, while :meth:`run_forever` polls for this event
- Use ZMQ to send a message, while :meth:`run_forever` polls for this message
- Use HTTP/REST to send a request, while :meth:`run_forever` listens to this request
Note that another way to jump out of :meth:`run_forever` is to raise an exception from it. This will immediately move to
:meth:`teardown`.
.. note::
Rule of thumb on exception handling: if you are not sure whether you should handle an exception inside
:meth:`run_forever`, :meth:`cancel`, :meth:`setup`, or :meth:`teardown`, then DO NOT catch it there.
Exceptions are MUCH better handled by :class:`BasePea`.
.. seealso::
:class:`BasePea` for managing a :class:`Runtime` object's lifecycle.
"""
def run_forever(self):
""" Running the blocking procedure inside ``S``. Note, once this method is called,
``S`` is blocked.
.. note::
If this method raises any exception, :meth:`teardown` will be called.
.. seealso::
:meth:`cancel` for cancelling the forever loop.
"""
raise NotImplementedError
def cancel(self):
""" Cancelling :meth:`run_forever` from ``M``. :meth:`cancel` usually requires some special communication
between ``M`` and ``S``, e.g.
- Use :class:`threading.Event` or `multiprocessing.Event`, while :meth:`run_forever` polls for this event
- Use ZMQ to send a message, while :meth:`run_forever` polls for this message
- Use HTTP/REST to send a request, while :meth:`run_forever` listens to this request
.. seealso::
:meth:`run_forever` for blocking the process/thread.
"""
raise NotImplementedError
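# A minimal sketch of the Event-based pattern described above (illustrative
# only, not part of Jina itself; the class and attribute names below are
# made up):
#
#   class _EventRuntime(BaseRuntime):
#       def setup(self):
#           self._stop_event = threading.Event()
#       def run_forever(self):
#           while not self._stop_event.wait(timeout=0.1):
#               pass  # do one unit of otherwise-blocking work per iteration
#       def cancel(self):
#           self._stop_event.set()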
def setup(self):
""" Method called to prepare the runtime inside ``S``. Optional in subclasses.
The default implementation does nothing.
.. note::
If this method raises any exception, then :meth:`run_forever` and :meth:`teardown` won't be called.
.. note::
Unlike :meth:`__init__` called in ``M``, :meth:`setup` is called inside ``S``.
"""
pass
def teardown(self):
""" Method called immediately after :meth:`run_forever` is unblocked.
You can tidy up things here. Optional in subclasses. The default implementation does nothing.
.. note::
This method will only be called if the :meth:`setup` succeeds.
"""
pass
def __init__(self, args: 'argparse.Namespace'):
super().__init__()
self.args = args
if args.name:
self.name = f'{args.name}/{self.__class__.__name__}'
else:
self.name = self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
| 41.067797 | 119 | 0.645687 |
23beec444364b57e4dd08e4b7c66067aec449642 | 9,981 | py | Python | numpy/array_api/_creation_functions.py | OakCityLabs/numpy | 09f5c5a64eb019b3e058c7183ca1ead6190bdbc8 | [
"BSD-3-Clause"
] | 1 | 2021-12-14T18:48:58.000Z | 2021-12-14T18:48:58.000Z | numpy/array_api/_creation_functions.py | OakCityLabs/numpy | 09f5c5a64eb019b3e058c7183ca1ead6190bdbc8 | [
"BSD-3-Clause"
] | null | null | null | numpy/array_api/_creation_functions.py | OakCityLabs/numpy | 09f5c5a64eb019b3e058c7183ca1ead6190bdbc8 | [
"BSD-3-Clause"
] | 1 | 2022-02-09T22:48:38.000Z | 2022-02-09T22:48:38.000Z | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
if TYPE_CHECKING:
from ._typing import (
Array,
Device,
Dtype,
NestedSequence,
SupportsBufferProtocol,
)
from collections.abc import Sequence
from ._dtypes import _all_dtypes
import numpy as np
def _check_valid_dtype(dtype):
# Note: Only spelling dtypes as the dtype objects is supported.
# We use this instead of "dtype in _all_dtypes" because the dtype objects
# define equality with the sorts of things we want to disallow.
for d in (None,) + _all_dtypes:
if dtype is d:
return
raise ValueError("dtype must be one of the supported dtypes")
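# For example (illustrative): _check_valid_dtype(int64) with the int64 object
# exported by this namespace succeeds, while _check_valid_dtype('int64') or
# _check_valid_dtype(np.int64) raises, since those only compare equal -- not
# identical -- to the supported dtype objects.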
def asarray(
obj: Union[
Array,
bool,
int,
float,
NestedSequence[bool | int | float],
SupportsBufferProtocol,
],
/,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
copy: Optional[bool] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
See its docstring for more information.
"""
# _array_object imports in this file are inside the functions to avoid
# circular imports
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
if copy is False:
# Note: copy=False is not yet implemented in np.asarray
raise NotImplementedError("copy=False is not yet implemented")
if isinstance(obj, Array):
if dtype is not None and obj.dtype != dtype:
copy = True
if copy is True:
return Array._new(np.array(obj._array, copy=True, dtype=dtype))
return obj
if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 or obj < -(2 ** 63)):
# Give a better error message in this case. NumPy would convert this
# to an object array. TODO: This won't handle large integers in lists.
raise OverflowError("Integer out of bounds for array dtypes")
res = np.asarray(obj, dtype=dtype)
return Array._new(res)
def arange(
start: Union[int, float],
/,
stop: Optional[Union[int, float]] = None,
step: Union[int, float] = 1,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
def empty(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.empty(shape, dtype=dtype))
def empty_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.empty_like(x._array, dtype=dtype))
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
def from_dlpack(x: object, /) -> Array:
from ._array_object import Array
return Array._new(np._from_dlpack(x))
def full(
shape: Union[int, Tuple[int, ...]],
fill_value: Union[int, float],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
if isinstance(fill_value, Array) and fill_value.ndim == 0:
fill_value = fill_value._array
res = np.full(shape, fill_value, dtype=dtype)
if res.dtype not in _all_dtypes:
# This will happen if the fill value is not something that NumPy
# coerces to one of the acceptable dtypes.
raise TypeError("Invalid input to full")
return Array._new(res)
def full_like(
x: Array,
/,
fill_value: Union[int, float],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
res = np.full_like(x._array, fill_value, dtype=dtype)
if res.dtype not in _all_dtypes:
# This will happen if the fill value is not something that NumPy
# coerces to one of the acceptable dtypes.
raise TypeError("Invalid input to full_like")
return Array._new(res)
def linspace(
start: Union[int, float],
stop: Union[int, float],
/,
num: int,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
endpoint: bool = True,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
"""
Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
See its docstring for more information.
"""
from ._array_object import Array
# Note: unlike np.meshgrid, only inputs with all the same dtype are
# allowed
if len({a.dtype for a in arrays}) > 1:
raise ValueError("meshgrid inputs must all have the same dtype")
return [
Array._new(array)
for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
]
def ones(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.ones(shape, dtype=dtype))
def ones_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.ones_like(x._array, dtype=dtype))
def tril(x: Array, /, *, k: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
See its docstring for more information.
"""
from ._array_object import Array
if x.ndim < 2:
# Note: Unlike np.tril, x must be at least 2-D
raise ValueError("x must be at least 2-dimensional for tril")
return Array._new(np.tril(x._array, k=k))
def triu(x: Array, /, *, k: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
See its docstring for more information.
"""
from ._array_object import Array
if x.ndim < 2:
# Note: Unlike np.triu, x must be at least 2-D
raise ValueError("x must be at least 2-dimensional for triu")
return Array._new(np.triu(x._array, k=k))
def zeros(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.zeros(shape, dtype=dtype))
def zeros_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.zeros_like(x._array, dtype=dtype))
| 28.355114 | 86 | 0.642721 |
66d50d07ff2067b802b90a2aadd88df23153830a | 1,136 | py | Python | examples/unsupervised_quality_estimation/aggregate_scores.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 16,259 | 2018-05-02T02:31:30.000Z | 2022-03-31T21:50:23.000Z | examples/unsupervised_quality_estimation/aggregate_scores.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 3,863 | 2018-05-02T13:42:39.000Z | 2022-03-31T19:03:32.000Z | examples/unsupervised_quality_estimation/aggregate_scores.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 4,796 | 2018-05-02T07:55:51.000Z | 2022-03-31T14:46:45.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import numpy as np
aggregate_funcs = {
"std": np.std,
"var": np.var,
"median": np.median,
"mean": np.mean,
"min": np.min,
"max": np.max,
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", required=True, type=str)
parser.add_argument("-n", "--repeat_times", required=True, type=int)
parser.add_argument("-o", "--output_file", required=False)
parser.add_argument("-f", "--func", required=False, default="mean")
args = parser.parse_args()
stream = open(args.output_file, "w") if args.output_file else sys.stdout
segment_scores = []
for line in open(args.input_file):
segment_scores.append(float(line.strip()))
if len(segment_scores) == args.repeat_times:
stream.write("{}\n".format(aggregate_funcs[args.func](segment_scores)))
segment_scores = []
if __name__ == "__main__":
main()
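# Example invocation (illustrative file names): aggregate every block of 5
# consecutive scores in scores.txt with the mean, writing one value per block:
#   python aggregate_scores.py -i scores.txt -n 5 -f mean -o aggregated.txt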
| 27.047619 | 83 | 0.654049 |
c0e4709abc2601efd73a3ca778f01b32c64954f0 | 4,093 | py | Python | igibson/tasks/room_rearrangement_task.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/tasks/room_rearrangement_task.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/tasks/room_rearrangement_task.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | import logging
import numpy as np
import pybullet as p
from igibson.reward_functions.potential_reward import PotentialReward
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.tasks.task_base import BaseTask
from igibson.termination_conditions.max_collision import MaxCollision
from igibson.termination_conditions.out_of_bound import OutOfBound
from igibson.termination_conditions.timeout import Timeout
from igibson.utils.utils import restoreState
class RoomRearrangementTask(BaseTask):
"""
Room Rearrangement Task
The goal is to close as many pieces of furniture (e.g. cabinets and fridges) as possible
"""
def __init__(self, env):
super(RoomRearrangementTask, self).__init__(env)
assert isinstance(
env.scene, InteractiveIndoorScene
), "room rearrangement can only be done in InteractiveIndoorScene"
self.prismatic_joint_reward_scale = self.config.get("prismatic_joint_reward_scale", 1.0)
self.revolute_joint_reward_scale = self.config.get("revolute_joint_reward_scale", 1.0)
self.termination_conditions = [
MaxCollision(self.config),
Timeout(self.config),
OutOfBound(self.config),
]
self.reward_functions = [
PotentialReward(self.config),
]
self.floor_num = 0
def get_potential(self, env):
"""
Compute task-specific potential: furniture joint positions
:param env: environment instance
:return: task potential
"""
task_potential = 0.0
for (body_id, joint_id) in self.body_joint_pairs:
j_type = p.getJointInfo(body_id, joint_id)[2]
j_pos = p.getJointState(body_id, joint_id)[0]
scale = (
self.prismatic_joint_reward_scale if j_type == p.JOINT_PRISMATIC else self.revolute_joint_reward_scale
)
task_potential += scale * j_pos
return task_potential
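# Lower task potential corresponds to the tracked joints being closer to their
# closed (zero) positions; the PotentialReward term configured above is driven
# by changes in this value between steps (see its implementation for the exact
# sign and weighting).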
def reset_scene(self, env):
"""
Reset all scene objects and then open certain object categories of interest.
:param env: environment instance
"""
env.scene.reset_scene_objects()
env.scene.force_wakeup_scene_objects()
self.body_joint_pairs = env.scene.open_all_objs_by_categories(
[
"bottom_cabinet",
"bottom_cabinet_no_top",
"top_cabinet",
"dishwasher",
"fridge",
"microwave",
"oven",
"washer",
"dryer",
],
mode="random",
prob=0.5,
)
def sample_initial_pose(self, env):
"""
Sample robot initial pose
:param env: environment instance
:return: initial pose
"""
_, initial_pos = env.scene.get_random_point(floor=self.floor_num)
initial_orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])
return initial_pos, initial_orn
def reset_agent(self, env):
"""
Reset robot initial pose.
Sample initial pose, check validity, and land it.
:param env: environment instance
"""
reset_success = False
max_trials = 100
# cache pybullet state
# TODO: p.saveState takes a few seconds, need to speed up
state_id = p.saveState()
for _ in range(max_trials):
initial_pos, initial_orn = self.sample_initial_pose(env)
reset_success = env.test_valid_position(env.robots[0], initial_pos, initial_orn)
restoreState(state_id)
if reset_success:
break
if not reset_success:
logging.warning("WARNING: Failed to reset robot without collision")
env.land(env.robots[0], initial_pos, initial_orn)
p.removeState(state_id)
for reward_function in self.reward_functions:
reward_function.reset(self, env)
def get_task_obs(self, env):
"""
No task-specific observation
"""
return
| 33.276423 | 118 | 0.629367 |
921c6e06d0564b4cf8c9e3520338f70b055ae2ba | 4,730 | py | Python | dials_data/cli.py | toastisme/data | 6ef6383853385bc63f81cfa5217bae29a17c8279 | [
"BSD-3-Clause"
] | null | null | null | dials_data/cli.py | toastisme/data | 6ef6383853385bc63f81cfa5217bae29a17c8279 | [
"BSD-3-Clause"
] | 10 | 2021-06-01T05:57:15.000Z | 2022-03-01T23:03:11.000Z | dials_data/cli.py | ndevenish/data | 93225485f9011900b8ec57645ff0b368b4810c66 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import sys
import dials_data
import dials_data.datasets
import dials_data.download
import yaml
def cli_info(cmd_args):
parser = argparse.ArgumentParser(
description="Show information", prog="dials.data info"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="show more output in machine readable format",
)
args = parser.parse_args(cmd_args)
information = {
"repository.location": dials_data.datasets.repository_location(),
"version.full": dials_data.__version__,
"version.commit": dials_data.__commit__[:7],
"version.major": ".".join(dials_data.__version__.split(".")[:2]),
}
if args.verbose:
for k in sorted(information):
print("{}={}".format(k, information[k]))
else:
print(
"""
DIALS regression data manager v{information[version.full]}
repository location: {information[repository.location]}
""".format(
information=information
).strip()
)
def cli_get(cmd_args):
parser = argparse.ArgumentParser(
description="Download datasets", prog="dials.data get"
)
parser.add_argument("dataset", nargs="*")
parser.add_argument(
"--create-hashinfo",
action="store_true",
help="generate file integrity information for specified datasets in the current directory",
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="machine readable output"
)
parser.add_argument(
"--verify", action="store_true", help="verify integrity of downloaded dataset"
)
args = parser.parse_args(cmd_args)
if args.verify and args.create_hashinfo:
sys.exit("Parameter --create-hashinfo can not be used with --verify")
if not args.dataset:
parser.print_help()
sys.exit(0)
unknown_data = set(args.dataset) - set(dials_data.datasets.definition)
if unknown_data:
sys.exit("Unknown dataset: {}".format(", ".join(unknown_data)))
repository = dials_data.datasets.repository_location()
if not args.quiet:
print(f"Repository location: {repository.strpath}\n")
for ds in args.dataset:
if not args.quiet:
print(f"Downloading dataset {ds}")
hashinfo = dials_data.download.fetch_dataset(
ds, ignore_hashinfo=args.create_hashinfo, verify=args.verify
)
if args.create_hashinfo:
if not args.quiet:
print(f"Writing file integrity information to {ds}.yml")
with open(f"{ds}.yml", "w") as fh:
yaml.dump(hashinfo, fh, default_flow_style=False)
if args.quiet:
print(repository.join(ds).strpath)
else:
print("Dataset {} stored in {}".format(ds, repository.join(ds).strpath))
def cli_list(cmd_args):
parser = argparse.ArgumentParser(
description="Show dataset information", prog="dials.data list"
)
parser.add_argument(
"--missing-hashinfo",
action="store_true",
help="only list datasets without file integrity information",
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="machine readable output"
)
args = parser.parse_args(cmd_args)
if args.missing_hashinfo:
ds_list = dials_data.datasets.fileinfo_dirty
else:
ds_list = dials_data.datasets.definition
dials_data.datasets.list_known_definitions(ds_list, quiet=args.quiet)
def main():
if dials_data.__commit__:
version = dials_data.__version__ + "-g" + dials_data.__commit__[:7]
else:
version = dials_data.__version__ + "-dev"
parser = argparse.ArgumentParser(
usage="dials.data <command> [<args>]",
description="""DIALS regression data manager v{version}
The most commonly used commands are:
list List available datasets
get Download datasets
Each command has its own set of parameters, and you can get more information
by running dials.data <command> --help
""".format(
version=version
),
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("subcommand", help=argparse.SUPPRESS)
# parse_args defaults to [1:] for args, but need to
# exclude the rest of the args too, or validation will fail
parameters = sys.argv[1:2]
if not parameters:
parser.print_help()
sys.exit(0)
args = parser.parse_args(parameters)
subcommand = globals().get("cli_" + args.subcommand)
if subcommand:
return subcommand(sys.argv[2:])
parser.print_help()
print()
sys.exit(f"Unrecognized command: {args.subcommand}")
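# Typical invocations of this CLI (illustrative; actual dataset names come from
# dials_data.datasets.definition):
#   dials.data info -v
#   dials.data list --quiet
#   dials.data get <dataset> --verify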
| 32.62069 | 99 | 0.646512 |
8d95dcb84f04bbe0268a6eb6b45884d13c275237 | 62,147 | py | Python | tests/test_scottbrian_utils/test_time_hdr.py | ScottBrian/scottbrian_utils | 897d61462153a40e00f8c596d5286958eb03f34a | [
"MIT"
] | null | null | null | tests/test_scottbrian_utils/test_time_hdr.py | ScottBrian/scottbrian_utils | 897d61462153a40e00f8c596d5286958eb03f34a | [
"MIT"
] | null | null | null | tests/test_scottbrian_utils/test_time_hdr.py | ScottBrian/scottbrian_utils | 897d61462153a40e00f8c596d5286958eb03f34a | [
"MIT"
] | null | null | null | """test_time_hdr.py module."""
from datetime import datetime, timedelta
import pytest
import sys
from typing import Any, Callable, cast, Tuple, Union
from typing_extensions import Final
from scottbrian_utils.time_hdr import StartStopHeader as StartStopHeader
from scottbrian_utils.time_hdr import time_box as time_box
from scottbrian_utils.time_hdr import DT_Format as DT_Format
class ErrorTstTimeHdr(Exception):
"""Base class for exception in this module."""
pass
class InvalidRouteNum(ErrorTstTimeHdr):
"""InvalidRouteNum exception class."""
pass
dt_format_arg_list = ['0',
'%H:%M',
'%H:%M:%S',
'%m/%d %H:%M:%S',
'%b %d %H:%M:%S',
'%m/%d/%y %H:%M:%S',
'%m/%d/%Y %H:%M:%S',
'%b %d %Y %H:%M:%S',
'%a %b %d %Y %H:%M:%S',
'%a %b %d %H:%M:%S.%f',
'%A %b %d %H:%M:%S.%f',
'%A %B %d %H:%M:%S.%f'
]
@pytest.fixture(params=dt_format_arg_list) # type: ignore
def dt_format_arg(request: Any) -> str:
"""Using different time formats.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(str, request.param)
style_num_list = [1, 2, 3]
@pytest.fixture(params=style_num_list) # type: ignore
def style_num(request: Any) -> int:
"""Using different time_box styles.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(int, request.param)
end_arg_list = ['0', '\n', '\n\n']
@pytest.fixture(params=end_arg_list) # type: ignore
def end_arg(request: Any) -> str:
"""Choose single or double space.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(str, request.param)
file_arg_list = ['0', 'None', 'sys.stdout', 'sys.stderr']
@pytest.fixture(params=file_arg_list) # type: ignore
def file_arg(request: Any) -> str:
"""Using different file arg.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(str, request.param)
flush_arg_list = ['0', 'True', 'False']
@pytest.fixture(params=flush_arg_list) # type: ignore
def flush_arg(request: Any) -> str:
"""False: do not flush print stream, True: flush print stream.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(str, request.param)
enabled_arg_list = ['0',
'static_true',
'static_false',
'dynamic_true',
'dynamic_false'
]
@pytest.fixture(params=enabled_arg_list) # type: ignore
def enabled_arg(request: Any) -> str:
"""Determines how to specify time_box_enabled.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(str, request.param)
class TestStartStopHeader:
"""TestStartStopHeader class."""
@pytest.fixture(scope='class') # type: ignore
def hdr(self) -> "StartStopHeader":
"""Method hdr.
Returns:
StartStopHeader instance
"""
return StartStopHeader('TestName')
def test_print_start_msg(self, hdr: "StartStopHeader", capsys: Any,
dt_format_arg: DT_Format,
end_arg: str,
file_arg: str,
flush_arg: str) -> None:
"""test_print_start_msg method.
Args:
hdr: instance of StartStopHeader
capsys: instance of the capture sys fixture
dt_format_arg: specifies dt_format_arg fixture
end_arg: specifies end_arg fixture
file_arg: specifies file_arg fixture
flush_arg: specifies the flush_arg fixture
"""
route_num, expected_dt_format, end, file, \
flush, enabled_tf = TestTimeBox.get_arg_flags(
dt_format=dt_format_arg,
end=end_arg,
file=file_arg,
flush=flush_arg,
enabled='0')
if route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0:
hdr.print_start_msg()
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB0:
hdr.print_start_msg(flush=flush)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB0:
hdr.print_start_msg(file=eval(file_arg))
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB0:
hdr.print_start_msg(file=eval(file_arg), flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB0:
hdr.print_start_msg(end=end_arg)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB0:
hdr.print_start_msg(end=end_arg, flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB0:
hdr.print_start_msg(end=end_arg, file=eval(file_arg))
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB0:
hdr.print_start_msg(end=end_arg, file=eval(file_arg),
flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, file=eval(file_arg))
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, file=eval(file_arg),
flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, end=end_arg)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, end=end_arg,
flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, end=end_arg,
file=eval(file_arg))
else: # route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB0:
hdr.print_start_msg(dt_format=dt_format_arg, end=end_arg,
file=eval(file_arg), flush=flush)
if file == 'sys.stdout':
captured = capsys.readouterr().out
else:
captured = capsys.readouterr().err
start_dt = hdr.start_DT
formatted_dt = start_dt.strftime(expected_dt_format)
msg = '* Starting TestName on ' + formatted_dt + ' *'
flowers = '*' * len(msg)
expected = '\n' + flowers + end + msg + end + flowers + end
assert captured == expected
def test_print_end_msg(self, hdr: "StartStopHeader", capsys: Any,
dt_format_arg: DT_Format,
end_arg: str,
file_arg: str,
flush_arg: str) -> None:
"""Method test_print_end_msg.
Args:
hdr: instance of StartStopHeader
capsys: instance of the capture sys fixture
dt_format_arg: specifies dt_format_arg fixture
end_arg: specifies end_arg fixture
file_arg: specifies file_arg fixture
flush_arg: specifies the flush_arg fixture
"""
route_num, expected_dt_format, end, file, \
flush, enabled_tf = TestTimeBox.get_arg_flags(
dt_format=dt_format_arg,
end=end_arg,
file=file_arg,
flush=flush_arg,
enabled='0')
if route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0:
hdr.print_end_msg()
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB0:
hdr.print_end_msg(flush=flush)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB0:
hdr.print_end_msg(file=eval(file_arg))
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB0:
hdr.print_end_msg(file=eval(file_arg), flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB0:
hdr.print_end_msg(end=end_arg)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB0:
hdr.print_end_msg(end=end_arg, flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB0:
hdr.print_end_msg(end=end_arg, file=eval(file_arg))
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB0:
hdr.print_end_msg(end=end_arg, file=eval(file_arg),
flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, file=eval(file_arg))
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, file=eval(file_arg),
flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, end=end_arg)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, end=end_arg,
flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, end=end_arg,
file=eval(file_arg))
else: # route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB0:
hdr.print_end_msg(dt_format=dt_format_arg, end=end_arg,
file=eval(file_arg), flush=flush)
if file == 'sys.stdout':
captured = capsys.readouterr().out
else:
captured = capsys.readouterr().err
start_dt = hdr.start_DT
end_dt = hdr.end_DT
formatted_delta = str(end_dt - start_dt)
formatted_dt = end_dt.strftime(expected_dt_format)
msg1: str = '* Ending TestName on ' + formatted_dt
msg2: str = '* Elapsed time: ' + formatted_delta
assert captured == TestStartStopHeader.get_flower_box(msg1, msg2, end)
@staticmethod
def get_flower_box(msg1: str, msg2: str, end: str) -> str:
"""Method get_flower_box.
Args:
msg1: first message to issue
msg2: second message to issue
end: specifies the end arg to use on the print statement
Returns:
The flower box with the messages inside
"""
flower_len: int = max(len(msg1), len(msg2)) + 2
flowers: str = '*' * flower_len
msg1 += ' ' * (flower_len - len(msg1) - 1) + '*'
msg2 += ' ' * (flower_len - len(msg2) - 1) + '*'
expected: str = '\n' + flowers + end + msg1 + end + msg2 + end + \
flowers + end
return expected
class TestTimeBox:
"""Class TestTimeBox."""
DT1: Final = 0b00010000
END1: Final = 0b00001000
FILE1: Final = 0b00000100
FLUSH1: Final = 0b00000010
ENAB1: Final = 0b00000001
DT0_END0_FILE0_FLUSH0_ENAB0: Final = 0b00000000
DT0_END0_FILE0_FLUSH0_ENAB1: Final = 0b00000001
DT0_END0_FILE0_FLUSH1_ENAB0: Final = 0b00000010
DT0_END0_FILE0_FLUSH1_ENAB1: Final = 0b00000011
DT0_END0_FILE1_FLUSH0_ENAB0: Final = 0b00000100
DT0_END0_FILE1_FLUSH0_ENAB1: Final = 0b00000101
DT0_END0_FILE1_FLUSH1_ENAB0: Final = 0b00000110
DT0_END0_FILE1_FLUSH1_ENAB1: Final = 0b00000111
DT0_END1_FILE0_FLUSH0_ENAB0: Final = 0b00001000
DT0_END1_FILE0_FLUSH0_ENAB1: Final = 0b00001001
DT0_END1_FILE0_FLUSH1_ENAB0: Final = 0b00001010
DT0_END1_FILE0_FLUSH1_ENAB1: Final = 0b00001011
DT0_END1_FILE1_FLUSH0_ENAB0: Final = 0b00001100
DT0_END1_FILE1_FLUSH0_ENAB1: Final = 0b00001101
DT0_END1_FILE1_FLUSH1_ENAB0: Final = 0b00001110
DT0_END1_FILE1_FLUSH1_ENAB1: Final = 0b00001111
DT1_END0_FILE0_FLUSH0_ENAB0: Final = 0b00010000
DT1_END0_FILE0_FLUSH0_ENAB1: Final = 0b00010001
DT1_END0_FILE0_FLUSH1_ENAB0: Final = 0b00010010
DT1_END0_FILE0_FLUSH1_ENAB1: Final = 0b00010011
DT1_END0_FILE1_FLUSH0_ENAB0: Final = 0b00010100
DT1_END0_FILE1_FLUSH0_ENAB1: Final = 0b00010101
DT1_END0_FILE1_FLUSH1_ENAB0: Final = 0b00010110
DT1_END0_FILE1_FLUSH1_ENAB1: Final = 0b00010111
DT1_END1_FILE0_FLUSH0_ENAB0: Final = 0b00011000
DT1_END1_FILE0_FLUSH0_ENAB1: Final = 0b00011001
DT1_END1_FILE0_FLUSH1_ENAB0: Final = 0b00011010
DT1_END1_FILE0_FLUSH1_ENAB1: Final = 0b00011011
DT1_END1_FILE1_FLUSH0_ENAB0: Final = 0b00011100
DT1_END1_FILE1_FLUSH0_ENAB1: Final = 0b00011101
DT1_END1_FILE1_FLUSH1_ENAB0: Final = 0b00011110
DT1_END1_FILE1_FLUSH1_ENAB1: Final = 0b00011111
@staticmethod
def get_arg_flags(*,
dt_format: str,
end: str,
file: str,
flush: str,
enabled: str
) -> Tuple[int, DT_Format, str, str, bool, bool]:
"""Static method get_arg_flags.
Args:
dt_format: 0 or the dt_format arg to use
end: 0 or the end arg to use
file: 0 or the file arg to use (stdout or stderr)
flush: 0 or the flush arg to use
enabled: 0 or the enabled arg to use
Returns:
the expected results based on the args
"""
route_num = TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0
expected_dt_format = DT_Format(StartStopHeader.default_dt_format)
if dt_format != '0':
route_num = route_num | TestTimeBox.DT1
expected_dt_format = DT_Format(dt_format)
expected_end = '\n'
if end != '0':
route_num = route_num | TestTimeBox.END1
expected_end = end
expected_file = 'sys.stdout'
if file != '0':
route_num = route_num | TestTimeBox.FILE1
if file != 'None':
expected_file = file
# Note: we can specify flush but we can not verify whether it works
expected_flush = False
if flush != '0':
route_num = route_num | TestTimeBox.FLUSH1
if flush == 'True':
expected_flush = True
expected_enabled_tf = True
if enabled != '0':
route_num = route_num | TestTimeBox.ENAB1
if (enabled == 'static_false') or (enabled == 'dynamic_false'):
expected_enabled_tf = False
return (route_num, expected_dt_format, expected_end, expected_file,
expected_flush, expected_enabled_tf)
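# For example, dt_format='%H:%M', end='\n\n', file='0', flush='True' and
# enabled='0' yields route_num == DT1 | END1 | FLUSH1 == 0b00011010,
# i.e. TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0.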
@staticmethod
def get_expected_msg(*,
expected_func_msg: str,
actual: str,
expected_dt_format: DT_Format =
DT_Format('%a %b %d %Y %H:%M:%S'),
# StartStopHeader.default_dt_format,
expected_end: str = '\n',
expected_enabled_tf: bool = True) -> str:
"""Static method get_expected_msg.
Helper function to build the expected message to compare
with the actual message captured with capsys.
Args:
expected_func_msg: message issued by wrapped function
actual: the message captured by capsys
expected_dt_format: dt_format to use to build expected message
expected_end: end arg to use to build expected message
expected_enabled_tf: expected enabled arg to use to build expected
message
Returns:
the expected message that is built based on the input args
"""
if expected_enabled_tf is False:
if expected_func_msg == '':
return ''
else:
return expected_func_msg + '\n'
start_dt = datetime.now()
end_dt = datetime.now() + timedelta(microseconds=42)
formatted_delta = str(end_dt - start_dt)
formatted_delta_len = len(formatted_delta)
formatted_dt = start_dt.strftime(expected_dt_format)
formatted_dt_len = len(formatted_dt)
start_time_marks = '#' * formatted_dt_len
start_time_len = len(start_time_marks)
end_time_marks = '%' * formatted_dt_len
end_time_len = len(end_time_marks)
elapsed_time_marks = '$' * formatted_delta_len
elapsed_time_len = len(elapsed_time_marks)
# build expected0
msg0 = '* Starting func on ' + start_time_marks
flower_len = len(msg0) + len(' *')
flowers = '*' * flower_len
msg0 += ' ' * (flower_len - len(msg0) - 1) + '*'
expected0 = '\n' + flowers + expected_end + msg0 + expected_end \
+ flowers + expected_end
# build expected1
msg1 = '* Ending func on ' + end_time_marks
msg2 = '* Elapsed time: ' + elapsed_time_marks
expected1 = TestStartStopHeader.get_flower_box(msg1, msg2,
expected_end)
if expected_func_msg == '':
expected = expected0 + expected1
else:
expected = expected0 + expected_func_msg + '\n' + expected1
# find positions of the start, end, and elapsed times
start_time_index = expected.index(start_time_marks)
end_time_index = expected.index(end_time_marks)
elapsed_time_index = expected.index(elapsed_time_marks)
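# Splice the actual timestamps and elapsed time captured from the output into
# the expected string so the comparison is insensitive to exactly when the
# decorated function ran.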
modified_expected = expected[0:start_time_index] \
+ actual[start_time_index:start_time_index+start_time_len] \
+ expected[start_time_index+start_time_len:end_time_index] \
+ actual[end_time_index:end_time_index+end_time_len] \
+ expected[end_time_index+end_time_len:elapsed_time_index] \
+ actual[elapsed_time_index:elapsed_time_index+elapsed_time_len] \
+ expected[elapsed_time_index+elapsed_time_len:]
return modified_expected
"""
The following section tests each combination of arguments to the time_box
decorator for three styles of decoration (using pie, calling the decorator
with the function as the first parameter, and calling the decorator with
the function specified after the call). This test is especially useful to
ensure that the type hints are working correctly, and that all
combinations are accepted by python.
The following keywords with various values and in all combinations are
tested:
dt_format - several different datetime formats - see format_list
end - either '\n' for single space, and '\n\n' for double space
file - either sys.stdout or sys.stderr
flush - true/false
time_box_enabled - true/false
"""
def test_timebox_router(self,
capsys: Any,
style_num: int,
dt_format_arg: str,
end_arg: str,
file_arg: str,
flush_arg: str,
enabled_arg: str
) -> None:
"""Method test_timebox_router.
Args:
capsys: instance of the capture sysout fixture
style_num: style from fixture
dt_format_arg: dt_format to use from fixture
end_arg: end arg from fixture for the print invocation
file_arg: file arg from fixture
flush_arg: flush arg from fixture to use on print statement
enabled_arg: specifies whether decorator is enabled
"""
# func: Union[Callable[[int, str], int],
# Callable[[int, str], None],
# Callable[[], int],
# Callable[[], None]]
a_func: Callable[..., Any]
expected_return_value: Union[int, None]
route_num, expected_dt_format, expected_end_arg, expected_file_arg, \
flush, enabled_tf = TestTimeBox.get_arg_flags(
dt_format=dt_format_arg,
end=end_arg,
file=file_arg,
flush=flush_arg,
enabled=enabled_arg)
enabled_spec: Union[bool, Callable[..., bool]] = enabled_tf
def enabled_func() -> bool: return enabled_tf
if (enabled_arg == 'dynamic_true') or (enabled_arg == 'dynamic_false'):
enabled_spec = enabled_func
if style_num == 1:
for func_style in range(1, 5):
a_func = TestTimeBox.build_style1_func(
route_num,
dt_format=DT_Format(dt_format_arg),
end=end_arg,
file=file_arg,
flush=flush,
enabled=enabled_spec,
f_style=func_style
)
if func_style == 1:
func_msg = 'The answer is: ' + str(route_num)
expected_return_value = route_num * style_num
actual_return_value = a_func(route_num,
func_msg)
elif func_style == 2:
func_msg = 'The answer is: ' + str(route_num)
expected_return_value = None
actual_return_value = a_func(route_num, func_msg)
elif func_style == 3:
func_msg = ''
expected_return_value = 42
actual_return_value = a_func()
else: # func_style == 4:
func_msg = ''
expected_return_value = None
actual_return_value = a_func()
TestTimeBox.check_results(
capsys=capsys,
func_msg=func_msg,
expected_dt_format=expected_dt_format,
expected_end=expected_end_arg,
expected_file=expected_file_arg,
expected_enabled_tf=enabled_tf,
expected_return_value=expected_return_value,
actual_return_value=actual_return_value
)
if route_num > TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB1:
break
return
elif style_num == 2:
a_func = TestTimeBox.build_style2_func(
route_num,
dt_format=DT_Format(dt_format_arg),
end=end_arg,
file=file_arg,
flush=flush,
enabled=enabled_spec
)
else: # style_num = 3
a_func = TestTimeBox.build_style3_func(
route_num,
dt_format=DT_Format(dt_format_arg),
end=end_arg,
file=file_arg,
flush=flush,
enabled=enabled_spec
)
func_msg = 'The answer is: ' + str(route_num)
expected_return_value = route_num * style_num
actual_return_value = a_func(route_num, func_msg)
TestTimeBox.check_results(
capsys=capsys,
func_msg=func_msg,
expected_dt_format=expected_dt_format,
expected_end=expected_end_arg,
expected_file=expected_file_arg,
expected_enabled_tf=enabled_tf,
expected_return_value=expected_return_value,
actual_return_value=actual_return_value
)
@staticmethod
def check_results(capsys: Any,
func_msg: str,
expected_dt_format: DT_Format,
expected_end: str,
expected_file: str,
expected_enabled_tf: bool,
expected_return_value: Union[int, None],
actual_return_value: Union[int, None]
) -> None:
"""Static method check_results.
Args:
capsys: instance of the capture sysout fixture
func_msg: message issued by wrapped function
expected_dt_format: dt_format that is used
expected_end: end arg for the print invocation
expected_file: sys.stdout or sys.stderr
expected_enabled_tf: specifies whether decorator is enabled
expected_return_value: the expected func return value
actual_return_value: the actual func return value
"""
if expected_file == 'sys.stdout':
actual = capsys.readouterr().out
else:
actual = capsys.readouterr().err
func_msg = ''
expected = TestTimeBox.get_expected_msg(
expected_func_msg=func_msg,
actual=actual,
expected_dt_format=expected_dt_format,
expected_end=expected_end,
expected_enabled_tf=expected_enabled_tf)
assert actual == expected
# check that func returns the correct value
message = "Expected return value: {0}, Actual return value: {1}"\
.format(expected_return_value, actual_return_value)
assert expected_return_value == actual_return_value, message
@staticmethod
def build_style1_func(route_num: int,
dt_format: DT_Format,
end: str,
file: str,
flush: bool,
enabled: Union[bool, Callable[..., bool]],
f_style: int
) -> Callable[..., Any]:
"""Static method build_style1_func.
Args:
route_num: specifies how to build the decorator
dt_format: dt format to use
end: end to use
file: specifies sys.stdout or sys.stderr for print statement
flush: specifies flush to use on print statement
enabled: specifies whether the decorator is enabled
f_style: type of call to build
Returns:
callable decorated function
Raises:
InvalidRouteNum: 'route_num was not recognized'
"""
# func: Union[Callable[[int, str], int],
# Callable[[int, str], None],
# Callable[[], int],
# Callable[[], None]]
if route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0:
if f_style == 1:
@time_box
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box
def func() -> int:
return 42
else: # f_style == 4:
@time_box
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB1:
if f_style == 1:
@time_box(time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(time_box_enabled=enabled)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(time_box_enabled=enabled)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB0:
if f_style == 1:
@time_box(flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(flush=flush)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(flush=flush)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(flush=flush)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB1:
if f_style == 1:
@time_box(flush=flush, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(flush=flush, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(flush=flush, time_box_enabled=enabled)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(flush=flush, time_box_enabled=enabled)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB0:
if f_style == 1:
@time_box(file=eval(file))
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(file=eval(file))
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(file=eval(file))
def func() -> int:
return 42
else: # f_style == 4:
@time_box(file=eval(file))
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB1:
if f_style == 1:
@time_box(file=eval(file), time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(file=eval(file), time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(file=eval(file), time_box_enabled=enabled)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(file=eval(file), time_box_enabled=enabled)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB0:
if f_style == 1:
@time_box(file=eval(file), flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(file=eval(file), flush=flush)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(file=eval(file), flush=flush)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(file=eval(file), flush=flush)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB1:
if f_style == 1:
@time_box(file=eval(file), flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif f_style == 2:
@time_box(file=eval(file), flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> None:
print(a_str)
elif f_style == 3:
@time_box(file=eval(file), flush=flush,
time_box_enabled=enabled)
def func() -> int:
return 42
else: # f_style == 4:
@time_box(file=eval(file), flush=flush,
time_box_enabled=enabled)
def func() -> None:
pass
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB0:
@time_box(end=end)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB1:
@time_box(end=end, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB0:
@time_box(end=end, flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB1:
@time_box(end=end, flush=flush, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB0:
@time_box(end=end, file=eval(file))
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB1:
@time_box(end=end, file=eval(file), time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB0:
@time_box(end=end, file=eval(file), flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB1:
@time_box(end=end, file=eval(file), flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB0:
@time_box(dt_format=dt_format)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB1:
@time_box(dt_format=dt_format, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB0:
@time_box(dt_format=dt_format, flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB1:
@time_box(dt_format=dt_format, flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB0:
@time_box(dt_format=dt_format, file=eval(file))
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB1:
@time_box(dt_format=dt_format, file=eval(file),
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB0:
@time_box(dt_format=dt_format, file=eval(file), flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB1:
@time_box(dt_format=dt_format, file=eval(file), flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB0:
@time_box(dt_format=dt_format, end=end)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB1:
@time_box(dt_format=dt_format, end=end, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0:
@time_box(dt_format=dt_format, end=end, flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB1:
@time_box(dt_format=dt_format, end=end, flush=flush,
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB0:
@time_box(dt_format=dt_format, end=end, file=eval(file))
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB1:
@time_box(dt_format=dt_format, end=end, file=eval(file),
time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB0:
@time_box(dt_format=dt_format, end=end, file=eval(file),
flush=flush)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB1:
@time_box(dt_format=dt_format, end=end, file=eval(file),
flush=flush, time_box_enabled=enabled)
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 1
else:
raise InvalidRouteNum('route_num was not recognized')
return func
@staticmethod
def build_style2_func(route_num: int,
dt_format: DT_Format,
end: str,
file: str,
flush: bool,
enabled: Union[bool, Callable[..., bool]]
) -> Callable[[int, str], int]:
"""Static method build_style2_func.
Args:
route_num: specifies how to build the decorator
dt_format: dt format to use
end: end to use
file: specifies sys.stdout or sys.stderr for print statement
flush: specifies flush to use on print statement
enabled: specifies whether the decorator is enabled
Returns:
callable decorated function
Raises:
InvalidRouteNum: 'route_num was not recognized'
"""
if route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, flush=flush)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, flush=flush, time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, file=eval(file))
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, file=eval(file), time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, file=eval(file), flush=flush)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, file=eval(file), flush=flush,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, flush=flush,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, file=eval(file))
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, file=eval(file),
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, file=eval(file), flush=flush)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, end=end, file=eval(file), flush=flush,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, flush=flush,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, file=eval(file))
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, file=eval(file),
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, file=eval(file),
flush=flush)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, file=eval(file),
flush=flush, time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end, flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end, flush=flush,
time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end,
file=eval(file))
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end,
file=eval(file), time_box_enabled=enabled)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end,
file=eval(file), flush=flush)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 2
func = time_box(func, dt_format=dt_format, end=end,
file=eval(file), flush=flush,
time_box_enabled=enabled)
else:
raise InvalidRouteNum('route_num was not recognized')
return func
@staticmethod
def build_style3_func(route_num: int,
dt_format: DT_Format,
end: str,
file: str,
flush: bool,
enabled: Union[bool, Callable[..., bool]]
) -> Callable[[int, str], int]:
"""Static method build_style3_func.
Args:
route_num: specifies how to build the decorator
dt_format: dt format to use
end: end to use
file: specifies sys.stdout or sys.stderr for print statement
flush: specifies flush to use on print statement
enabled: specifies whether the decorator is enabled
Returns:
callable decorated function
Raises:
InvalidRouteNum: 'route_num was not recognized'
"""
if route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box()(func)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(flush=flush)(func)
elif route_num == TestTimeBox.DT0_END0_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(flush=flush, time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(file=eval(file))(func)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(file=eval(file), time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(file=eval(file), flush=flush)(func)
elif route_num == TestTimeBox.DT0_END0_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(file=eval(file), flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end)(func)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, flush=flush)(func)
elif route_num == TestTimeBox.DT0_END1_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, file=eval(file))(func)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, file=eval(file),
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, file=eval(file), flush=flush)(func)
elif route_num == TestTimeBox.DT0_END1_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(end=end, file=eval(file), flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format)(func)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, flush=flush)(func)
elif route_num == TestTimeBox.DT1_END0_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, file=eval(file))(func)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, file=eval(file),
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, file=eval(file),
flush=flush)(func)
elif route_num == TestTimeBox.DT1_END0_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, file=eval(file), flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end)(func)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end,
flush=flush)(func)
elif route_num == TestTimeBox.DT1_END1_FILE0_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end, flush=flush,
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end,
file=eval(file))(func)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH0_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end, file=eval(file),
time_box_enabled=enabled)(func)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB0:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end, file=eval(file),
flush=flush)(func)
elif route_num == TestTimeBox.DT1_END1_FILE1_FLUSH1_ENAB1:
def func(a_int: int, a_str: str) -> int:
print(a_str)
return a_int * 3
func = time_box(dt_format=dt_format, end=end, file=eval(file),
flush=flush, time_box_enabled=enabled)(func)
else:
raise InvalidRouteNum('route_num was not recognized')
return func
class TestTimeBoxDocstrings:
"""Class TestTimeBoxDocstrings."""
def test_timebox_with_example_1(self) -> None:
"""Method test_timebox_with_example_1."""
print()
print('#' * 50)
print('Example for StartStopHeader:')
print()
def func1() -> None:
print('2 + 2 =', 2+2)
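        # StartStopHeader is the manual alternative to the decorator:
        # print the start message, run the code, then print the end message.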
hdr = StartStopHeader('func1')
hdr.print_start_msg(file=sys.stdout)
func1()
hdr.print_end_msg(file=sys.stdout)
def test_timebox_with_example_2(self) -> None:
"""Method test_timebox_with_example_2."""
print()
print('#' * 50)
print('Example for time_box decorator:')
print()
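        # The decorator form wraps func2 so the start and end messages are
        # printed automatically around its output.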
@time_box(file=sys.stdout)
def func2() -> None:
print('2 * 3 =', 2*3)
func2()
def test_timebox_with_example_3(self) -> None:
"""Method test_timebox_with_example_3."""
print()
print('#' * 50)
print('Example of printing to stderr:')
print()
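        # Only the time_box start/end messages are routed to stderr here;
        # func3's own print call still writes to stdout.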
@time_box(file=sys.stderr)
def func3() -> None:
print('this text printed to stdout, not stderr')
func3()
def test_timebox_with_example_4(self) -> None:
"""Method test_timebox_with_example_4."""
print()
print('#' * 50)
print('Example of statically wrapping function with time_box:')
print()
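        # With a plain bool, the enable decision is fixed when the
        # decorator is applied, so changing _tbe afterwards has no effect
        # on an already decorated function.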
_tbe = False
@time_box(time_box_enabled=_tbe, file=sys.stdout)
def func4a() -> None:
print('this is sample text for _tbe = False static example')
        func4a() # func4a is not wrapped by time_box
_tbe = True
@time_box(time_box_enabled=_tbe, file=sys.stdout)
def func4b() -> None:
print('this is sample text for _tbe = True static example')
        func4b() # func4b is wrapped by time_box
def test_timebox_with_example_5(self) -> None:
"""Method test_timebox_with_example_5."""
print()
print('#' * 50)
print('Example of dynamically wrapping function with time_box:')
print()
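        # Passing a callable instead of a bool lets time_box re-evaluate
        # the enable decision on every call, so toggling _tbe below turns
        # the wrapping on and off dynamically.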
_tbe = True
def tbe() -> bool: return _tbe
@time_box(time_box_enabled=tbe, file=sys.stdout)
def func5() -> None:
print('this is sample text for the tbe dynamic example')
        func5() # func5 is wrapped by time_box
_tbe = False
func5() # func5 is not wrapped by time_box
def test_timebox_with_example_6(self) -> None:
"""Method test_timebox_with_example_6."""
print()
print('#' * 50)
print('Example of using different datetime format:')
print()
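        # cast() just tells the type checker that this strftime string
        # satisfies DT_Format; time_box uses it when formatting the start
        # and end timestamps.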
a_datetime_format: DT_Format = cast(DT_Format, '%m/%d/%y %H:%M:%S')
@time_box(dt_format=a_datetime_format)
def func6() -> None:
print('this is sample text for the datetime format example')
func6()
| 41.348636 | 79 | 0.557565 |