text | meta
---|---|
from __future__ import absolute_import, division, print_function
from functools import wraps
__all__ = ['memoize', 'singleton', 'memoize_attr_check']
def _make_key(args, kwargs):
return args, frozenset(kwargs.items())
def memoize(func):
"""Save results of function calls to avoid repeated calculation"""
memo = {}
@wraps(func)
def wrapper(*args, **kwargs):
        # Building the cache key can itself raise TypeError (e.g. for
        # unhashable keyword values), so guard it separately from the lookup.
        try:
            key = _make_key(args, kwargs)
        except TypeError:  # unhashable input
            return func(*args, **kwargs)
        try:
            return memo[key]
        except KeyError:
            result = func(*args, **kwargs)
            memo[key] = result
            return result
        except TypeError:  # unhashable input
            return func(*args, **kwargs)
wrapper.__memoize_cache = memo
return wrapper
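# Illustrative usage sketch (hypothetical function names; not part of the
# original module). Repeated calls with the same hashable arguments are served
# from the cache, unhashable inputs fall back to a direct call, and
# clear_cache (defined just below) empties the cache explicitly.
def _example_memoize_usage():
    calls = []

    @memoize
    def slow_add(a, b):
        calls.append((a, b))
        return a + b

    assert slow_add(1, 2) == 3
    assert slow_add(1, 2) == 3              # second call hits the cache
    assert calls == [(1, 2)]                # underlying function ran only once
    assert slow_add([1], [2]) == [1, 2]     # unhashable args: direct call
    clear_cache(slow_add)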
def clear_cache(func):
"""
Clear the cache of a function that has potentially been
decorated by memoize. Safely ignores non-decorated functions
"""
try:
func.__memoize_cache.clear()
except AttributeError:
pass
def memoize_attr_check(attr):
""" Memoize a method call, cached both on arguments and given attribute
of first argument (which is presumably self)
Has the effect of re-calculating results if a specific attribute changes
"""
def decorator(func):
# must return a decorator function
@wraps(func)
def result(*args, **kwargs):
first_arg = getattr(args[0], attr)
return memo(first_arg, *args, **kwargs)
@memoize
def memo(*args, **kwargs):
return func(*args[1:], **kwargs)
return result
return decorator
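# Illustrative sketch (hypothetical class; not part of the original module):
# the cache key includes the named attribute of the first argument, so results
# are recomputed whenever that attribute changes.
def _example_memoize_attr_check_usage():
    class Table(object):
        def __init__(self):
            self.version = 0
            self.n_computed = 0

        @memoize_attr_check('version')
        def summary(self):
            self.n_computed += 1
            return 'summary-%i' % self.version

    t = Table()
    assert t.summary() == 'summary-0'
    assert t.summary() == 'summary-0'   # cached: same args, same .version
    assert t.n_computed == 1
    t.version = 1                       # attribute change invalidates the cache
    assert t.summary() == 'summary-1'
    assert t.n_computed == 2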
def singleton(cls):
"""Turn a class into a singleton, such that new objects
in this class share the same instance"""
instances = {}
@wraps(cls)
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
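# Illustrative sketch (hypothetical class; not part of the original module):
# after decoration, every "construction" returns the same shared instance.
def _example_singleton_usage():
    @singleton
    class Registry(object):
        def __init__(self):
            self.items = []

    a = Registry()
    b = Registry()
    assert a is b
    a.items.append('shared')
    assert b.items == ['shared']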
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/decorators.py",
"copies": "1",
"size": "1903",
"license": "bsd-3-clause",
"hash": -4601531437269009400,
"line_mean": 23.7142857143,
"line_max": 76,
"alpha_frac": 0.6111403048,
"autogenerated": false,
"ratio": 4.266816143497758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5377956448297758,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import wraps
import numpy as np
from matplotlib.patches import Polygon, Rectangle, Ellipse, PathPatch
from matplotlib.path import Path as mplPath
from matplotlib.transforms import IdentityTransform, blended_transform_factory
import copy
np.seterr(all='ignore')
from .exceptions import UndefinedROI
__all__ = ['Roi', 'RectangularROI', 'CircularROI', 'PolygonalROI',
'AbstractMplRoi', 'MplRectangularROI', 'MplCircularROI',
'MplPolygonalROI', 'MplXRangeROI', 'MplYRangeROI',
           'XRangeROI', 'RangeROI', 'YRangeROI', 'VertexROIBase']
PATCH_COLOR = '#FFFF00'
SCRUBBING_KEY = 'control'
try:
from matplotlib.nxutils import points_inside_poly
except ImportError: # nxutils removed in MPL v1.3
from matplotlib.path import Path as mplPath
def points_inside_poly(xypts, xyvts):
p = mplPath(xyvts)
return p.contains_points(xypts)
def aspect_ratio(axes):
""" Returns the pixel height / width of a box that spans 1
data unit in x and y
"""
width = axes.get_position().width * axes.figure.get_figwidth()
height = axes.get_position().height * axes.figure.get_figheight()
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
return height / width / (ymax - ymin) * (xmax - xmin)
def data_to_norm(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
pixel = axes.transData.transform(xy)
norm = axes.transAxes.inverted().transform(pixel)
return norm
def data_to_pixel(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
return axes.transData.transform(xy)
def pixel_to_data(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
return axes.transData.inverted().transform(xy)
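# Illustrative sketch (assumes matplotlib is installed, as for the rest of this
# module; the axis limits below are arbitrary): the helpers above convert
# between data, pixel and normalized axes coordinates, so a round trip through
# data_to_pixel / pixel_to_data recovers the input.
def _example_coordinate_helpers():
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 5)
    pix = data_to_pixel(ax, [2.0], [3.0])           # shape (1, 2) pixel coords
    back = pixel_to_data(ax, pix[:, 0], pix[:, 1])
    assert np.allclose(back, [[2.0, 3.0]])
    norm = data_to_norm(ax, [2.0], [3.0])           # fractions of the axes box
    assert np.all((norm >= 0) & (norm <= 1))
    plt.close(fig)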
class Roi(object): # pragma: no cover
"""
A geometrical 2D region of interest.
    Glue uses ROIs to represent user-drawn regions on plots. There
are many specific subtypes of Roi, but they all have a ``contains``
method to test whether a collection of 2D points lies inside the region.
"""
def contains(self, x, y):
"""Return true/false for each x/y pair.
:param x: Array of X locations
:param y: Array of Y locations
:returns: A Boolean array, where each element is True
if the corresponding (x,y) tuple is inside the Roi.
:raises: UndefinedROI exception if not defined
"""
raise NotImplementedError()
def center(self):
"""Return the (x,y) coordinates of the ROI center"""
raise NotImplementedError()
def move_to(self, x, y):
"""Translate the ROI to a center of (x, y)"""
raise NotImplementedError()
def defined(self):
""" Returns whether or not the subset is properly defined """
raise NotImplementedError()
def to_polygon(self):
""" Returns a tuple of x and y points, approximating the ROI
as a polygon."""
raise NotImplementedError
def copy(self):
"""
Return a clone of the ROI
"""
return copy.copy(self)
class PointROI(Roi):
def __init__(self, x=None, y=None):
self.x = x
self.y = y
def contains(self, x, y):
return False
def move_to(self, x, y):
self.x = x
self.y = y
def defined(self):
try:
return np.isfinite([self.x, self.y]).all()
except TypeError:
return False
def center(self):
return self.x, self.y
def reset(self):
self.x = self.y = None
class RectangularROI(Roi):
"""
A 2D rectangular region of interest.
"""
def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None):
super(RectangularROI, self).__init__()
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
if self.defined():
return "x=[%0.3f, %0.3f], y=[%0.3f, %0.3f]" % (self.xmin,
self.xmax,
self.ymin,
self.ymax)
else:
return "Undefined Rectangular ROI"
def center(self):
return self.xmin + self.width() / 2, self.ymin + self.height() / 2
def move_to(self, x, y):
cx, cy = self.center()
dx = x - cx
dy = y - cy
self.xmin += dx
self.xmax += dx
self.ymin += dy
self.ymax += dy
def corner(self):
return (self.xmin, self.ymin)
def width(self):
return self.xmax - self.xmin
def height(self):
return self.ymax - self.ymin
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A scalar or numpy array of x points
:param y: A scalar or numpy array of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
return (x > self.xmin) & (x < self.xmax) & \
(y > self.ymin) & (y < self.ymax)
def update_limits(self, xmin, ymin, xmax, ymax):
"""
Update the limits of the rectangle
"""
self.xmin = min(xmin, xmax)
self.xmax = max(xmin, xmax)
self.ymin = min(ymin, ymax)
self.ymax = max(ymin, ymax)
def reset(self):
"""
Reset the rectangular region.
"""
self.xmin = None
self.xmax = None
self.ymin = None
self.ymax = None
def defined(self):
return self.xmin is not None
def to_polygon(self):
if self.defined():
return [self.xmin, self.xmax, self.xmax, self.xmin, self.xmin], \
[self.ymin, self.ymin, self.ymax, self.ymax, self.ymin]
else:
return [], []
def __gluestate__(self, context):
return dict(xmin=self.xmin, xmax=self.xmax, ymin=self.ymin, ymax=self.ymax)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(xmin=rec['xmin'], xmax=rec['xmax'],
ymin=rec['ymin'], ymax=rec['ymax'])
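# Illustrative sketch (hypothetical coordinate values): a rectangle spanning
# x in [0, 2] and y in [0, 1]; contains() works elementwise on arrays and
# to_polygon() returns a closed outline suitable for plotting.
def _example_rectangular_roi():
    roi = RectangularROI()
    assert not roi.defined()
    roi.update_limits(0, 0, 2, 1)
    inside = roi.contains(np.array([1.0, 3.0]), np.array([0.5, 0.5]))
    assert inside.tolist() == [True, False]
    assert roi.center() == (1.0, 0.5)
    x, y = roi.to_polygon()
    assert len(x) == len(y) == 5        # first vertex repeated to close the loop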
class RangeROI(Roi):
def __init__(self, orientation, min=None, max=None):
""":param orientation: 'x' or 'y'. Sets which axis to range"""
super(RangeROI, self).__init__()
if orientation not in ['x', 'y']:
raise TypeError("Orientation must be one of 'x', 'y'")
self.min = min
self.max = max
self.ori = orientation
def __str__(self):
if self.defined():
return "%0.3f < %s < %0.3f" % (self.min, self.ori,
self.max)
else:
return "Undefined %s" % type(self).__name__
def range(self):
return self.min, self.max
def center(self):
return (self.min + self.max) / 2
def set_range(self, lo, hi):
self.min, self.max = lo, hi
def move_to(self, center):
delta = center - self.center()
self.min += delta
self.max += delta
def contains(self, x, y):
if not self.defined():
raise UndefinedROI()
coord = x if self.ori == 'x' else y
return (coord > self.min) & (coord < self.max)
def reset(self):
self.min = None
self.max = None
def defined(self):
return self.min is not None and self.max is not None
def to_polygon(self):
if self.defined():
on = [self.min, self.max, self.max, self.min, self.min]
off = [-1e100, -1e100, 1e100, 1e100, -1e100]
x, y = (on, off) if (self.ori == 'x') else (off, on)
return x, y
else:
return [], []
def __gluestate__(self, context):
return dict(ori=self.ori, min=self.min, max=self.max)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(rec['ori'], min=rec['min'], max=rec['max'])
class XRangeROI(RangeROI):
def __init__(self, min=None, max=None):
super(XRangeROI, self).__init__('x', min=min, max=max)
class YRangeROI(RangeROI):
def __init__(self, min=None, max=None):
super(YRangeROI, self).__init__('y', min=min, max=max)
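# Illustrative sketch (hypothetical values): a range ROI constrains only one
# axis, so the y values passed to contains() are ignored for an XRangeROI.
def _example_xrange_roi():
    roi = XRangeROI(min=1.0, max=3.0)
    inside = roi.contains(np.array([0.5, 2.0]), np.array([100.0, -100.0]))
    assert inside.tolist() == [False, True]
    assert roi.center() == 2.0
    roi.move_to(5.0)                    # translate so the new center is 5.0
    assert roi.range() == (4.0, 6.0)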
class CircularROI(Roi):
"""
A 2D circular region of interest.
"""
def __init__(self, xc=None, yc=None, radius=None):
super(CircularROI, self).__init__()
self.xc = xc
self.yc = yc
self.radius = radius
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A list of x points
:param y: A list of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
return (x - self.xc) ** 2 + (y - self.yc) ** 2 < self.radius ** 2
def set_center(self, x, y):
"""
Set the center of the circular region
"""
self.xc = x
self.yc = y
def set_radius(self, radius):
"""
Set the radius of the circular region
"""
self.radius = radius
def get_center(self):
return self.xc, self.yc
def get_radius(self):
return self.radius
def reset(self):
"""
        Reset the circular region.
"""
self.xc = None
self.yc = None
self.radius = 0.
def defined(self):
""" Returns True if the ROI is defined """
return self.xc is not None and \
self.yc is not None and self.radius is not None
def to_polygon(self):
""" Returns x, y, where each is a list of points """
if not self.defined():
return [], []
theta = np.linspace(0, 2 * np.pi, num=20)
x = self.xc + self.radius * np.cos(theta)
y = self.yc + self.radius * np.sin(theta)
return x, y
def __gluestate__(self, context):
return dict(xc=self.xc, yc=self.yc, radius=self.radius)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(xc=rec['xc'], yc=rec['yc'], radius=rec['radius'])
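# Illustrative sketch (hypothetical values): circular membership test plus the
# 20-point polygon approximation returned by to_polygon().
def _example_circular_roi():
    roi = CircularROI(xc=0.0, yc=0.0, radius=2.0)
    inside = roi.contains([1.0, 3.0], [0.0, 0.0])
    assert inside.tolist() == [True, False]
    x, y = roi.to_polygon()
    assert len(x) == 20
    assert np.allclose(np.hypot(x, y), 2.0)   # every vertex lies on the circle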
class VertexROIBase(Roi):
def __init__(self, vx=None, vy=None):
"""
:param vx: initial x vertices
:type vx: list
:param vy: initial y vertices
:type vy: list
"""
super(VertexROIBase, self).__init__()
self.vx = vx
self.vy = vy
if self.vx is None:
self.vx = []
if self.vy is None:
self.vy = []
def add_point(self, x, y):
"""
Add another vertex to the ROI
:param x: The x coordinate
:param y: The y coordinate
"""
self.vx.append(x)
self.vy.append(y)
def reset(self):
"""
Reset the vertex list.
"""
self.vx = []
self.vy = []
def replace_last_point(self, x, y):
if len(self.vx) > 0:
self.vx[-1] = x
self.vy[-1] = y
def remove_point(self, x, y, thresh=None):
"""Remove the vertex closest to a reference (xy) point
:param x: The x coordinate of the reference point
:param y: The y coordinate of the reference point
        :param thresh: An optional threshold. If present, the vertex
closest to (x,y) will only be removed if the distance
is less than thresh
"""
if len(self.vx) == 0:
return
# find distance between vertices and input
dist = [(x - a) ** 2 + (y - b) ** 2 for a, b
in zip(self.vx, self.vy)]
inds = range(len(dist))
near = min(inds, key=lambda x: dist[x])
if thresh is not None and dist[near] > (thresh ** 2):
return
self.vx = [self.vx[i] for i in inds if i != near]
self.vy = [self.vy[i] for i in inds if i != near]
def defined(self):
return len(self.vx) > 0
def to_polygon(self):
return self.vx, self.vy
def __gluestate__(self, context):
return dict(vx=np.asarray(self.vx).tolist(),
vy=np.asarray(self.vy).tolist())
@classmethod
def __setgluestate__(cls, rec, context):
return cls(vx=rec['vx'], vy=rec['vy'])
class PolygonalROI(VertexROIBase):
"""
A class to define 2D polygonal regions-of-interest
"""
def __str__(self):
result = 'Polygonal ROI ('
result += ','.join(['(%s, %s)' % (x, y)
for x, y in zip(self.vx, self.vy)])
result += ')'
return result
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A list of x points
:param y: A list of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
xypts = np.column_stack((x.flat, y.flat))
xyvts = np.column_stack((self.vx, self.vy))
result = points_inside_poly(xypts, xyvts)
good = np.isfinite(xypts).all(axis=1)
result[~good] = False
result.shape = x.shape
return result
def move_to(self, xdelta, ydelta):
self.vx = list(map(lambda x: x + xdelta, self.vx))
self.vy = list(map(lambda y: y + ydelta, self.vy))
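# Illustrative sketch (hypothetical vertices): a unit-square polygon; contains()
# accepts arrays and reports non-finite points as outside, and move_to()
# translates every vertex by the given offsets.
def _example_polygonal_roi():
    roi = PolygonalROI(vx=[0, 1, 1, 0], vy=[0, 0, 1, 1])
    inside = roi.contains(np.array([0.5, 2.0, np.nan]),
                          np.array([0.5, 0.5, 0.5]))
    assert inside.tolist() == [True, False, False]
    roi.move_to(10, 0)
    assert roi.vx == [10, 11, 11, 10]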
class Path(VertexROIBase):
def __str__(self):
result = 'Path ('
result += ','.join(['(%s, %s)' % (x, y)
for x, y in zip(self.vx, self.vy)])
result += ')'
return result
class AbstractMplRoi(object): # pragma: no cover
""" Base class for objects which use
Matplotlib user events to edit/display ROIs
"""
def __init__(self, axes):
"""
:param axes: The Matplotlib Axes object to draw to
"""
self._axes = axes
self._roi = self._roi_factory()
self._previous_roi = None
self._mid_selection = False
self._scrubbing = False
def _draw(self):
self._axes.figure.canvas.draw()
def _roi_factory(self):
raise NotImplementedError()
def roi(self):
return self._roi.copy()
def reset(self, include_roi=True):
self._mid_selection = False
self._scrubbing = False
if include_roi:
self._roi.reset()
self._sync_patch()
def active(self):
return self._mid_selection
def start_selection(self, event):
raise NotImplementedError()
def update_selection(self, event):
raise NotImplementedError()
def finalize_selection(self, event):
raise NotImplementedError()
def abort_selection(self, event):
if self._mid_selection:
self._roi_restore()
self.reset(include_roi=False)
def _sync_patch(self):
raise NotImplementedError()
def _roi_store(self):
self._previous_roi = self._roi.copy()
def _roi_restore(self):
self._roi = self._previous_roi
class MplPickROI(AbstractMplRoi):
def _draw(self):
pass
def _roi_factory(self):
return PointROI()
def start_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def update_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def finalize_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def _sync_patch(self):
pass
class MplRectangularROI(AbstractMplRoi):
"""
    A subclass of AbstractMplRoi that renders a rectangular ROI on a plot
*Attributes*:
plot_opts:
Dictionary instance
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self._yi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._patch = Rectangle((0., 0.), 1., 1.)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return RectangularROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
self._xi = event.xdata
self._yi = event.ydata
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._cx, self._cy = self._roi.center()
else:
self.reset()
self._roi.update_limits(event.xdata, event.ydata,
event.xdata, event.ydata)
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(self._cx + event.xdata - self._xi,
self._cy + event.ydata - self._yi)
else:
self._roi.update_limits(min(event.xdata, self._xi),
min(event.ydata, self._yi),
max(event.xdata, self._xi),
max(event.ydata, self._yi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
corner = self._roi.corner()
width = self._roi.width()
height = self._roi.height()
self._patch.set_xy(corner)
self._patch.set_width(width)
self._patch.set_height(height)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
def __str__(self):
return "MPL Rectangle: %s" % self._patch
class MplXRangeROI(AbstractMplRoi):
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
trans = blended_transform_factory(self._axes.transData,
self._axes.transAxes)
self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return XRangeROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._dx = event.xdata - self._roi.center()
else:
self.reset()
self._roi.set_range(event.xdata, event.xdata)
self._xi = event.xdata
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.xdata + self._dx)
else:
self._roi.set_range(min(event.xdata, self._xi),
max(event.xdata, self._xi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
rng = self._roi.range()
self._patch.set_xy((rng[0], 0))
self._patch.set_width(rng[1] - rng[0])
self._patch.set_height(1)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
class MplYRangeROI(AbstractMplRoi):
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
trans = blended_transform_factory(self._axes.transAxes,
self._axes.transData)
self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return YRangeROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._dy = event.ydata - self._roi.center()
else:
self.reset()
self._roi.set_range(event.ydata, event.ydata)
self._xi = event.ydata
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.ydata + self._dy)
else:
self._roi.set_range(min(event.ydata, self._xi),
max(event.ydata, self._xi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
rng = self._roi.range()
self._patch.set_xy((0, rng[0]))
self._patch.set_height(rng[1] - rng[0])
self._patch.set_width(1)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
class MplCircularROI(AbstractMplRoi):
"""
Class to display / edit circular ROIs using matplotlib
Since circles on the screen may not be circles in the data
(due, e.g., to logarithmic scalings on the axes), the
ultimate ROI that is created is a polygonal ROI
:param plot_opts:
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._xi = None
self._yi = None
self._setup_patch()
def _setup_patch(self):
self._patch = Ellipse((0., 0.), transform=IdentityTransform(),
width=0., height=0.,)
self._patch.set_zorder(100)
self._patch.set(**self.plot_opts)
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return CircularROI()
def _sync_patch(self):
# Update geometry
if not self._roi.defined():
self._patch.set_visible(False)
else:
xy = self._roi.get_center()
r = self._roi.get_radius()
self._patch.center = xy
self._patch.width = 2. * r
self._patch.height = 2. * r
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
xi = xy[0, 0]
yi = xy[0, 1]
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(xi, yi):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
(xc, yc) = self._roi.get_center()
self._dx = xc - xi
self._dy = yc - yi
else:
self.reset()
self._roi.set_center(xi, yi)
self._roi.set_radius(0.)
self._xi = xi
self._yi = yi
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
xi = xy[0, 0]
yi = xy[0, 1]
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.set_center(xi + self._dx, yi + self._dy)
else:
dx = xy[0, 0] - self._xi
dy = xy[0, 1] - self._yi
self._roi.set_radius(np.hypot(dx, dy))
self._sync_patch()
def roi(self):
if not self._roi.defined():
return PolygonalROI()
theta = np.linspace(0, 2 * np.pi, num=200)
xy_center = self._roi.get_center()
rad = self._roi.get_radius()
x = xy_center[0] + rad * np.cos(theta)
y = xy_center[1] + rad * np.sin(theta)
xy_data = pixel_to_data(self._axes, x, y)
vx = xy_data[:, 0].ravel().tolist()
vy = xy_data[:, 1].ravel().tolist()
result = PolygonalROI(vx, vy)
return result
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
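# Illustrative sketch (assumes matplotlib; pokes at the private _roi attribute
# purely for demonstration): because a circle on screen need not be a circle in
# data space, roi() returns a PolygonalROI sampled at 200 points.
def _example_mpl_circular_roi():
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    mpl_roi = MplCircularROI(ax)
    mpl_roi._roi.set_center(100.0, 100.0)   # center and radius in pixel units
    mpl_roi._roi.set_radius(20.0)
    poly = mpl_roi.roi()
    assert isinstance(poly, PolygonalROI)
    assert len(poly.vx) == 200
    plt.close(fig)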
class MplPolygonalROI(AbstractMplRoi):
"""
Defines and displays polygonal ROIs on matplotlib plots
Attributes:
plot_opts: Dictionary instance
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._setup_patch()
def _setup_patch(self):
self._patch = Polygon(np.array(list(zip([0, 1], [0, 1]))))
self._patch.set_zorder(100)
self._patch.set(**self.plot_opts)
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return PolygonalROI()
def _sync_patch(self):
# Update geometry
if not self._roi.defined():
self._patch.set_visible(False)
else:
x, y = self._roi.to_polygon()
self._patch.set_xy(list(zip(x + [x[0]],
y + [y[0]])))
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._cx = event.xdata
self._cy = event.ydata
else:
self.reset()
self._roi.add_point(event.xdata, event.ydata)
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.xdata - self._cx,
event.ydata - self._cy)
self._cx = event.xdata
self._cy = event.ydata
else:
self._roi.add_point(event.xdata, event.ydata)
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
class MplPathROI(MplPolygonalROI):
    def _roi_factory(self):
return Path()
def _setup_patch(self):
self._patch = None
def _sync_patch(self):
if self._patch is not None:
self._patch.remove()
self._patch = None
# Update geometry
if not self._roi.defined():
return
else:
x, y = self._roi.to_polygon()
            p = mplPath(np.column_stack((x, y)))
            self._patch = PathPatch(p)
            # the new patch must be attached to the axes to be drawn
            self._axes.add_patch(self._patch)
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def finalize_selection(self, event):
self._mid_selection = False
if self._patch is not None:
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
class CategoricalRoi(Roi):
"""
A ROI abstraction to represent selections of categorical data.
"""
def __init__(self, categories=None):
if categories is None:
self.categories = None
else:
self.update_categories(categories)
def _categorical_helper(self, indata):
"""
        A helper function to do the rigmarole of getting categorical data.
:param indata: Any type of input data
:return: The best guess at the categorical data associated with indata
"""
try:
if indata.categorical:
return indata._categorical_data
else:
return indata[:]
except AttributeError:
return np.asarray(indata)
def contains(self, x, y):
"""
        Test whether a set of categorical elements falls within
the region of interest
:param x: Any array-like object of categories
        (includes CategoricalComponents)
:param y: Unused but required for compatibility
*Returns*
A list of True/False values, for whether each x value falls
within the ROI
"""
check = self._categorical_helper(x)
index = np.minimum(np.searchsorted(self.categories, check),
len(self.categories)-1)
return self.categories[index] == check
def update_categories(self, categories):
self.categories = np.unique(self._categorical_helper(categories))
def defined(self):
""" Returns True if the ROI is defined """
return self.categories is not None
def reset(self):
self.categories = None
@staticmethod
def from_range(cat_comp, lo, hi):
"""
Utility function to help construct the Roi from a range.
:param cat_comp: Anything understood by ._categorical_helper ... array, list or component
:param lo: lower bound of the range
:param hi: upper bound of the range
:return: CategoricalRoi object
"""
roi = CategoricalRoi()
cat_data = cat_comp._categories
        roi.update_categories(cat_data[int(np.floor(lo)):int(np.ceil(hi))])
return roi
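# Illustrative sketch (hypothetical category labels): membership is evaluated
# against the unique set of categories held by the ROI, and the second argument
# to contains() is ignored.
def _example_categorical_roi():
    roi = CategoricalRoi(categories=['a', 'b', 'a', 'c'])
    assert roi.defined()
    inside = roi.contains(np.array(['a', 'd', 'c']), None)
    assert inside.tolist() == [True, False, True]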
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/roi.py",
"copies": "1",
"size": "34433",
"license": "bsd-3-clause",
"hash": 455936305685501100,
"line_mean": 26.8134087237,
"line_max": 97,
"alpha_frac": 0.5387564255,
"autogenerated": false,
"ratio": 3.8152908587257617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853786700679925,
"avg_score": 0.000052116709167251514,
"num_lines": 1238
} |
from __future__ import absolute_import, division, print_function
from functools import wraps
__all__ = ['memoize', 'singleton', 'memoize_attr_check']
def _make_key(args, kwargs):
return args, frozenset(kwargs.items())
def memoize(func):
"""Save results of function calls to avoid repeated calculation"""
memo = {}
@wraps(func)
def wrapper(*args, **kwargs):
# Note that here we have two separate try...except statements, because
# we want to make sure that we catch only TypeError on the first
# statement, and both TypeError and KeyError on the second.
try:
key = _make_key(args, kwargs)
except TypeError: # unhashable input
return func(*args, **kwargs)
try:
return memo[key]
except KeyError:
result = func(*args, **kwargs)
memo[key] = result
return result
except TypeError: # unhashable input
return func(*args, **kwargs)
wrapper.__memoize_cache = memo
return wrapper
def clear_cache(func):
"""
Clear the cache of a function that has potentially been
decorated by memoize. Safely ignores non-decorated functions
"""
try:
func.__memoize_cache.clear()
except AttributeError:
pass
def memoize_attr_check(attr):
""" Memoize a method call, cached both on arguments and given attribute
of first argument (which is presumably self)
Has the effect of re-calculating results if a specific attribute changes
"""
def decorator(func):
# must return a decorator function
@wraps(func)
def result(*args, **kwargs):
first_arg = getattr(args[0], attr)
return memo(first_arg, *args, **kwargs)
@memoize
def memo(*args, **kwargs):
return func(*args[1:], **kwargs)
return result
return decorator
def singleton(cls):
"""Turn a class into a singleton, such that new objects
in this class share the same instance"""
instances = {}
@wraps(cls)
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/decorators.py",
"copies": "5",
"size": "2231",
"license": "bsd-3-clause",
"hash": -6575718541488137000,
"line_mean": 24.6436781609,
"line_max": 78,
"alpha_frac": 0.6131779471,
"autogenerated": false,
"ratio": 4.374509803921568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 87
} |
from __future__ import absolute_import, division, print_function
from functools import wraps, partial
import traceback
from .data_collection import DataCollection
from .data_factories import load_data
from . import command
from . import Data, Subset
from .hub import HubListener
from .util import PropertySetMixin
from ..utils import as_list
from .edit_subset_mode import EditSubsetMode
from .session import Session
from ..config import settings
__all__ = ['Application', 'ViewerBase']
def catch_error(msg):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
m = "%s\n%s" % (msg, str(e))
detail = str(traceback.format_exc())
self = args[0]
self.report_error(m, detail)
return wrapper
return decorator
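# Illustrative sketch (hypothetical class and message; not part of the original
# module): catch_error assumes it decorates a method, because errors are routed
# through self.report_error on the first positional argument.
def _example_catch_error_usage():
    class FakeApp(object):
        def __init__(self):
            self.errors = []

        def report_error(self, message, detail):
            self.errors.append(message)

        @catch_error("Could not do the thing")
        def do_the_thing(self):
            raise ValueError("boom")

    app = FakeApp()
    app.do_the_thing()      # the exception is swallowed and reported instead
    assert app.errors and app.errors[0].startswith("Could not do the thing")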
class Application(HubListener):
def __init__(self, data_collection=None, session=None):
if session is not None:
self._session = session
session.application = self
self._data = session.data_collection
else:
self._data = data_collection or DataCollection()
self._session = Session(data_collection=self._data,
application=self)
EditSubsetMode().data_collection = self._data
self._hub = self._session.hub
self._cmds = self._session.command_stack
self._cmds.add_callback(lambda x: self._update_undo_redo_enabled())
self._settings = {}
for key, value, validator in settings:
self._settings[key] = [value, validator]
@property
def session(self):
return self._session
@property
def data_collection(self):
return self.session.data_collection
def new_data_viewer(self, viewer_class, data=None):
"""
Create a new data viewer, add it to the UI,
and populate with data
"""
if viewer_class is None:
return
c = viewer_class(self._session)
c.register_to_hub(self._session.hub)
if data and not c.add_data(data):
c.close(warn=False)
return
self.add_widget(c)
c.show()
return c
@catch_error("Failed to save session")
def save_session(self, path, include_data=False):
""" Save the data collection and hub to file.
Can be restored via restore_session
Note: Saving of client is not currently supported. Thus,
restoring this session will lose all current viz windows
"""
from .state import GlueSerializer
gs = GlueSerializer(self, include_data=include_data)
state = gs.dumps(indent=2)
with open(path, 'w') as out:
out.write(state)
def new_tab(self):
raise NotImplementedError()
def add_widget(self, widget, label=None, tab=None):
raise NotImplementedError()
def close_tab(self):
raise NotImplementedError()
def get_setting(self, key):
"""
Fetch the value of an application setting
"""
return self._settings[key][0]
def set_setting(self, key, value):
"""
Set the value of an application setting
Raises a KeyError if the setting does not exist
Raises a ValueError if the value is invalid
"""
validator = self._settings[key][1]
self._settings[key][0] = validator(value)
@property
def settings(self):
"""Iterate over settings"""
for key, (value, _) in self._settings.items():
yield key, value
@catch_error("Could not load data")
def load_data(self, path):
d = load_data(path)
self.add_datasets(self.data_collection, d)
def report_error(self, message, detail):
""" Report an error message to the user.
Must be implemented in a subclass
:param message: the message to display
:type message: str
        :param detail: Longer context about the error
        :type detail: str
"""
raise NotImplementedError()
def do(self, command):
self._cmds.do(command)
def undo(self):
try:
self._cmds.undo()
except RuntimeError:
pass
def redo(self):
try:
self._cmds.redo()
except RuntimeError:
pass
def _update_undo_redo_enabled(self):
raise NotImplementedError()
@classmethod
def add_datasets(cls, data_collection, datasets):
"""
Utility method to interactively add datasets to a
data_collection
:param data_collection: :class:`~glue.core.data_collection.DataCollection`
:param datasets: one or more :class:`~glue.core.data.Data` instances
Adds datasets to the collection
"""
datasets = as_list(datasets)
data_collection.extend(datasets)
list(map(partial(cls._suggest_mergers, data_collection), datasets))
@classmethod
def _suggest_mergers(cls, data_collection, data):
"""
When loading a new dataset, check if any existing
data has the same shape. If so, offer to
merge the two datasets
"""
shp = data.shape
other = [d for d in data_collection
if d.shape == shp and d is not data]
if not other:
return
merges = cls._choose_merge(data, other)
if merges:
data_collection.merge(*merges)
@staticmethod
def _choose_merge(data, other):
"""
Present an interface to the user for approving or rejecting
a proposed data merger. Returns a list of datasets from other
that the user has approved to merge with data
"""
raise NotImplementedError
@property
def viewers(self):
"""Return a tuple of tuples of viewers currently open
        The i'th tuple stores the viewers in the i'th tab
"""
raise NotImplementedError()
def __gluestate__(self, context):
viewers = [list(map(context.id, tab)) for tab in self.viewers]
data = self.session.data_collection
return dict(session=context.id(self.session), viewers=viewers,
data=context.id(data))
@classmethod
def __setgluestate__(cls, rec, context):
self = cls(data_collection=context.object(rec['data']))
# manually register the newly-created session, which
# the viewers need
context.register_object(rec['session'], self.session)
for i, tab in enumerate(rec['viewers']):
if self.tab(i) is None:
self.new_tab()
for v in tab:
viewer = context.object(v)
self.add_widget(viewer, tab=i, hold_position=True)
return self
class ViewerBase(HubListener, PropertySetMixin):
""" Base class for data viewers in an application """
# the glue.clients.layer_artist.LayerArtistContainer
# class/subclass to use
_container_cls = None
def __init__(self, session):
super(ViewerBase, self).__init__()
self._session = session
self._data = session.data_collection
self._hub = None
self._container = self._container_cls()
def register_to_hub(self, hub):
self._hub = hub
def unregister(self, hub):
""" Abstract method to unsubscribe from messages """
raise NotImplementedError
def request_add_layer(self, layer):
""" Issue a command to add a layer """
cmd = command.AddLayer(layer=layer, viewer=self)
self._session.command_stack.do(cmd)
def add_layer(self, layer):
if isinstance(layer, Data):
self.add_data(layer)
elif isinstance(layer, Subset):
self.add_subset(layer)
# else: SubsetGroup
def add_data(self, data):
""" Add a data instance to the viewer
This must be overridden by a subclass
:param data: Data object to add
:type data: :class:`~glue.core.data.Data`
"""
raise NotImplementedError
def add_subset(self, subset):
""" Add a subset to the viewer
This must be overridden by a subclass
:param subset: Subset instance to add
:type subset: :class:`~glue.core.subset.Subset`
"""
raise NotImplementedError
def apply_roi(self, roi):
"""
Apply an ROI to the client
:param roi: The ROI to apply
:type roi: :class:`~glue.core.roi.Roi`
"""
cmd = command.ApplyROI(client=self.client, roi=roi)
self._session.command_stack.do(cmd)
@property
def session(self):
return self._session
@property
def axes(self):
return self.client.axes
def layer_view(self):
raise NotImplementedError()
def options_widget(self):
raise NotImplementedError()
def move(self, x=None, y=None):
""" Reposition a viewer within the application.
:param x: Offset of viewer's left edge from the left edge
of the parent window. Optional
:type x: int
:param y: Offset of the viewer's top edge from the top edge
of the parent window. Optional
:type y: int
"""
raise NotImplementedError()
@property
def position(self):
""" Return the location of the viewer
:rtype: (x, y). Tuple of 2 integers
"""
raise NotImplementedError()
@property
def viewer_size(self):
""" Return the size of the viewer
:rtype: (width, height). Tuple of 2 ints
"""
raise NotImplementedError()
@viewer_size.setter
def viewer_size(self, value):
""" Resize the width and/or height of the viewer
:param value: (width, height)
:param width: new width. Optional.
:type width: int
:param height: new height. Optional.
:type height: int
"""
raise NotImplementedError()
def restore_layers(self, rec, context):
"""
Given a list of glue-serialized layers, restore them
to the viewer
"""
# if this viewer manages a client, rely on it to restore layers
if hasattr(self, 'client'):
return self.client.restore_layers(rec, context)
raise NotImplementedError()
@property
def layers(self):
"""Return a tuple of layers in this viewer.
A layer is a visual representation of a dataset or subset within
the viewer"""
return tuple(self._container)
def __gluestate__(self, context):
return dict(session=context.id(self._session),
size=self.viewer_size,
pos=self.position,
properties=dict((k, context.id(v))
for k, v in self.properties.items()),
layers=list(map(context.do, self.layers))
)
@classmethod
def __setgluestate__(cls, rec, context):
session = context.object(rec['session'])
result = cls(session)
result.register_to_hub(session.hub)
result.viewer_size = rec['size']
x, y = rec['pos']
result.move(x=x, y=y)
prop = dict((k, context.object(v)) for
k, v in rec['properties'].items())
result.restore_layers(rec['layers'], context)
result.properties = prop
return result
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/application_base.py",
"copies": "1",
"size": "11609",
"license": "bsd-3-clause",
"hash": 8375539739027677000,
"line_mean": 28.1683417085,
"line_max": 82,
"alpha_frac": 0.5879920751,
"autogenerated": false,
"ratio": 4.344685628742515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000030271841133377734,
"num_lines": 398
} |
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
from future.utils import iteritems
from typing import List, Dict, Any, Optional, NamedTuple, Tuple, Iterator, Iterable, Union, cast
from types import FrameType, TracebackType, CodeType, FunctionType, ModuleType
import typing
import ast
# noinspection PyCompatibility
import html
import inspect
import json
import os
import traceback
from collections import defaultdict, Sequence, Set, Mapping, deque, namedtuple, Counter
from functools import partial
from itertools import chain, islice
from threading import Lock
from uuid import uuid4
import hashlib
import sys
from asttokens import ASTTokens
from littleutils import group_by_key_func, only
from outdated import warn_if_outdated
from cached_property import cached_property
from cheap_repr import cheap_repr, try_register_repr
from cheap_repr.utils import safe_qualname, exception_string
from birdseye.db import Database, retry_db
from birdseye.tracer import TreeTracerBase, TracedFile, EnterCallInfo, ExitCallInfo, FrameInfo, ChangeValue, Loop
from birdseye import tracer
from birdseye.utils import correct_type, PY3, PY2, one_or_none, \
of_type, Deque, Text, flatten_list, lru_cache, ProtocolEncoder, IPYTHON_FILE_PATH, source_without_decorators, \
is_future_import, get_unfrozen_datetime, FILE_SENTINEL_NAME, read_source_file
from birdseye import __version__
try:
from numpy import ndarray
except ImportError:
class ndarray(object):
pass
try:
from pandas import DataFrame, Series
except ImportError:
class DataFrame(object):
pass
class Series(object):
pass
try:
from django.db.models import QuerySet
except ImportError:
class QuerySet(object):
pass
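# Illustrative note (hypothetical values): the stub classes above exist only so
# that isinstance() checks against ndarray / DataFrame / Series / QuerySet stay
# valid when numpy, pandas or Django are not installed; they simply never match.
def _example_optional_import_fallbacks():
    values = [1, 'text', [1, 2, 3]]
    special = [v for v in values
               if isinstance(v, (ndarray, DataFrame, Series, QuerySet))]
    assert special == []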
warn_if_outdated('birdseye', __version__)
CodeInfo = namedtuple('CodeInfo', 'db_func traced_file arg_names')
class BirdsEye(TreeTracerBase):
"""
Decorate functions with an instance of this class to debug them,
or just use the existing instance `eye`.
"""
def __init__(self, db_uri=None, num_samples=None):
"""
Set db_uri to specify where the database lives, as an alternative to
the environment variable BIRDSEYE_DB.
"""
super(BirdsEye, self).__init__()
self._db_uri = db_uri
self._code_infos = {} # type: Dict[CodeType, CodeInfo]
self._last_call_id = None
self._ipython_cell_value = None
self.num_samples = num_samples or dict(
big=dict(
attributes=50,
dict=50,
list=30,
set=30,
pandas_rows=20,
pandas_cols=100,
),
small=dict(
attributes=50,
dict=10,
list=6,
set=6,
pandas_rows=6,
pandas_cols=10,
),
)
@cached_property
def db(self):
return Database(self._db_uri)
def parse_extra(self, root, source, filename):
# type: (ast.Module, str, str) -> None
for node in ast.walk(root): # type: ast.AST
node._loops = tracer.loops(node)
if isinstance(node, ast.expr):
node._is_interesting_expression = is_interesting_expression(node)
@lru_cache()
def compile(self, source, filename, flags=0):
traced_file = super(BirdsEye, self).compile(source, filename, flags)
traced_file.tokens = ASTTokens(source, tree=traced_file.root)
return traced_file
def before_stmt(self, node, frame):
# type: (ast.stmt, FrameType) -> None
if frame.f_code not in self._code_infos:
return
if isinstance(node.parent, ast.For) and node is node.parent.body[0]:
self._add_iteration(node._loops, frame)
def before_expr(self, node, frame):
if isinstance(node.parent, ast.While) and node is node.parent.test:
self._add_iteration(node._loops, frame)
def _add_iteration(self, loops, frame):
# type: (typing.Sequence[Loop], FrameType) -> None
"""
Given one or more nested loops, add an iteration for the innermost
loop (the last in the sequence).
"""
iteration = self.stack[frame].iteration # type: Iteration
for i, loop_node in enumerate(loops):
loop = iteration.loops[loop_node._tree_index]
if i == len(loops) - 1:
loop.append(Iteration())
else:
iteration = loop.last()
def after_expr(self, node, frame, value, exc_value, exc_tb):
# type: (ast.expr, FrameType, Any, Optional[BaseException], Optional[TracebackType]) -> Optional[ChangeValue]
if _tracing_recursively(frame):
return None
if frame.f_code not in self._code_infos:
return None
if node._is_interesting_expression:
# If this is an expression statement and the last statement
# in the body, the value is returned from the cell magic
# to be displayed as usual
if (self._code_infos[frame.f_code].traced_file.is_ipython_cell
and isinstance(node.parent, ast.Expr)
and node.parent is node.parent.parent.body[-1]):
self._ipython_cell_value = value
if is_obvious_builtin(node, self.stack[frame].expression_values[node]):
return None
frame_info = self.stack[frame]
if exc_value:
node_value = self._exception_value(node, frame, exc_value)
else:
node_value = NodeValue.expression(
self.num_samples,
value,
level=max(1, 3 - len(node._loops) * (not self._is_first_loop_iteration(node, frame))),
)
self._set_node_value(node, frame, node_value)
self._check_inner_call(frame_info, node, node_value)
# i.e. is `node` the `y` in `[f(x) for x in y]`, making `node.parent` the `for x in y`
is_special_comprehension_iter = (
isinstance(node.parent, ast.comprehension) and
node is node.parent.iter and
# Generators execute in their own time and aren't directly attached to the parent frame
not isinstance(node.parent.parent, ast.GeneratorExp))
if not is_special_comprehension_iter:
return None
# Mark `for x in y` as a bit that executed, so it doesn't show as grey
self._set_node_value(node.parent, frame, NodeValue.covered())
if exc_value:
return None
# Track each iteration over `y` so that the 'loop' can be stepped through
loops = node._loops + (node.parent,) # type: Tuple[Loop, ...]
def comprehension_iter_proxy():
for item in value:
self._add_iteration(loops, frame)
yield item
# This effectively changes to code to `for x in comprehension_iter_proxy()`
return ChangeValue(comprehension_iter_proxy())
def _check_inner_call(self, frame_info, node, node_value):
# type: (FrameInfo, Union[ast.stmt, ast.expr], NodeValue) -> None
inner_calls = frame_info.inner_calls.pop(node, None)
if inner_calls:
node_value.set_meta('inner_calls', inner_calls)
def _is_first_loop_iteration(self, node, frame):
# type: (ast.AST, FrameType) -> bool
iteration = self.stack[frame].iteration # type: Iteration
for loop_node in node._loops: # type: ast.AST
loop = iteration.loops[loop_node._tree_index]
iteration = loop.last()
if iteration.index > 0:
return False
return True
def _set_node_value(self, node, frame, value):
# type: (ast.AST, FrameType, NodeValue) -> None
iteration = self.stack[frame].iteration # type: Iteration
for loop_node in node._loops: # type: ast.AST
loop = iteration.loops[loop_node._tree_index]
loop.recorded_node(node)
iteration = loop.last()
iteration.vals[node._tree_index] = value
def _exception_value(self, node, frame, exc_value):
# type: (Union[ast.expr, ast.stmt], FrameType, BaseException) -> NodeValue
value = NodeValue.exception(exc_value)
self._set_node_value(node, frame, value)
return value
def after_stmt(self, node, frame, exc_value, exc_traceback, exc_node):
# type: (ast.stmt, FrameType, Optional[BaseException], Optional[TracebackType], Optional[ast.AST]) -> Optional[bool]
if frame.f_code not in self._code_infos or _tracing_recursively(frame):
return None
if exc_value and node is exc_node:
value = self._exception_value(node, frame, exc_value)
else:
value = NodeValue.covered()
self._set_node_value(node, frame, value)
self._check_inner_call(self.stack[frame], node, value)
return None
def enter_call(self, enter_info):
# type: (EnterCallInfo) -> None
frame = enter_info.current_frame # type: FrameType
if frame.f_code not in self._code_infos or _tracing_recursively(frame):
return
frame_info = self.stack[frame]
frame_info.start_time = get_unfrozen_datetime()
frame_info.iteration = Iteration()
code_info = self._code_infos[frame.f_code]
if isinstance(enter_info.enter_node.parent, ast.Module):
arguments = []
else:
f_locals = frame.f_locals.copy() # type: Dict[str, Any]
arguments = [(name, f_locals.pop(name))
for name in code_info.arg_names
if name] + [
# Local variables other than actual arguments. These are variables from
# the enclosing scope. It's handy to treat them like arguments in the UI
it for it in f_locals.items()
if it[0][0] != '.' # Appears when using nested tuple arguments
]
frame_info.arguments = json.dumps([[k, cheap_repr(v)] for k, v in arguments])
frame_info.call_id = self._call_id()
frame_info.inner_calls = defaultdict(list)
prev = self.stack.get(enter_info.caller_frame)
if prev:
inner_calls = getattr(prev, 'inner_calls', None)
if inner_calls is not None:
inner_calls[enter_info.call_node].append(frame_info.call_id)
def _call_id(self):
# type: () -> Text
return uuid4().hex
def exit_call(self, exit_info):
# type: (ExitCallInfo) -> None
"""
This is where all the data collected during the call is gathered up
and sent to the database.
"""
frame = exit_info.current_frame # type: FrameType
if frame.f_code not in self._code_infos or _tracing_recursively(frame):
return
frame_info = self.stack[frame]
top_iteration = frame_info.iteration # type: Iteration
node_values = _deep_dict()
self._extract_node_values(top_iteration, (), node_values)
db_func = self._code_infos[frame.f_code].db_func
exc = exit_info.exc_value # type: Optional[Exception]
if exc:
traceback_str = ''.join(traceback.format_exception(type(exc), exc, exit_info.exc_tb))
exception = exception_string(exc)
else:
traceback_str = exception = None
@retry_db
def add_call():
Call = self.db.Call
call = Call(id=frame_info.call_id,
function_id=db_func,
arguments=frame_info.arguments,
return_value=cheap_repr(exit_info.return_value),
exception=exception,
traceback=traceback_str,
data=json.dumps(
dict(
node_values=node_values,
loop_iterations=top_iteration.extract_iterations()['loops'],
type_names=type_registry.names(),
num_special_types=type_registry.num_special_types,
),
cls=ProtocolEncoder,
separators=(',', ':')
),
start_time=frame_info.start_time)
with self.db.session_scope() as session:
session.add(call)
add_call()
self._last_call_id = frame_info.call_id
def _extract_node_values(self, iteration, path, node_values):
# type: (Iteration, Tuple[int, ...], dict) -> None
"""
Populates node_values with values inside iteration.
"""
# Each element of `path` is an index of a loop iteration
# e.g. given the nested loops:
#
# for i in [0, 1, 2]:
# for j in [0, 1, 2, 3]:
#
# path may be (i, j) for each of the iterations
for tree_index, node_value in iteration.vals.items():
# So this `full_path` is a tuple of ints, but the first
# int has a different meaning from the others
full_path = (tree_index,) + path
# Given a path (a, b, c) we're making node_values 'contain'
# this structure:
# {a: {b: {c: node_value}}}
d = node_values
for path_k in full_path[:-1]:
d = d[path_k]
d[full_path[-1]] = node_value
for loop in iteration.loops.values():
for i, iteration in enumerate(loop):
self._extract_node_values(iteration, path + (i,), node_values)
def trace_function(self, func):
# type: (FunctionType) -> FunctionType
new_func = super(BirdsEye, self).trace_function(func)
code_info = self._code_infos.get(new_func.__code__)
if code_info:
return new_func
lines, start_lineno = inspect.getsourcelines(func) # type: List[Text], int
end_lineno = start_lineno + len(lines)
name = safe_qualname(func)
source_file = inspect.getsourcefile(func)
if source_file.startswith('<ipython-input'):
filename = IPYTHON_FILE_PATH
else:
filename = os.path.abspath(source_file)
traced_file = new_func.traced_file
arg_info = inspect.getargs(new_func.__code__)
arg_names = list(chain(flatten_list(arg_info[0]), arg_info[1:])) # type: List[str]
self._trace(name, filename, traced_file, new_func.__code__, typ='function',
start_lineno=start_lineno, end_lineno=end_lineno,
arg_names=arg_names)
return new_func
def exec_ipython_cell(self, source, callback):
from IPython import get_ipython
shell = get_ipython()
filename = name = shell.compile.cache(source)
flags = shell.compile.flags
traced_file = self.compile(source, filename, flags)
traced_file.is_ipython_cell = True
for node in traced_file.root.body:
if is_future_import(node):
raise ValueError('from __future__ import ... statements '
'are not allowed in cells traced with %%eye')
shell.user_global_ns.update(self._trace_methods_dict(traced_file))
self._trace(name, filename, traced_file, traced_file.code, 'module', source)
try:
shell.ex(traced_file.code)
return self._ipython_cell_value
finally:
callback(self._last_call_id)
self._ipython_cell_value = None
def trace_this_module(self, context=0, deep=False):
frame = inspect.currentframe()
filename = None
while context >= 0:
frame = frame.f_back
filename = inspect.getsourcefile(frame)
if filename is not None:
context -= 1
filename = os.path.abspath(filename)
if frame.f_globals.get('__name__') != '__main__':
if PY3 and self._treetrace_hidden_with_stmt.__name__ not in frame.f_globals:
raise RuntimeError(
'To trace an imported module, you must import birdseye before '
'importing that module.')
return
lines = read_source_file(filename).splitlines()
lines[:frame.f_lineno] = [''] * frame.f_lineno
source = '\n'.join(lines)
self.exec_string(source, filename, frame.f_globals, frame.f_locals, deep)
sys.exit(0)
def exec_string(self, source, filename, globs=None, locs=None, deep=False):
globs = globs or {}
locs = locs or {}
traced_file = self.compile(source, filename)
globs.update(self._trace_methods_dict(traced_file))
self._trace(FILE_SENTINEL_NAME, filename, traced_file, traced_file.code, 'module', source)
if deep:
nodes_by_lineno = {
node.lineno: node
for node in traced_file.nodes
if isinstance(node, ast.FunctionDef)
}
def find_code(root_code):
# type: (CodeType) -> None
for code in root_code.co_consts: # type: CodeType
if not inspect.iscode(code) or code.co_name.startswith('<'):
continue
find_code(code)
lineno = code.co_firstlineno
node = nodes_by_lineno.get(lineno)
if not node:
continue
self._trace(
code.co_name, filename, traced_file, code,
typ='function',
source=source,
start_lineno=lineno,
end_lineno=node.last_token.end[0] + 1,
)
find_code(traced_file.code)
exec(traced_file.code, globs, locs)
def _trace(
self,
name,
filename,
traced_file,
code,
typ,
source='',
start_lineno=1,
end_lineno=None,
arg_names=(),
):
if not end_lineno:
end_lineno = start_lineno + len(source.splitlines())
nodes = list(self._nodes_of_interest(traced_file, start_lineno, end_lineno))
html_body = self._nodes_html(nodes, start_lineno, end_lineno, traced_file)
data_dict = dict(
# This maps each node to the loops enclosing that node
node_loops={
node._tree_index: [n._tree_index for n in node._loops]
for node, _ in nodes
if node._loops
},
)
if typ == 'function':
tokens = traced_file.tokens
func_node = only(node
for node, _ in nodes
if isinstance(node, ast.FunctionDef)
and node.first_token.start[0] == start_lineno)
func_startpos, source = source_without_decorators(tokens, func_node)
# These are for the PyCharm plugin
data_dict.update(
node_ranges=list(self._node_ranges(nodes, tokens, func_startpos)),
loop_ranges=list(self._loop_ranges(nodes, tokens, func_startpos)),
)
data = json.dumps(data_dict, sort_keys=True)
db_func = self._db_func(data, filename, html_body, name, start_lineno, source, typ)
self._code_infos[code] = CodeInfo(db_func, traced_file, arg_names)
def _loop_ranges(self, nodes, tokens, func_start):
# For a for loop, e.g.
#
# for x in y:
#
# this yields the range of the target 'x'.
#
# For a while loop, e.g.
#
# while x < 10:
#
# this yields the range of the condition 'x < 10'.
for node, (classes, _, __) in nodes:
if 'loop' not in classes:
continue
try:
target = node.target # for loop
except AttributeError:
target = node.test # while loop
start, end = tokens.get_text_range(target)
start -= func_start
end -= func_start
yield dict(
tree_index=node._tree_index,
start=start,
end=end
)
def _node_ranges(self, nodes, tokens, func_start):
for node, (classes, _, __) in nodes:
start, end = tokens.get_text_range(node)
start -= func_start
end -= func_start
if start < 0:
assert (end < 0 # nodes before the def, i.e. decorators
or isinstance(node, ast.FunctionDef))
continue
yield dict(
tree_index=node._tree_index,
start=start,
end=end,
depth=node._depth,
classes=classes,
)
@retry_db
def _db_func(self, data, filename, html_body, name, start_lineno, source, typ):
"""
Retrieve the Function object from the database if one exists, or create one.
"""
def h(s):
return hashlib.sha256(s.encode('utf8')).hexdigest()
function_hash = h(filename + name + html_body + data + str(start_lineno))
Function = self.db.Function
with self.db.session_scope() as session:
db_func = one_or_none(session.query(Function).filter_by(hash=function_hash)) # type: Optional[Function]
if not db_func:
db_func = Function(file=filename,
name=name,
type=typ,
html_body=html_body,
lineno=start_lineno,
data=data,
body_hash=h(source),
hash=function_hash)
session.add(db_func)
session.commit() # ensure .id exists
assert isinstance(db_func.id, int)
return db_func.id
def _nodes_of_interest(self, traced_file, start_lineno, end_lineno):
# type: (TracedFile, int, int) -> Iterator[Tuple[ast.AST, Tuple]]
"""
Nodes that may have a value, show up as a box in the UI, and lie within the
given line range.
"""
for node in traced_file.nodes:
classes = []
if (isinstance(node, (ast.While, ast.For, ast.comprehension)) and
not isinstance(node.parent, ast.GeneratorExp)):
classes.append('loop')
if isinstance(node, ast.stmt):
classes.append('stmt')
if isinstance(node, ast.expr):
if not node._is_interesting_expression:
continue
elif not classes:
continue
assert isinstance(node, ast.AST)
# In particular FormattedValue is missing this
if not hasattr(node, 'first_token'):
continue
if not start_lineno <= node.first_token.start[0] <= end_lineno:
continue
start, end = traced_file.tokens.get_text_range(node) # type: int, int
if start == end == 0:
continue
yield node, (classes, start, end)
def _nodes_html(self, nodes, start_lineno, end_lineno, traced_file):
# type: (list, int, int, TracedFile) -> str
"""
The algorithm for generating the HTML works as follows. We generate a list
of HTMLPositions, which are essentially places to insert HTML into the source plus some
metadata. The order of the fields of HTMLPosition ensure that when the list is sorted,
the resulting HTML is valid and correct. Specifically, the fields are:
1. index: the index in the source string where the HTML would be inserted
2. is_start: Indicates if this piece of HTML is the start of a tag, rather than the end.
Ends should appear first, so that the resulting HTML looks like:
<span> ... </span><span> ... </span>
rather than:
<span> ... <span></span> ... </span>
(I think this might actually be unnecessary, since I can't think of any cases of two
expressions right next to each other with nothing in between)
3. depth: the depth of the corresponding node in the AST. We want the start of a tag from
a node to appear before the start of a tag nested within, e.g. `foo()` should become:
<span [for foo()]><span [for foo]>foo</span>()</span>
rather than:
<span [for foo]><span [for foo()]>foo</span>()</span>
4. html: the actual HTML to insert. Not important for ordering.
Mostly the list contains pairs of HTMLPositions corresponding to AST nodes, one for the
start and one for the end.
After the list is sorted, the HTML generated is essentially:
source[0:positions[0].index] + positions[0].html + source[positions[0].index:positions[1].index] + positions[1].html + ...
"""
traced_file.root._depth = 0
for node in ast.walk(traced_file.root): # type: ast.AST
for child in ast.iter_child_nodes(node):
child._depth = node._depth + 1
positions = [] # type: List[HTMLPosition]
for node, (classes, start, end) in nodes:
# noinspection PyArgumentList
positions.extend(map(
HTMLPosition,
[start, end],
[True, False], # is_start
[node._depth, node._depth],
['<span data-index="%s" class="%s">' % (node._tree_index, ' '.join(classes)),
'</span>']))
end_lineno = self._separate_comprehensions(
[n[0] for n in nodes],
end_lineno, positions, traced_file)
# This just makes the loop below simpler
positions.append(HTMLPosition(len(traced_file.source), False, 0, ''))
positions.sort()
html_parts = []
start = 0
for position in positions:
html_parts.append(html.escape(traced_file.source[start:position.index]))
html_parts.append(position.html)
start = position.index
html_body = ''.join(html_parts)
html_body = '\n'.join(html_body.split('\n')[start_lineno - 1:end_lineno - 1])
return html_body.strip('\n')
def _separate_comprehensions(self, nodes, end_lineno, positions, traced_file):
# type: (list, int, List[HTMLPosition], TracedFile) -> int
"""
Comprehensions (e.g. list comprehensions) are troublesome because they can
be navigated like loops, and the buttons for these need to be on separate lines.
This function inserts newlines to turn:
[x + y for x in range(3) for y in range(5)] and
[[x + y for x in range(3)] for y in range(5)]
into
[x + y for x in range(3)
for y in range(5)] and
[[x + y for x in range(3)]
for y in range(5)]
"""
comprehensions = group_by_key_func(of_type((ast.comprehension, ast.While, ast.For), nodes),
lambda c: c.first_token.start[0]
) # type: Dict[Any, Iterable[ast.comprehension]]
def get_start(n):
# type: (ast.AST) -> int
return traced_file.tokens.get_text_range(n)[0]
for comp_list in comprehensions.values():
prev_start = None # type: Optional[int]
for comp in sorted(comp_list, key=lambda c: c.first_token.startpos):
if isinstance(comp, ast.comprehension) and comp is comp.parent.generators[0]:
start = get_start(comp.parent)
if prev_start is not None and start < prev_start:
start = get_start(comp)
else:
start = get_start(comp)
if prev_start is not None:
positions.append(HTMLPosition(start, True, 0, '\n '))
end_lineno += 1
prev_start = start
return end_lineno
eye = BirdsEye()
HTMLPosition = NamedTuple('HTMLPosition', [
('index', int),
('is_start', bool),
('depth', int),
('html', str),
])
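# Illustrative sketch, not part of the original module: a self-contained demo of
# the HTMLPosition ordering scheme described in the _nodes_html docstring above.
# The snippet, class names and depths below are made up for illustration; the
# trailing sentinel mirrors the one appended in _nodes_html.
def _demo_html_positions():
    import html as _html
    source = 'foo()'
    positions = [
        HTMLPosition(0, True, 1, '<span class="outer">'),   # start of foo()
        HTMLPosition(0, True, 2, '<span class="inner">'),   # start of foo
        HTMLPosition(3, False, 2, '</span>'),                # end of foo
        HTMLPosition(5, False, 1, '</span>'),                # end of foo()
        HTMLPosition(len(source), False, 0, ''),             # sentinel
    ]
    positions.sort()
    parts = []
    start = 0
    for position in positions:
        parts.append(_html.escape(source[start:position.index]))
        parts.append(position.html)
        start = position.index
    # '<span class="outer"><span class="inner">foo</span>()</span>'
    return ''.join(parts)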
def _deep_dict():
return defaultdict(_deep_dict)
_bad_codes = (eye.enter_call.__code__,
eye.exit_call.__code__,
eye.after_expr.__code__,
eye.after_stmt.__code__)
def _tracing_recursively(frame):
    while frame:
        if frame.f_code in _bad_codes:
            return True
        frame = frame.f_back
    return False
class Iteration(object):
"""
Corresponds to an iteration of a loop during a call, OR
the call itself (FrameInfo.iteration).
"""
def __init__(self):
# Mapping of nodes (via node._tree_index) to the value of that
# node in this iteration. Only contains nodes within the corresponding
# loop or at the top of the function, but not in loops further within
# (those will be somewhere within self.loops)
# Therefore those nodes have at most one value.
self.vals = {} # type: Dict[int, NodeValue]
# Mapping of loop nodes (via node._tree_index) to IterationLists
# for loops that happened during this iteration
self.loops = defaultdict(IterationList) # type: Dict[int, IterationList]
# 0-based index of this iteration
self.index = None # type: int
self.keep = False
def extract_iterations(self):
# type: () -> Dict[str, Union[int, Dict]]
return {
'index': self.index,
'loops': {
tree_index: [iteration.extract_iterations()
for iteration in iteration_list]
for tree_index, iteration_list in self.loops.items()
}
}
class IterationList(Iterable[Iteration]):
"""
A list of Iterations, corresponding to a run of a loop.
If the loop has many iterations, only contains the first and last few
and any in the middle where unique nodes had values, so that
any node which appeared during this loop exists in at least some iterations.
"""
side_len = 3
def __init__(self):
# Contains the first few iterations
# and any after that have unique nodes in them
self.start = [] # type: List[Iteration]
# Contains the last few iterations
self.end = deque(maxlen=self.side_len) # type: Deque[Iteration]
# Total number of iterations in the loop, not all of which
# are kept
self.length = 0 # type: int
# Number of times each node has been recorded in this loop
self.recorded = Counter()
def append(self, iteration):
# type: (Iteration) -> None
if self.length < self.side_len:
self.start.append(iteration)
else:
# If self.end is too long, the first element self.end[0]
# is about to be dropped by the deque. If that iteration
# should be kept because of some node that was recorded,
# add it to self.start
if len(self.end) >= self.side_len and self.end[0].keep:
self.start.append(self.end[0])
self.end.append(iteration)
iteration.index = self.length
self.length += 1
def __iter__(self):
# type: () -> Iterator[Iteration]
return chain(self.start, self.end)
def last(self):
# type: () -> Iteration
if self.end:
return self.end[-1]
else:
return self.start[-1]
def recorded_node(self, node):
# type: (ast.AST) -> None
if self.recorded[node] >= 2:
# We've already seen this node enough
return
# This node is new(ish), make sure we keep this iteration
self.last().keep = True
self.recorded[node] += 1
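# Illustrative sketch, not part of the original module: appending ten iterations
# shows that IterationList keeps only the first and last `side_len` of them (by
# default, indices 0-2 and 7-9) while still recording the full length.
def _demo_iteration_list():
    ilist = IterationList()
    for _ in range(10):
        ilist.append(Iteration())
    kept = [iteration.index for iteration in ilist]
    # kept == [0, 1, 2, 7, 8, 9] and ilist.length == 10
    return kept, ilist.length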
class TypeRegistry(object):
basic_types = (type(None), bool, int, float, complex)
if PY2:
basic_types += (long,)
special_types = basic_types + (list, dict, tuple, set, frozenset, str)
if PY2:
special_types += (unicode if PY2 else bytes,)
num_special_types = len(special_types)
def __init__(self):
self.lock = Lock()
self.data = defaultdict(lambda: len(self.data)) # type: Dict[type, int]
for t in self.special_types:
_ = self.data[t]
def __getitem__(self, item):
t = correct_type(item)
with self.lock:
return self.data[t]
def names(self):
# type: () -> List[str]
rev = dict((v, k) for k, v in self.data.items())
return [safe_qualname(rev[i]) for i in range(len(rev))]
type_registry = TypeRegistry()
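# Illustrative sketch, not part of the original module: TypeRegistry hands out a
# stable integer index per type, pre-seeding the special types above, so values
# of the same type always map to the same index. A fresh registry is used here
# to avoid touching the shared type_registry instance.
def _demo_type_registry():
    registry = TypeRegistry()
    i1 = registry[123]   # index of int, one of the pre-registered special types
    i2 = registry[456]   # same type, so the same index comes back
    assert i1 == i2 < registry.num_special_types
    return registry.names()[i1]   # qualified name in index order, e.g. 'int'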
class NodeValue(object):
"""
The 'value' of a node during a particular iteration.
This can mean different things, see the classmethods.
Can also contain some metadata, including links to other calls.
"""
__slots__ = ('val_repr', 'type_index', 'meta', 'children')
def __init__(self, val_repr, type_index):
self.val_repr = val_repr # type: str
self.type_index = type_index # type: int
self.meta = None # type: Optional[Dict[str, Any]]
self.children = None # type: Optional[List[Tuple[str, NodeValue]]]
def set_meta(self, key, value):
# type: (str, Any) -> None
self.meta = self.meta or {}
self.meta[key] = value
def add_child(self, samples, level, key, value):
# type: (dict, int, str, Any) -> None
self.children = self.children or []
self.children.append((key, NodeValue.expression(samples, value, level)))
def as_json(self):
result = [self.val_repr, self.type_index, self.meta or {}] # type: list
if self.children:
result.extend(self.children)
return result
@classmethod
def covered(cls):
"""
Represents a bit of code, usually a statement, that executed successfully but
doesn't have an actual value.
"""
return cls('', -2)
@classmethod
def exception(cls, exc_value):
"""
Means that exc_value was raised by a node when executing, and not any inner node.
"""
return cls(exception_string(exc_value), -1)
@classmethod
def expression(cls, samples, val, level):
# type: (dict, Any, int) -> NodeValue
"""
The value of an expression or one of its children, with attributes,
dictionary items, etc as children. Has a max depth of `level` levels.
"""
result = cls(cheap_repr(val), type_registry[val])
if isinstance(val, (TypeRegistry.basic_types, BirdsEye)):
return result
length = None
if not isinstance(val, QuerySet): # len triggers a database query
try:
length = len(val)
except:
pass
else:
result.set_meta('len', length)
if isinstance(val, ModuleType):
level = min(level, 2)
add_child = partial(result.add_child, samples, level - 1)
if isinstance(val, (Series, ndarray)):
attrs = ['dtype']
if isinstance(val, ndarray):
attrs.append('shape')
for name in attrs:
try:
attr = getattr(val, name)
except AttributeError:
pass
else:
add_child(name, attr)
if level >= 3 or level >= 2 and isinstance(val, Series):
sample_type = 'big'
else:
sample_type = 'small'
samples = samples[sample_type]
# Always expand DataFrames and Series regardless of level to
# make the table view of DataFrames work
if isinstance(val, DataFrame):
meta = {}
result.set_meta('dataframe', meta)
max_rows = samples['pandas_rows']
max_cols = samples['pandas_cols']
if length > max_rows + 2:
meta['row_break'] = max_rows // 2
columns = val.columns
num_cols = len(columns)
if num_cols > max_cols + 2:
meta['col_break'] = max_cols // 2
indices = set(_sample_indices(num_cols, max_cols))
for i, (formatted_name, label) in enumerate(zip(val.columns.format(sparsify=False),
val.columns)):
if i in indices:
add_child(formatted_name, val[label])
return result
if isinstance(val, Series):
for i in _sample_indices(length, samples['pandas_rows']):
try:
k = val.index[i:i + 1].format(sparsify=False)[0]
v = val.iloc[i]
except:
pass
else:
add_child(k, v)
return result
if (level <= 0 or
isinstance(val,
(str, bytes, range)
if PY3 else
(str, unicode, xrange))):
return result
if isinstance(val, (Sequence, ndarray)) and length is not None:
for i in _sample_indices(length, samples['list']):
try:
v = val[i]
except:
pass
else:
add_child(str(i), v)
if isinstance(val, Mapping):
for k, v in islice(_safe_iter(val, iteritems), samples['dict']):
add_child(cheap_repr(k), v)
if isinstance(val, Set):
vals = _safe_iter(val)
num_items = samples['set']
if length is None or length > num_items + 2:
vals = islice(vals, num_items)
for i, v in enumerate(vals):
add_child('<%s>' % i, v)
d = getattr(val, '__dict__', None)
if d:
for k in sorted(islice(_safe_iter(d),
samples['attributes']),
key=str):
v = d[k]
if isinstance(v, TracedFile):
continue
add_child(str(k), v)
else:
for s in sorted(getattr(type(val), '__slots__', None) or ()):
try:
attr = getattr(val, s)
except AttributeError:
pass
else:
add_child(str(s), attr)
return result
def _safe_iter(val, f=lambda x: x):
try:
for x in f(val):
yield x
except:
pass
def _sample_indices(length, max_length):
if length <= max_length + 2:
return range(length)
else:
return chain(range(max_length // 2),
range(length - max_length // 2,
length))
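# Illustrative sketch, not part of the original module: _sample_indices keeps
# every index for short sequences and only the head and tail for long ones.
def _demo_sample_indices():
    short = list(_sample_indices(4, 6))    # [0, 1, 2, 3]
    long_ = list(_sample_indices(10, 4))   # [0, 1, 8, 9]
    return short, long_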
@try_register_repr('pandas', 'Series')
def _repr_series_one_line(x, helper):
n = len(x)
if n == 0:
return repr(x)
newlevel = helper.level - 1
pieces = []
maxparts = _repr_series_one_line.maxparts
for i in _sample_indices(n, maxparts):
k = x.index[i:i + 1].format(sparsify=False)[0]
v = x.iloc[i]
pieces.append('%s = %s' % (k, cheap_repr(v, newlevel)))
if n > maxparts + 2:
pieces.insert(maxparts // 2, '...')
return '; '.join(pieces)
def is_interesting_expression(node):
# type: (ast.AST) -> bool
"""
If this expression has a value that may not be exactly what it looks like,
return True. Put differently, return False if this is just a literal.
"""
return (isinstance(node, ast.expr) and
not (isinstance(node, (ast.Num, ast.Str, getattr(ast, 'NameConstant', ()))) or
isinstance(getattr(node, 'ctx', None),
(ast.Store, ast.Del)) or
(isinstance(node, ast.UnaryOp) and
isinstance(node.op, (ast.UAdd, ast.USub)) and
isinstance(node.operand, ast.Num)) or
(isinstance(node, (ast.List, ast.Tuple, ast.Dict)) and
not any(is_interesting_expression(n) for n in ast.iter_child_nodes(node)))))
def is_obvious_builtin(node, value):
# type: (ast.expr, Any) -> bool
"""
Return True if this node looks like a builtin and it really is
(i.e. hasn't been shadowed).
"""
# noinspection PyUnresolvedReferences
builtins = cast(dict, __builtins__)
return ((isinstance(node, ast.Name) and
node.id in builtins and
builtins[node.id] is value) or
isinstance(node, getattr(ast, 'NameConstant', ())))
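# Illustrative sketch, not part of the original module: is_interesting_expression
# skips plain literals but keeps expressions whose value is not obvious from the
# source, such as calls and name lookups.
def _demo_is_interesting_expression():
    import ast as _ast
    literal = _ast.parse('42', mode='eval').body      # a bare literal
    call = _ast.parse('foo(42)', mode='eval').body    # value not obvious from source
    # Expected on the Python versions this module targets: (False, True)
    return is_interesting_expression(literal), is_interesting_expression(call)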
| {
"repo_name": "alexmojaki/executing",
"path": "tests/samples/bird.py",
"copies": "1",
"size": "41548",
"license": "mit",
"hash": -8994771742548180000,
"line_mean": 35.4776119403,
"line_max": 130,
"alpha_frac": 0.5481852315,
"autogenerated": false,
"ratio": 4.111627906976744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5159813138476743,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ginga.misc import log
from ginga import toolkit
try:
toolkit.use('qt')
from ginga.gw import ColorBar
except ImportError:
# older versions of ginga
from ginga.qtw import ColorBar
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from qtpy import QtWidgets
from glue.plugins.ginga_viewer.qt.client import GingaClient
from glue.viewers.image.qt import ImageWidgetBase
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.plugins.ginga_viewer.qt.utils import ginga_graphic_to_roi
try:
from ginga.gw import Readout
except ImportError: # older versions of ginga
from ginga.qtw import Readout
__all__ = ['GingaWidget']
class GingaWidget(ImageWidgetBase):
LABEL = "Ginga Viewer"
_toolbar_cls = BasicToolbar
tools = ['ginga:rectangle', 'ginga:circle', 'ginga:polygon', 'ginga:pan',
'ginga:freepan', 'ginga:rotate', 'ginga:contrast', 'ginga:cuts',
'ginga:colormap', 'ginga:slicer', 'ginga:spectrum']
def __init__(self, session, parent=None):
self.logger = log.get_logger(name='ginga', level=20, null=True,
# uncomment for debugging
# log_stderr=True
)
self.viewer = ImageViewCanvas(self.logger, render='widget')
self.canvas = self.viewer
# prevent widget from grabbing focus
try:
self.canvas.set_enter_focus(False)
except AttributeError:
self.canvas.set_follow_focus(False)
# enable interactive features
bindings = self.canvas.get_bindings()
bindings.enable_all(True)
self.canvas.add_callback('none-move', self.motion_readout)
self.canvas.add_callback('draw-event', self._apply_roi_cb)
self.canvas.add_callback('draw-down', self._clear_roi_cb)
self.canvas.enable_draw(False)
self.canvas.enable_autozoom('off')
self.canvas.set_zoom_algorithm('rate')
self.canvas.set_zoomrate(1.4)
bm = self.canvas.get_bindmap()
bm.add_callback('mode-set', self.mode_set_cb)
self.mode_w = None
self.mode_actns = {}
# Create settings and set defaults
settings = self.canvas.get_settings()
self.settings = settings
settings.getSetting('cuts').add_callback('set', self.cut_levels_cb)
settings.set(autozoom='off', autocuts='override',
autocenter='override')
# make color bar, with color maps shared from ginga canvas
rgbmap = self.viewer.get_rgbmap()
self.colorbar = ColorBar.ColorBar(self.logger)
rgbmap.add_callback('changed', self.rgbmap_cb, self.viewer)
self.colorbar.set_rgbmap(rgbmap)
# make coordinates/value readout
self.readout = Readout.Readout(-1, 20)
self.roi_tag = None
super(GingaWidget, self).__init__(session, parent)
def make_client(self):
return GingaClient(self._data, self.viewer, self._layer_artist_container)
def make_central_widget(self):
topw = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.viewer.get_widget(), stretch=1)
cbar_w = self.colorbar.get_widget()
if not isinstance(cbar_w, QtWidgets.QWidget):
# ginga wrapped widget
cbar_w = cbar_w.get_widget()
layout.addWidget(cbar_w, stretch=0)
readout_w = self.readout.get_widget()
if not isinstance(readout_w, QtWidgets.QWidget):
# ginga wrapped widget
readout_w = readout_w.get_widget()
layout.addWidget(readout_w, stretch=0)
topw.setLayout(layout)
return topw
def match_colorbar(self, canvas, colorbar):
rgbmap = self.viewer.get_rgbmap()
loval, hival = self.viewer.get_cut_levels()
colorbar.set_range(loval, hival)
colorbar.set_rgbmap(rgbmap)
def rgbmap_cb(self, rgbmap, canvas):
self.match_colorbar(canvas, self.colorbar)
def cut_levels_cb(self, setting, tup):
(loval, hival) = tup
self.colorbar.set_range(loval, hival)
def _set_roi_mode(self, name, tf):
self.canvas.enable_draw(True)
# XXX need better way of setting draw contexts
self.canvas.draw_context = self
self.canvas.set_drawtype(name, color='cyan', linestyle='dash')
bm = self.viewer.get_bindmap()
bm.set_mode('draw', mode_type='locked')
def _clear_roi_cb(self, canvas, *args):
try:
self.canvas.deleteObjectByTag(self.roi_tag)
except:
pass
def _apply_roi_cb(self, canvas, tag):
if self.canvas.draw_context is not self:
return
self.roi_tag = tag
obj = self.canvas.getObjectByTag(self.roi_tag)
roi = ginga_graphic_to_roi(obj)
# delete outline
self.canvas.deleteObject(obj, redraw=False)
self.apply_roi(roi)
def _tweak_geometry(self):
super(GingaWidget, self)._tweak_geometry()
# rgb mode not supported yet, so hide option
self.ui.monochrome.hide()
self.ui.rgb.hide()
def motion_readout(self, canvas, button, data_x, data_y):
"""This method is called when the user moves the mouse around the Ginga
canvas.
"""
d = self.client.point_details(data_x, data_y)
# Get the value under the data coordinates
try:
# value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = self.viewer.get_data(int(data_x + 0.5), int(data_y + 0.5))
except Exception:
value = None
x_lbl, y_lbl = d['labels'][0], d['labels'][1]
# x_txt, y_txt = d['world'][0], d['world'][1]
text = "%s %s X=%.2f Y=%.2f Value=%s" % (
x_lbl, y_lbl, data_x, data_y, value)
self.readout.set_text(text)
def mode_cb(self, modname, tf):
"""This method is called when a toggle button in the toolbar is pressed
selecting one of the modes.
"""
bm = self.viewer.get_bindmap()
if not tf:
bm.reset_mode(self.viewer)
return
bm.set_mode(modname, mode_type='locked')
return True
def mode_set_cb(self, bm, modname, mtype):
"""This method is called when a mode is selected in the viewer widget.
        NOTE: it may be called when mode_cb() is not called (for example, when
        a keypress initiates a mode); the converse is not true, however: calling
        mode_cb() will always result in this method also being called.
        This logic ensures that the toggle buttons are left in a sane state
        that reflects the current mode, however it was initiated.
"""
if modname in self.mode_actns:
if self.mode_w and (self.mode_w != self.mode_actns[modname]):
self.mode_w.setChecked(False)
self.mode_w = self.mode_actns[modname]
self.mode_w.setChecked(True)
elif self.mode_w:
# keystroke turned on a mode for which we have no GUI button
# and a GUI button is selected--unselect it
self.mode_w.setChecked(False)
self.mode_w = None
return True
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/ginga_viewer/qt/viewer_widget.py",
"copies": "1",
"size": "7539",
"license": "bsd-3-clause",
"hash": 4977925983985761000,
"line_mean": 35.0717703349,
"line_max": 82,
"alpha_frac": 0.6129460141,
"autogenerated": false,
"ratio": 3.6158273381294963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4728773352229496,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glob import glob
from .chunks import Chunks, chunks
from .resource import resource
from .utils import copydoc
from toolz import memoize, first
from datashape import discover, var
import os
class _Directory(Chunks):
""" A directory of files on disk
For typed containers see the ``Directory`` function which generates
parametrized Directory classes.
>>> from odo import CSV
>>> c = Directory(CSV)('path/to/data/') # doctest: +SKIP
Normal use through resource strings
>>> r = resource('path/to/data/*.csv') # doctest: +SKIP
>>> r = resource('path/to/data/') # doctest: +SKIP
"""
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
return (resource(os.path.join(self.path, fn), **self.kwargs)
for fn in sorted(os.listdir(self.path)))
@memoize
@copydoc(_Directory)
def Directory(cls):
""" Parametrized DirectoryClass """
return type('Directory(%s)' % cls.__name__, (_Directory, chunks(cls)), {})
re_path_sep = os.path.sep
if re_path_sep == '\\':
re_path_sep = '\\\\'
@discover.register(_Directory)
def discover_Directory(c, **kwargs):
return var * discover(first(c)).subshape[0]
@resource.register('.+' + re_path_sep + '\*\..+', priority=15)
def resource_directory(uri, **kwargs):
path = uri.rsplit(os.path.sep, 1)[0]
try:
one_uri = first(glob(uri))
except (OSError, StopIteration):
return _Directory(path, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(path, **kwargs)
@resource.register('.+' + re_path_sep, priority=9)
def resource_directory_with_trailing_slash(uri, **kwargs):
try:
one_uri = os.listdir(uri)[0]
except (OSError, IndexError):
return _Directory(uri, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(uri, **kwargs)
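# Illustrative usage sketch, not part of the original module, assuming odo's CSV
# backend is importable; the path below is hypothetical and never touched.
def _demo_directory():
    from odo import CSV
    DirCSV = Directory(CSV)
    assert DirCSV is Directory(CSV)        # memoized: the same class each time
    assert issubclass(DirCSV, _Directory)
    d = DirCSV('path/to/data/')            # __init__ only stores the path
    return d.path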
| {
"repo_name": "quantopian/odo",
"path": "odo/directory.py",
"copies": "4",
"size": "1996",
"license": "bsd-3-clause",
"hash": -5126755256467711000,
"line_mean": 27.1126760563,
"line_max": 78,
"alpha_frac": 0.6407815631,
"autogenerated": false,
"ratio": 3.5899280575539567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011323597026072056,
"num_lines": 71
} |
from __future__ import absolute_import, division, print_function
from glob import glob
from .chunks import Chunks
from .resource import resource
from toolz import memoize, first
from datashape import discover, var
import os
class _Directory(Chunks):
""" A directory of files on disk
For typed containers see the ``Directory`` function which generates
parametrized Directory classes.
>>> from into import CSV
>>> c = Directory(CSV)('path/to/data/')
Normal use through resource strings
>>> r = resource('path/to/data/*.csv')
>>> r = resource('path/to/data/')
"""
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
return (resource(os.path.join(self.path, fn), **self.kwargs)
for fn in sorted(os.listdir(self.path)))
def Directory(cls):
""" Parametrized DirectoryClass """
return type('Directory(%s)' % cls.__name__, (_Directory,), {'container': cls})
Directory.__doc__ = _Directory.__doc__
Directory = memoize(Directory)
@discover.register(_Directory)
def discover_Directory(c, **kwargs):
return var * discover(first(c)).subshape[0]
@resource.register('.+' + os.path.sep + '\*\..+', priority=15)
def resource_directory(uri, **kwargs):
path = uri.rsplit(os.path.sep, 1)[0]
try:
one_uri = first(glob(uri))
except (OSError, StopIteration):
return _Directory(path, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(path, **kwargs)
@resource.register('.+' + os.path.sep, priority=9)
def resource_directory_with_trailing_slash(uri, **kwargs):
try:
one_uri = os.listdir(uri)[0]
except (OSError, IndexError):
return _Directory(uri, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(uri, **kwargs)
| {
"repo_name": "mrocklin/into",
"path": "into/directory.py",
"copies": "1",
"size": "1877",
"license": "bsd-3-clause",
"hash": 7361175921667981000,
"line_mean": 26.6029411765,
"line_max": 82,
"alpha_frac": 0.6446457112,
"autogenerated": false,
"ratio": 3.7094861660079053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9847771328927483,
"avg_score": 0.0012721096560845751,
"num_lines": 68
} |
from __future__ import absolute_import, division, print_function
from glob import glob
from .chunks import Chunks
from .resource import resource
from .utils import copydoc
from toolz import memoize, first
from datashape import discover, var
import os
class _Directory(Chunks):
""" A directory of files on disk
For typed containers see the ``Directory`` function which generates
parametrized Directory classes.
>>> from odo import CSV
>>> c = Directory(CSV)('path/to/data/') # doctest: +SKIP
Normal use through resource strings
>>> r = resource('path/to/data/*.csv') # doctest: +SKIP
>>> r = resource('path/to/data/') # doctest: +SKIP
"""
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
return (resource(os.path.join(self.path, fn), **self.kwargs)
for fn in sorted(os.listdir(self.path)))
@memoize
@copydoc(_Directory)
def Directory(cls):
""" Parametrized DirectoryClass """
return type('Directory(%s)' % cls.__name__, (_Directory,), {'container': cls})
re_path_sep = os.path.sep
if re_path_sep == '\\':
re_path_sep = '\\\\'
@discover.register(_Directory)
def discover_Directory(c, **kwargs):
return var * discover(first(c)).subshape[0]
@resource.register('.+' + re_path_sep + '\*\..+', priority=15)
def resource_directory(uri, **kwargs):
path = uri.rsplit(os.path.sep, 1)[0]
try:
one_uri = first(glob(uri))
except (OSError, StopIteration):
return _Directory(path, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(path, **kwargs)
@resource.register('.+' + re_path_sep, priority=9)
def resource_directory_with_trailing_slash(uri, **kwargs):
try:
one_uri = os.listdir(uri)[0]
except (OSError, IndexError):
return _Directory(uri, **kwargs)
subtype = type(resource(one_uri, **kwargs))
return Directory(subtype)(uri, **kwargs)
| {
"repo_name": "Dannnno/odo",
"path": "odo/directory.py",
"copies": "5",
"size": "1992",
"license": "bsd-3-clause",
"hash": -4025174196731448300,
"line_mean": 27.0563380282,
"line_max": 82,
"alpha_frac": 0.640562249,
"autogenerated": false,
"ratio": 3.595667870036101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6736230119036101,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glob import glob
import os
try:
from skimage.io import imread as sk_imread
except ImportError:
pass
from .core import Array
from ..base import tokenize
def add_leading_dimension(x):
return x[None, ...]
def imread(filename, imread=None, preprocess=None):
""" Read a stack of images into a dask array
Parameters
----------
filename: string
A globstring like 'myfile.*.png'
imread: function (optional)
Optionally provide custom imread function.
Function should expect a filename and produce a numpy array.
Defaults to ``skimage.io.imread``.
preprocess: function (optional)
Optionally provide custom function to preprocess the image.
Function should expect a numpy array for a single image.
Examples
--------
>>> from dask.array.image import imread
>>> im = imread('2015-*-*.png') # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(365, 1000, 1000, 3)
Returns
-------
Dask array of all images stacked along the first dimension. All images
will be treated as individual chunks
"""
imread = imread or sk_imread
filenames = sorted(glob(filename))
if not filenames:
raise ValueError("No files found under name %s" % filename)
name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))
sample = imread(filenames[0])
if preprocess:
sample = preprocess(sample)
keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]
if preprocess:
values = [(add_leading_dimension, (preprocess, (imread, fn)))
for fn in filenames]
else:
values = [(add_leading_dimension, (imread, fn))
for fn in filenames]
dsk = dict(zip(keys, values))
chunks = ((1, ) * len(filenames), ) + tuple((d, ) for d in sample.shape)
return Array(dsk, name, chunks, sample.dtype)
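# Illustrative usage sketch, not part of the original module: the `imread`
# argument accepts any loader, so the stacking behaviour can be exercised with
# plain .npy files instead of images. The directory and file names are made up.
def _demo_imread(tmpdir):
    import numpy as np
    for i in range(3):
        np.save(os.path.join(tmpdir, 'frame.%d.npy' % i), np.zeros((4, 5)))
    stack = imread(os.path.join(tmpdir, 'frame.*.npy'), imread=np.load)
    # one chunk per file, stacked along a new leading axis
    assert stack.shape == (3, 4, 5)
    assert stack.chunks == ((1, 1, 1), (4,), (5,))
    return stack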
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/array/image.py",
"copies": "6",
"size": "1997",
"license": "mit",
"hash": -25959412088231750,
"line_mean": 27.1267605634,
"line_max": 80,
"alpha_frac": 0.6334501753,
"autogenerated": false,
"ratio": 3.9860279441117763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7619478119411777,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.config import colormaps
from glue.external.echo import CallbackProperty, SelectionCallbackProperty, delay_callback
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.core.data_combo_helper import ComponentIDComboHelper
from ..common.layer_state import VispyLayerState
__all__ = ['IsosurfaceLayerState']
class IsosurfaceLayerState(VispyLayerState):
"""
    A state object for isosurface layers
"""
attribute = SelectionCallbackProperty()
level_low = CallbackProperty()
level_high = CallbackProperty()
cmap = CallbackProperty()
step = CallbackProperty(4)
level_cache = CallbackProperty({})
def __init__(self, layer=None, **kwargs):
super(IsosurfaceLayerState, self).__init__(layer=layer)
self.att_helper = ComponentIDComboHelper(self, 'attribute')
self.lim_helper = StateAttributeLimitsHelper(self, attribute='attribute',
lower='level_low', upper='level_high')
self.add_callback('layer', self._on_layer_change)
if layer is not None:
self._on_layer_change()
self.cmap = colormaps.members[0][1]
self.update_from_dict(kwargs)
def _on_layer_change(self, layer=None):
with delay_callback(self, 'level_low', 'level_high'):
if self.layer is None:
self.att_helper.set_multiple_data([])
else:
self.att_helper.set_multiple_data([self.layer])
def update_priority(self, name):
return 0 if name == 'level' else 1
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/isosurface/layer_state.py",
"copies": "3",
"size": "1648",
"license": "bsd-2-clause",
"hash": -9152456319949282000,
"line_mean": 28.9636363636,
"line_max": 91,
"alpha_frac": 0.6541262136,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6154126213600001,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.config import colormaps
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty,
keep_in_sync, delay_callback)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.core.data_combo_helper import ComponentIDComboHelper
from ..common.layer_state import VispyLayerState
__all__ = ['ScatterLayerState']
class ScatterLayerState(VispyLayerState):
"""
    A state object for scatter layers
"""
size_mode = CallbackProperty('Fixed')
size = CallbackProperty()
size_attribute = SelectionCallbackProperty()
size_vmin = CallbackProperty()
size_vmax = CallbackProperty()
size_scaling = CallbackProperty(1)
color_mode = CallbackProperty('Fixed')
cmap_attribute = SelectionCallbackProperty()
cmap_vmin = CallbackProperty()
cmap_vmax = CallbackProperty()
cmap = CallbackProperty()
size_limits_cache = CallbackProperty({})
cmap_limits_cache = CallbackProperty({})
def __init__(self, layer=None, **kwargs):
self._sync_markersize = None
super(ScatterLayerState, self).__init__(layer=layer)
if self.layer is not None:
self.color = self.layer.style.color
self.size = self.layer.style.markersize
self.alpha = self.layer.style.alpha
self.size_att_helper = ComponentIDComboHelper(self, 'size_attribute')
self.cmap_att_helper = ComponentIDComboHelper(self, 'cmap_attribute')
self.size_lim_helper = StateAttributeLimitsHelper(self, attribute='size_attribute',
lower='size_vmin', upper='size_vmax',
cache=self.size_limits_cache)
self.cmap_lim_helper = StateAttributeLimitsHelper(self, attribute='cmap_attribute',
lower='cmap_vmin', upper='cmap_vmax',
cache=self.cmap_limits_cache)
self.add_callback('layer', self._on_layer_change)
if layer is not None:
self._on_layer_change()
self.cmap = colormaps.members[0][1]
self.update_from_dict(kwargs)
def _on_layer_change(self, layer=None):
with delay_callback(self, 'cmap_vmin', 'cmap_vmax', 'size_vmin', 'size_vmax'):
if self.layer is None:
self.cmap_att_helper.set_multiple_data([])
self.size_att_helper.set_multiple_data([])
else:
self.cmap_att_helper.set_multiple_data([self.layer])
self.size_att_helper.set_multiple_data([self.layer])
def update_priority(self, name):
return 0 if name.endswith(('vmin', 'vmax')) else 1
def _layer_changed(self):
super(ScatterLayerState, self)._layer_changed()
if self._sync_markersize is not None:
self._sync_markersize.stop_syncing()
if self.layer is not None:
self.size = self.layer.style.markersize
self._sync_markersize = keep_in_sync(self, 'size', self.layer.style, 'markersize')
def flip_size(self):
self.size_lim_helper.flip_limits()
def flip_cmap(self):
self.cmap_lim_helper.flip_limits()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/layer_state.py",
"copies": "3",
"size": "3356",
"license": "bsd-2-clause",
"hash": 6718872008350335000,
"line_mean": 34.7021276596,
"line_max": 95,
"alpha_frac": 0.6114421931,
"autogenerated": false,
"ratio": 4.112745098039215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6224187291139216,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core.data_factories.hdf5 import is_hdf5, extract_hdf5_datasets
from glue.core.data_factories.fits import is_fits, is_image_hdu
from glue.core.coordinates import coordinates_from_header
from glue.core.data import Component, Data
from glue.config import data_factory
from glue.utils import file_format
__all__ = ['is_gridded_data', 'gridded_data']
def extract_data_hdf5(filename, use_datasets='all'):
'''
Extract non-tabular datasets from an HDF5 file. If `use_datasets` is
'all', then all non-tabular datasets are extracted, otherwise only the
ones specified by `use_datasets` are extracted (`use_datasets` should
then contain a list of paths). If the requested datasets do not have
the same dimensions, an Exception is raised.
'''
import h5py
# Open file
file_handle = h5py.File(filename, 'r')
# Define function to read
# Read in all datasets
datasets = extract_hdf5_datasets(file_handle)
# Only keep non-tabular datasets
remove = []
for key in datasets:
if datasets[key].dtype.fields is not None:
remove.append(key)
for key in remove:
datasets.pop(key)
# Check that dimensions of all datasets are the same
reference_shape = datasets[list(datasets.keys())[0]].value.shape
for key in datasets:
if datasets[key].value.shape != reference_shape:
raise Exception("Datasets are not all the same dimensions")
# Extract data
arrays = {}
for key in datasets:
arrays[key] = datasets[key].value
# Close HDF5 file
file_handle.close()
return arrays
def filter_hdulist_by_shape(hdulist, use_hdu='all'):
"""
Remove empty HDUs, and ensure that all HDUs can be
packed into a single Data object (ie have the same shape)
Parameters
----------
use_hdu : 'all' or list of integers (optional)
Which HDUs to use
Returns
-------
a new HDUList
"""
from astropy.io import fits
# If only a subset are requested, extract those
if use_hdu != 'all':
hdulist = [hdulist[hdu] for hdu in use_hdu]
# Now only keep HDUs that are not tables or empty.
valid_hdus = []
for hdu in hdulist:
if (isinstance(hdu, fits.PrimaryHDU) or
isinstance(hdu, fits.ImageHDU)) and \
hdu.data is not None:
valid_hdus.append(hdu)
# Check that dimensions of all HDU are the same
# Allow for HDU's that have no data.
reference_shape = valid_hdus[0].data.shape
for hdu in valid_hdus:
if hdu.data.shape != reference_shape:
raise Exception("HDUs are not all the same dimensions")
return valid_hdus
def extract_data_fits(filename, use_hdu='all'):
'''
Extract non-tabular HDUs from a FITS file. If `use_hdu` is 'all', then
all non-tabular HDUs are extracted, otherwise only the ones specified
by `use_hdu` are extracted (`use_hdu` should then contain a list of
integers). If the requested HDUs do not have the same dimensions, an
Exception is raised.
'''
from astropy.io import fits
# Read in all HDUs
hdulist = fits.open(filename, ignore_blank=True)
hdulist = filter_hdulist_by_shape(hdulist)
# Extract data
arrays = {}
for hdu in hdulist:
arrays[hdu.name] = hdu.data
return arrays
def is_gridded_data(filename, **kwargs):
if is_hdf5(filename):
return True
if is_fits(filename):
from astropy.io import fits
with fits.open(filename) as hdulist:
return is_image_hdu(hdulist[0])
return False
@data_factory(label="FITS/HDF5 Image",
identifier=is_gridded_data,
deprecated=True)
def gridded_data(filename, format='auto', **kwargs):
result = Data()
# Try and automatically find the format if not specified
if format == 'auto':
format = file_format(filename)
# Read in the data
if is_fits(filename):
from astropy.io import fits
arrays = extract_data_fits(filename, **kwargs)
header = fits.getheader(filename)
result.coords = coordinates_from_header(header)
elif is_hdf5(filename):
arrays = extract_data_hdf5(filename, **kwargs)
else:
raise Exception("Unkonwn format: %s" % format)
for component_name in arrays:
comp = Component.autotyped(arrays[component_name])
result.add_component(comp, component_name)
return result
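# Illustrative usage sketch, not part of the original module, assuming astropy
# and numpy are available: a minimal FITS image round-trips through
# gridded_data, with the primary HDU becoming a component named 'PRIMARY'.
# The filename is hypothetical.
def _demo_gridded_data(filename='example_image.fits'):
    import numpy as np
    from astropy.io import fits
    fits.PrimaryHDU(data=np.arange(12).reshape(3, 4)).writeto(filename,
                                                              overwrite=True)
    data = gridded_data(filename)
    return data.shape   # (3, 4)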
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/data_factories/deprecated.py",
"copies": "4",
"size": "4547",
"license": "bsd-3-clause",
"hash": 3293082004932872000,
"line_mean": 27.9617834395,
"line_max": 74,
"alpha_frac": 0.6522982186,
"autogenerated": false,
"ratio": 3.6997558991049635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6352054117704964,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core.data import Data, Component
from glue.config import data_factory
from glue.core.data_factories.helpers import has_extension
__all__ = ['is_npy', 'npy_reader', 'is_npz', 'npz_reader']
# TODO: implement support for regular arrays, e.g., not just structured arrays?
def is_npy(filename):
"""
    The first bytes are: \x93NUMPY
see: https://github.com/numpy/numpy/blob/master/doc/neps/npy-format.rst
"""
from numpy.lib.format import MAGIC_PREFIX
with open(filename, 'rb') as infile:
return infile.read(6) == MAGIC_PREFIX
@data_factory(label="Numpy save file", identifier=is_npy, priority=100)
def npy_reader(filename, format='auto', auto_merge=False, **kwargs):
"""
    Read in a Numpy structured array saved to a .npy file.
    Parameters
    ----------
    filename: str
        The pathname to the Numpy save file.
"""
import numpy as np
npy_data = np.load(filename)
if not hasattr(npy_data.dtype, 'names'):
raise ValueError("Numpy save file loading currently only supports structured"
" arrays, e.g., with specified names.")
d = Data()
for name in npy_data.dtype.names:
comp = Component.autotyped(npy_data[name])
d.add_component(comp, label=name)
return d
def is_npz(filename):
"""
    A .npz file is a zip archive, so the first bytes are the zipfile magic
    number PK\x03\x04; the .npz extension is checked as well.
see: https://github.com/numpy/numpy/blob/master/doc/neps/npy-format.rst
"""
tester = has_extension('npz .npz')
MAGIC_PREFIX = b'PK\x03\x04' # first 4 bytes for a zipfile
with open(filename, 'rb') as infile:
check = infile.read(4) == MAGIC_PREFIX
return check and tester(filename)
@data_factory(label="Numpy multiple array save file", identifier=is_npz, priority=100)
def npz_reader(filename, format='auto', auto_merge=False, **kwargs):
"""
    Read in Numpy structured arrays saved to a .npz file, returning one Data
    object per stored array.
    Parameters
    ----------
    filename: str
        The pathname to the Numpy save file.
"""
import numpy as np
npy_data = np.load(filename)
groups = []
for groupname in sorted(npy_data.files):
d = Data(label=groupname)
arr = npy_data[groupname]
if not hasattr(arr.dtype, 'names'):
raise ValueError("Numpy save file loading currently only supports structured"
" arrays, e.g., with specified names.")
for name in arr.dtype.names:
comp = Component.autotyped(arr[name])
d.add_component(comp, label=name)
groups.append(d)
return groups
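# Illustrative usage sketch, not part of the original module: a structured array
# written with np.save is recognised by is_npy (but not is_npz) and loaded back
# into a Data object with one component per field. The filename is hypothetical.
def _demo_npy_reader(filename='example.npy'):
    import numpy as np
    arr = np.array([(1, 2.5), (2, 3.5)], dtype=[('a', 'i4'), ('b', 'f8')])
    np.save(filename, arr)
    assert is_npy(filename) and not is_npz(filename)
    d = npy_reader(filename)
    # d has components labelled 'a' and 'b', each of length 2
    return d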
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/npy.py",
"copies": "2",
"size": "2643",
"license": "bsd-3-clause",
"hash": -4987282703729713000,
"line_mean": 28.6966292135,
"line_max": 89,
"alpha_frac": 0.6375331063,
"autogenerated": false,
"ratio": 3.660664819944598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012594876112937439,
"num_lines": 89
} |
from __future__ import absolute_import, division, print_function
from glue.core import command
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.util import update_ticks
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
from glue.viewers.scatter.qt.layer_style_editor import ScatterLayerStyleEditor
from glue.viewers.scatter.layer_artist import ScatterLayerArtist
from glue.viewers.scatter.qt.options_widget import ScatterOptionsWidget
from glue.viewers.scatter.state import ScatterViewerState
from glue.viewers.scatter.compat import update_scatter_viewer_state
__all__ = ['ScatterViewer']
class ScatterViewer(MatplotlibDataViewer):
LABEL = '2D Scatter'
_toolbar_cls = MatplotlibViewerToolbar
_layer_style_widget_cls = ScatterLayerStyleEditor
_state_cls = ScatterViewerState
_options_cls = ScatterOptionsWidget
_data_artist_cls = ScatterLayerArtist
_subset_artist_cls = ScatterLayerArtist
tools = ['select:rectangle', 'select:xrange',
'select:yrange', 'select:circle',
'select:polygon']
def __init__(self, session, parent=None, state=None):
super(ScatterViewer, self).__init__(session, parent, state=state)
self.state.add_callback('x_att', self._update_axes)
self.state.add_callback('y_att', self._update_axes)
self.state.add_callback('x_log', self._update_axes)
self.state.add_callback('y_log', self._update_axes)
self._update_axes()
def _update_axes(self, *args):
if self.state.x_att is not None:
# Update ticks, which sets the labels to categories if components are categorical
update_ticks(self.axes, 'x', self.state._get_x_components(), self.state.x_log)
if self.state.x_log:
self.axes.set_xlabel('Log ' + self.state.x_att.label)
else:
self.axes.set_xlabel(self.state.x_att.label)
if self.state.y_att is not None:
# Update ticks, which sets the labels to categories if components are categorical
update_ticks(self.axes, 'y', self.state._get_y_components(), self.state.y_log)
if self.state.y_log:
self.axes.set_ylabel('Log ' + self.state.y_att.label)
else:
self.axes.set_ylabel(self.state.y_att.label)
self.axes.figure.canvas.draw()
# TODO: move some of the ROI stuff to state class?
def _roi_to_subset_state(self, roi):
x_comp = self.state.x_att.parent.get_component(self.state.x_att)
y_comp = self.state.y_att.parent.get_component(self.state.y_att)
return x_comp.subset_from_roi(self.state.x_att, roi,
other_comp=y_comp,
other_att=self.state.y_att,
coord='x')
@staticmethod
def update_viewer_state(rec, context):
return update_scatter_viewer_state(rec, context)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/scatter/qt/data_viewer.py",
"copies": "1",
"size": "3061",
"license": "bsd-3-clause",
"hash": -7325583605861242000,
"line_mean": 38.2435897436,
"line_max": 93,
"alpha_frac": 0.6566481542,
"autogenerated": false,
"ratio": 3.6054181389870434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47620662931870433,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core import Data, DataCollection
from qtpy import QtGui, QtWidgets
from qtpy.QtCore import Qt
from glue.core.hub import HubListener
from glue.core.message import (ComponentsChangedMessage,
DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataUpdateMessage)
from glue.utils import nonpartial
from glue.utils.qt import update_combobox
from glue.utils.qt.widget_properties import CurrentComboDataProperty
__all__ = ['ComponentIDComboHelper', 'ManualDataComboHelper',
'DataCollectionComboHelper']
class ComponentIDComboHelper(HubListener):
"""
The purpose of this class is to set up a combo showing componentIDs for
one or more datasets, and to update these componentIDs if needed, for
example if new components are added to a dataset, or if componentIDs are
renamed.
Parameters
----------
component_id_combo : Qt combo widget
The Qt widget for the component ID combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection to which the datasets belong - this is needed
because if a dataset is removed from the data collection, we want to
remove it here.
visible : bool, optional
Only show visible components
numeric : bool, optional
Show numeric components
categorical : bool, optional
Show categorical components
"""
def __init__(self, component_id_combo, data_collection, visible=True,
numeric=True, categorical=True):
super(ComponentIDComboHelper, self).__init__()
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
self._visible = visible
self._numeric = numeric
self._categorical = categorical
self._component_id_combo = component_id_combo
self._data = []
self._data_collection = data_collection
self.hub = data_collection.hub
def clear(self):
self._data.clear()
self.refresh()
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
self.refresh()
@property
def numeric(self):
return self._numeric
@numeric.setter
def numeric(self, value):
self._numeric = value
self.refresh()
@property
def categorical(self):
return self._categorical
@categorical.setter
def categorical(self, value):
self._categorical = value
self.refresh()
def append_data(self, data):
if self.hub is None:
if data.hub is None:
raise ValueError("Hub is not set on Data object")
else:
self.hub = data.hub
elif data.hub is not self.hub:
raise ValueError("Data Hub is different from current hub")
self._data.append(data)
self.refresh()
def remove_data(self, data):
self._data.remove(data)
self.refresh()
def set_multiple_data(self, datasets):
"""
Add multiple datasets to the combo in one go (and clear any previous datasets).
Parameters
----------
datasets : list
The list of :class:`~glue.core.data.Data` objects to add
"""
try:
self._data.clear()
except AttributeError: # PY2
self._data[:] = []
self._data.extend(datasets)
self.refresh()
@property
def hub(self):
return self._hub
@hub.setter
def hub(self, value):
self._hub = value
if value is not None:
self.register_to_hub(value)
def refresh(self):
label_data = []
for data in self._data:
if len(self._data) > 1:
if data.label is None or data.label == '':
label_data.append(("Untitled Data", None))
else:
label_data.append((data.label, None))
if self.visible:
all_component_ids = data.visible_components
else:
all_component_ids = data.components
component_ids = []
for cid in all_component_ids:
comp = data.get_component(cid)
if (comp.numeric and self.numeric) or (comp.categorical and self.categorical):
component_ids.append(cid)
label_data.extend([(cid.label, (cid, data)) for cid in component_ids])
update_combobox(self._component_id_combo, label_data)
# Disable header rows
model = self._component_id_combo.model()
for index in range(self._component_id_combo.count()):
if self._component_id_combo.itemData(index) is None:
item = model.item(index)
palette = self._component_id_combo.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
index = self._component_id_combo.currentIndex()
if self._component_id_combo.itemData(index) is None:
for index in range(index + 1, self._component_id_combo.count()):
if self._component_id_combo.itemData(index) is not None:
self._component_id_combo.setCurrentIndex(index)
break
def register_to_hub(self, hub):
hub.subscribe(self, ComponentsChangedMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.data in self._data)
hub.subscribe(self, DataCollectionDeleteMessage,
handler=lambda msg: self.remove_data(msg.data),
filter=lambda msg: msg.sender is self._data_collection)
def unregister(self, hub):
hub.unsubscribe_all(self)
class BaseDataComboHelper(HubListener):
"""
This is a base class for helpers for combo boxes that need to show a list
of data objects.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
"""
_data = CurrentComboDataProperty('_data_combo')
def __init__(self, data_combo):
super(BaseDataComboHelper, self).__init__()
self._data_combo = data_combo
self._component_id_helpers = []
self._data_combo.currentIndexChanged.connect(self.refresh_component_ids)
def refresh(self):
label_data = [(data.label, data) for data in self._datasets]
update_combobox(self._data_combo, label_data)
self.refresh_component_ids()
def refresh_component_ids(self):
for helper in self._component_id_helpers:
helper.clear()
if self._data is not None:
helper.append_data(self._data)
helper.refresh()
def add_component_id_combo(self, combo):
helper = ComponentIDComboHelper(combo)
        self._component_id_helpers.append(helper)
if self._data is not None:
helper.append_data(self._data)
@property
def hub(self):
return self._hub
@hub.setter
def hub(self, value):
self._hub = value
if value is not None:
self.register_to_hub(value)
def register_to_hub(self, hub):
pass
class ManualDataComboHelper(BaseDataComboHelper):
"""
This is a helper for combo boxes that need to show a list of data objects
that is manually curated.
Datasets are added and removed using the
:meth:`~ManualDataComboHelper.append_data` and
:meth:`~ManualDataComboHelper.remove_data` methods.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection to which the datasets belong - this is needed
because if a dataset is removed from the data collection, we want to
remove it here.
"""
def __init__(self, data_combo, data_collection):
super(ManualDataComboHelper, self).__init__(data_combo)
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
self._data_collection = data_collection
self._datasets = []
self.hub = data_collection.hub
def append_data(self, data):
self._datasets.append(data)
self.refresh()
def remove_data(self, data):
self._datasets.remove(data)
self.refresh()
def register_to_hub(self, hub):
super(ManualDataComboHelper, self).register_to_hub(hub)
hub.subscribe(self, DataUpdateMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender in self._datasets)
hub.subscribe(self, DataCollectionDeleteMessage,
handler=lambda msg: self.remove_data(msg.data),
filter=lambda msg: msg.sender is self._data_collection)
class DataCollectionComboHelper(BaseDataComboHelper):
"""
This is a helper for combo boxes that need to show a list of data objects
that is always in sync with a :class:`~glue.core.DataCollection`.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection with which to stay in sync
"""
def __init__(self, data_combo, data_collection):
super(DataCollectionComboHelper, self).__init__(data_combo)
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
self._datasets = data_collection
self.register_to_hub(data_collection.hub)
self.refresh()
def register_to_hub(self, hub):
super(DataCollectionComboHelper, self).register_to_hub(hub)
hub.subscribe(self, DataUpdateMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender in self._datasets)
        hub.subscribe(self, DataCollectionAddMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender is self._datasets)
hub.subscribe(self, DataCollectionDeleteMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender is self._datasets)
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
window = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout()
window.setLayout(layout)
data_combo = QtWidgets.QComboBox()
layout.addWidget(data_combo)
cid1_combo = QtWidgets.QComboBox()
layout.addWidget(cid1_combo)
cid2_combo = QtWidgets.QComboBox()
layout.addWidget(cid2_combo)
d1 = Data(x=[1,2,3], y=[2,3,4], label='banana')
d2 = Data(a=[0,1,1], b=[2,1,1], label='apple')
dc = DataCollection([d1, d2])
helper = DataCollectionComboHelper(data_combo, dc)
helper.add_component_id_combo(cid1_combo)
helper.add_component_id_combo(cid2_combo)
window.show()
window.raise_()
# app.exec_()
| {
"repo_name": "saimn/glue",
"path": "glue/core/qt/data_combo_helper.py",
"copies": "1",
"size": "11368",
"license": "bsd-3-clause",
"hash": 2405121046332123600,
"line_mean": 30.9325842697,
"line_max": 94,
"alpha_frac": 0.6111893033,
"autogenerated": false,
"ratio": 4.275291462955998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5386480766255998,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core import Data, DataCollection
from qtpy import QtWidgets
from ..data_combo_helper import (ComponentIDComboHelper, ManualDataComboHelper,
DataCollectionComboHelper)
def _items_as_string(combo):
items = [combo.itemText(i) for i in range(combo.count())]
return ":".join(items)
def test_component_id_combo_helper():
combo = QtWidgets.QComboBox()
dc = DataCollection([])
helper = ComponentIDComboHelper(combo, dc)
assert _items_as_string(combo) == ""
data1 = Data(x=[1,2,3], y=[2,3,4], label='data1')
dc.append(data1)
helper.append_data(data1)
assert _items_as_string(combo) == "x:y"
data2 = Data(a=[1,2,3], b=['a','b','c'], label='data2')
dc.append(data2)
helper.append_data(data2)
assert _items_as_string(combo) == "data1:x:y:data2:a:b"
helper.categorical = False
assert _items_as_string(combo) == "data1:x:y:data2:a"
helper.numeric = False
assert _items_as_string(combo) == "data1:data2"
helper.categorical = True
helper.numeric = True
helper.visible = False
assert _items_as_string(combo) == "data1:Pixel Axis 0 [x]:World 0:x:y:data2:Pixel Axis 0 [x]:World 0:a:b"
helper.visible = True
dc.remove(data2)
assert _items_as_string(combo) == "x:y"
# TODO: check that renaming a component updates the combo
# data1.id['x'].label = 'z'
# assert _items_as_string(combo) == "z:y"
helper.remove_data(data1)
assert _items_as_string(combo) == ""
def test_component_id_combo_helper_init():
# Regression test to make sure that the numeric and categorical options
# in the __init__ are taken into account properly
combo = QtWidgets.QComboBox()
dc = DataCollection([])
data = Data(a=[1,2,3], b=['a','b','c'], label='data2')
dc.append(data)
helper = ComponentIDComboHelper(combo, dc)
helper.append_data(data)
assert _items_as_string(combo) == "a:b"
helper = ComponentIDComboHelper(combo, dc, numeric=False)
helper.append_data(data)
assert _items_as_string(combo) == "b"
helper = ComponentIDComboHelper(combo, dc, categorical=False)
helper.append_data(data)
assert _items_as_string(combo) == "a"
helper = ComponentIDComboHelper(combo, dc, numeric=False, categorical=False)
helper.append_data(data)
assert _items_as_string(combo) == ""
def test_manual_data_combo_helper():
combo = QtWidgets.QComboBox()
dc = DataCollection([])
helper = ManualDataComboHelper(combo, dc)
data1 = Data(x=[1,2,3], y=[2,3,4], label='data1')
dc.append(data1)
assert _items_as_string(combo) == ""
helper.append_data(data1)
assert _items_as_string(combo) == "data1"
data1.label = 'mydata1'
assert _items_as_string(combo) == "mydata1"
dc.remove(data1)
assert _items_as_string(combo) == ""
def test_data_collection_combo_helper():
combo = QtWidgets.QComboBox()
dc = DataCollection([])
helper = DataCollectionComboHelper(combo, dc)
data1 = Data(x=[1,2,3], y=[2,3,4], label='data1')
dc.append(data1)
assert _items_as_string(combo) == "data1"
data1.label = 'mydata1'
assert _items_as_string(combo) == "mydata1"
dc.remove(data1)
assert _items_as_string(combo) == ""
| {
"repo_name": "saimn/glue",
"path": "glue/core/qt/tests/test_data_combo_helper.py",
"copies": "1",
"size": "3366",
"license": "bsd-3-clause",
"hash": 1624665912514601000,
"line_mean": 23.0428571429,
"line_max": 109,
"alpha_frac": 0.642305407,
"autogenerated": false,
"ratio": 3.199619771863118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4341925178863118,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core import Data, DataCollection, Subset
from qtpy import QtGui, QtWidgets
from qtpy.QtCore import Qt
from glue.core.hub import HubListener
from glue.core.message import (ComponentsChangedMessage,
DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataUpdateMessage,
ComponentReplacedMessage)
from glue.utils import nonpartial
from glue.utils.qt import update_combobox
from glue.utils.qt.widget_properties import CurrentComboDataProperty
from glue.external.echo.qt.connect import _find_combo_data
__all__ = ['ComponentIDComboHelper', 'ManualDataComboHelper',
'DataCollectionComboHelper']
class ComponentIDComboHelper(HubListener):
"""
The purpose of this class is to set up a combo showing componentIDs for
one or more datasets, and to update these componentIDs if needed, for
example if new components are added to a dataset, or if componentIDs are
renamed.
Parameters
----------
component_id_combo : Qt combo widget
The Qt widget for the component ID combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection to which the datasets belong - if specified,
this is used to remove datasets from the combo when they are removed
from the data collection.
data : :class:`~glue.core.Data`
If specified, set up the combo for this dataset only and don't allow
datasets to be added/removed
visible : bool, optional
Only show visible components
numeric : bool, optional
Show numeric components
categorical : bool, optional
Show categorical components
pixel_coord : bool, optional
Show pixel coordinate components
world_coord : bool, optional
Show world coordinate components
"""
def __init__(self, component_id_combo, data_collection=None, data=None,
visible=True, numeric=True, categorical=True,
pixel_coord=False, world_coord=False, default_index=0,):
super(ComponentIDComboHelper, self).__init__()
self._visible = visible
self._numeric = numeric
self._categorical = categorical
self._pixel_coord = pixel_coord
self._world_coord = world_coord
self._component_id_combo = component_id_combo
if data is None:
self._manual_data = False
self._data = []
else:
self._manual_data = True
self._data = [data]
self._data_collection = data_collection
if data_collection is not None:
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
else:
self.hub = data_collection.hub
self.default_index = default_index
if data is not None:
self.refresh()
def clear(self):
self._data.clear()
self.refresh()
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
self.refresh()
@property
def numeric(self):
return self._numeric
@numeric.setter
def numeric(self, value):
self._numeric = value
self.refresh()
@property
def categorical(self):
return self._categorical
@categorical.setter
def categorical(self, value):
self._categorical = value
self.refresh()
@property
def pixel_coord(self):
return self._pixel_coord
@pixel_coord.setter
def pixel_coord(self, value):
self._pixel_coord = value
self.refresh()
@property
def world_coord(self):
return self._world_coord
@world_coord.setter
def world_coord(self, value):
self._world_coord = value
self.refresh()
def append_data(self, data, refresh=True):
if self._manual_data:
raise Exception("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
if isinstance(data, Subset):
data = data.data
if self.hub is None:
if data.hub is not None:
self.hub = data.hub
elif data.hub is not self.hub:
raise ValueError("Data Hub is different from current hub")
if data not in self._data:
self._data.append(data)
if refresh:
self.refresh()
def remove_data(self, data):
if self._manual_data:
raise Exception("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
if data in self._data:
self._data.remove(data)
self.refresh()
def set_multiple_data(self, datasets):
"""
Add multiple datasets to the combo in one go (and clear any previous datasets).
Parameters
----------
datasets : list
The list of :class:`~glue.core.data.Data` objects to add
"""
if self._manual_data:
raise Exception("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
try:
self._data.clear()
except AttributeError: # PY2
self._data[:] = []
for data in datasets:
self.append_data(data, refresh=False)
self.refresh()
@property
def hub(self):
return self._hub
@hub.setter
def hub(self, value):
self._hub = value
if value is not None:
self.register_to_hub(value)
def refresh(self):
label_data = []
for data in self._data:
if len(self._data) > 1:
if data.label is None or data.label == '':
label_data.append(("Untitled Data", None))
else:
label_data.append((data.label, None))
if self.visible:
all_component_ids = data.visible_components
else:
all_component_ids = data.components
component_ids = []
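            # Keep only the components allowed by the current filter settings
            # (numeric / categorical / pixel coordinate / world coordinate).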
for cid in all_component_ids:
comp = data.get_component(cid)
if ((comp.numeric and self.numeric) or
(comp.categorical and self.categorical) or
(cid in data.pixel_component_ids and self.pixel_coord) or
(cid in data.world_component_ids and self.world_coord)):
component_ids.append(cid)
label_data.extend([(cid.label, cid) for cid in component_ids])
update_combobox(self._component_id_combo, label_data, default_index=self.default_index)
# Disable header rows
model = self._component_id_combo.model()
for index in range(self._component_id_combo.count()):
if self._component_id_combo.itemData(index) is None:
item = model.item(index)
palette = self._component_id_combo.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
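        # If the current selection is a (disabled) header row, move it to the
        # first selectable component that follows.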
index = self._component_id_combo.currentIndex()
if self._component_id_combo.itemData(index) is None:
for index in range(index + 1, self._component_id_combo.count()):
if self._component_id_combo.itemData(index) is not None:
self._component_id_combo.setCurrentIndex(index)
break
def register_to_hub(self, hub):
hub.subscribe(self, ComponentReplacedMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.data in self._data)
hub.subscribe(self, ComponentsChangedMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.data in self._data)
if self._data_collection is not None:
hub.subscribe(self, DataCollectionDeleteMessage,
handler=lambda msg: self.remove_data(msg.data),
filter=lambda msg: msg.sender is self._data_collection)
def unregister(self, hub):
hub.unsubscribe_all(self)
class BaseDataComboHelper(HubListener):
"""
This is a base class for helpers for combo boxes that need to show a list
of data objects.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
"""
_data = CurrentComboDataProperty('_data_combo')
def __init__(self, data_combo):
super(BaseDataComboHelper, self).__init__()
self._data_combo = data_combo
self._component_id_helpers = []
self._data_combo.currentIndexChanged.connect(self.refresh_component_ids)
def refresh(self):
label_data = [(data.label, data) for data in self._datasets]
update_combobox(self._data_combo, label_data)
self.refresh_component_ids()
def refresh_component_ids(self):
for helper in self._component_id_helpers:
helper.clear()
if self._data is not None:
helper.append_data(self._data)
helper.refresh()
def add_component_id_combo(self, combo):
helper = ComponentIDComboHelper(combo)
        self._component_id_helpers.append(helper)
if self._data is not None:
helper.append_data(self._data)
@property
def hub(self):
return self._hub
@hub.setter
def hub(self, value):
self._hub = value
if value is not None:
self.register_to_hub(value)
def register_to_hub(self, hub):
pass
class ManualDataComboHelper(BaseDataComboHelper):
"""
This is a helper for combo boxes that need to show a list of data objects
that is manually curated.
Datasets are added and removed using the
:meth:`~ManualDataComboHelper.append_data` and
:meth:`~ManualDataComboHelper.remove_data` methods.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection to which the datasets belong - this is needed
because if a dataset is removed from the data collection, we want to
remove it here.
"""
def __init__(self, data_combo, data_collection):
super(ManualDataComboHelper, self).__init__(data_combo)
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
self._data_collection = data_collection
self._datasets = []
self.hub = data_collection.hub
def set_multiple_data(self, datasets):
"""
Add multiple datasets to the combo in one go (and clear any previous datasets).
Parameters
----------
datasets : list
The list of :class:`~glue.core.data.Data` objects to add
"""
try:
self._datasets.clear()
except AttributeError: # PY2
self._datasets[:] = []
for data in datasets:
self._datasets.append(data)
self.refresh()
def append_data(self, data):
if data in self._datasets:
return
self._datasets.append(data)
self.refresh()
def remove_data(self, data):
if data not in self._datasets:
return
self._datasets.remove(data)
self.refresh()
def register_to_hub(self, hub):
super(ManualDataComboHelper, self).register_to_hub(hub)
hub.subscribe(self, DataUpdateMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender in self._datasets)
hub.subscribe(self, DataCollectionDeleteMessage,
handler=lambda msg: self.remove_data(msg.data),
filter=lambda msg: msg.sender is self._data_collection)
class DataCollectionComboHelper(BaseDataComboHelper):
"""
This is a helper for combo boxes that need to show a list of data objects
that is always in sync with a :class:`~glue.core.DataCollection`.
Parameters
----------
data_combo : Qt combo widget
The Qt widget for the data combo box
data_collection : :class:`~glue.core.DataCollection`
The data collection with which to stay in sync
"""
def __init__(self, data_combo, data_collection):
super(DataCollectionComboHelper, self).__init__(data_combo)
if data_collection.hub is None:
raise ValueError("Hub on data collection is not set")
self._datasets = data_collection
self.register_to_hub(data_collection.hub)
self.refresh()
def register_to_hub(self, hub):
super(DataCollectionComboHelper, self).register_to_hub(hub)
hub.subscribe(self, DataUpdateMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender in self._datasets)
        hub.subscribe(self, DataCollectionAddMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender is self._datasets)
hub.subscribe(self, DataCollectionDeleteMessage,
handler=nonpartial(self.refresh),
filter=lambda msg: msg.sender is self._datasets)
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
window = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout()
window.setLayout(layout)
data_combo = QtWidgets.QComboBox()
layout.addWidget(data_combo)
cid1_combo = QtWidgets.QComboBox()
layout.addWidget(cid1_combo)
cid2_combo = QtWidgets.QComboBox()
layout.addWidget(cid2_combo)
d1 = Data(x=[1,2,3], y=[2,3,4], label='banana')
d2 = Data(a=[0,1,1], b=[2,1,1], label='apple')
dc = DataCollection([d1, d2])
helper = DataCollectionComboHelper(data_combo, dc)
helper.add_component_id_combo(cid1_combo)
helper.add_component_id_combo(cid2_combo)
window.show()
window.raise_()
# app.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/qt/data_combo_helper.py",
"copies": "2",
"size": "14412",
"license": "bsd-3-clause",
"hash": -1801757280630376400,
"line_mean": 31.3139013453,
"line_max": 95,
"alpha_frac": 0.5998473494,
"autogenerated": false,
"ratio": 4.368596544407396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5968443893807396,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core import Subset
from glue.core.subset import MaskSubsetState
__all__ = ['SubsetMaskImporter', 'SubsetMaskExporter']
class SubsetMaskImporter(object):
def get_filename_and_reader(self):
raise NotImplementedError
def run(self, data_or_subset, data_collection):
filename, reader = self.get_filename_and_reader()
if filename is None:
return
# Read in the masks
masks = reader(filename)
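        # The reader is expected to return a dict mapping subset labels to
        # boolean mask arrays, each with the same shape as the target data.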
# Make sure shape is unique
shapes = set(mask.shape for mask in masks.values())
if len(shapes) == 0:
raise ValueError("No subset masks were returned")
elif len(shapes) > 1:
raise ValueError("Not all subsets have the same shape")
if list(shapes)[0] != data_or_subset.shape:
raise ValueError("Mask shape(s) {0} does not match data shape {1}".format(list(shapes)[0], data_or_subset.shape))
if isinstance(data_or_subset, Subset):
subset = data_or_subset
if len(masks) != 1:
raise ValueError("Can only read in a single subset when importing into a subset")
mask = list(masks.values())[0]
subset_state = MaskSubsetState(mask, subset.pixel_component_ids)
subset.subset_state = subset_state
else:
data = data_or_subset
for label, mask in masks.items():
subset_state = MaskSubsetState(mask, data.pixel_component_ids)
data_collection.new_subset_group(label=label, subset_state=subset_state)
class SubsetMaskExporter(object):
def get_filename_and_writer(self):
raise NotImplementedError
def run(self, data_or_subset):
filename, writer = self.get_filename_and_writer()
if filename is None:
return
# Prepare dictionary of masks
masks = {}
if isinstance(data_or_subset, Subset):
subset = data_or_subset
masks[subset.label] = subset.to_mask()
else:
data = data_or_subset
for subset in data.subsets:
masks[subset.label] = subset.to_mask()
writer(filename, masks)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/io/subset_mask.py",
"copies": "2",
"size": "2274",
"license": "bsd-3-clause",
"hash": -782718395318820600,
"line_mean": 25.7529411765,
"line_max": 125,
"alpha_frac": 0.6064204046,
"autogenerated": false,
"ratio": 4.211111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034560640735695024,
"num_lines": 85
} |
from __future__ import absolute_import, division, print_function
from glue.core import Subset
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty,
delay_callback)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.core.data_combo_helper import ComponentIDComboHelper
from ..common.layer_state import VispyLayerState
__all__ = ['VolumeLayerState']
class VolumeLayerState(VispyLayerState):
"""
A state object for volume layers
"""
attribute = SelectionCallbackProperty()
vmin = CallbackProperty()
vmax = CallbackProperty()
subset_mode = CallbackProperty('data')
limits_cache = CallbackProperty({})
def __init__(self, layer=None, **kwargs):
super(VolumeLayerState, self).__init__(layer=layer)
if self.layer is not None:
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self.att_helper = ComponentIDComboHelper(self, 'attribute')
self.lim_helper = StateAttributeLimitsHelper(self, attribute='attribute',
lower='vmin', upper='vmax',
cache=self.limits_cache)
self.add_callback('layer', self._on_layer_change)
if layer is not None:
self._on_layer_change()
if isinstance(self.layer, Subset):
self.vmin = 0
self.lim_helper.lower_frozen = True
self.update_from_dict(kwargs)
def _on_layer_change(self, layer=None):
        with delay_callback(self, 'vmin', 'vmax'):
if self.layer is None:
self.att_helper.set_multiple_data([])
else:
self.att_helper.set_multiple_data([self.layer])
def update_priority(self, name):
return 0 if name.endswith(('vmin', 'vmax')) else 1
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/layer_state.py",
"copies": "1",
"size": "1911",
"license": "bsd-2-clause",
"hash": 2214458956751394000,
"line_mean": 31.3898305085,
"line_max": 81,
"alpha_frac": 0.6117216117,
"autogenerated": false,
"ratio": 4.246666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004159457790434778,
"num_lines": 59
} |
from __future__ import absolute_import, division, print_function
from glue.core.message import (DataUpdateMessage,
SubsetUpdateMessage,
SubsetCreateMessage,
SubsetDeleteMessage,
DataCollectionDeleteMessage,
NumericalDataChangedMessage)
from glue.core.data_collection import DataCollection
from glue.core.subset import Subset
from glue.core.data import Data
from glue.core.hub import HubListener
__all__ = ['Client', 'BasicClient']
class Client(HubListener):
"""
Base class for interaction / visualization modules
Attributes
----------
data: DataCollection instance
The data associated with this client.
"""
def __init__(self, data):
"""
Create a new client object.
Parameters
----------
data: Data, DataCollection, or list of data
The primary data associated with this client.
Raises
------
TypeError: If the data input is the wrong type
"""
super(Client, self).__init__()
self._data = data
if not isinstance(data, DataCollection):
raise TypeError("Input data must be a DataCollection: %s"
% type(data))
@property
def data(self):
""" Returns the data collection """
return self._data
def register_to_hub(self, hub):
"""The main method to establish a link with a hub,
and set up event handlers. For common message types
Client subclasses at a minimum should override these methods
to provide functionality:
_add_subset
_update_subset
_remove_subset
_remove_data
Clients can also override register_to_hub to add additional
event handlers.
Attributes
----------
hub: The hub to subscribe to
"""
has_data = lambda x: x.sender.data in self._data
has_data_collection = lambda x: x.sender is self._data
hub.subscribe(self,
SubsetCreateMessage,
handler=self._add_subset,
filter=has_data)
hub.subscribe(self,
SubsetUpdateMessage,
handler=self._update_subset,
filter=has_data)
hub.subscribe(self,
SubsetDeleteMessage,
handler=self._remove_subset,
filter=has_data)
hub.subscribe(self,
DataUpdateMessage,
handler=self._update_data,
filter=has_data)
hub.subscribe(self,
NumericalDataChangedMessage,
handler=self._numerical_data_changed,
filter=has_data)
hub.subscribe(self,
DataCollectionDeleteMessage,
handler=self._remove_data,
filter=has_data_collection)
def _add_subset(self, message):
raise NotImplementedError
def _remove_data(self, message):
raise NotImplementedError
def _remove_subset(self, message):
raise NotImplementedError
def _update_data(self, message):
""" Default handler for DataMessage """
raise NotImplementedError
def _update_subset(self, message):
""" Default handler for SubsetUpdateMessage """
raise NotImplementedError
def apply_roi(self, roi):
raise NotImplementedError
def _numerical_data_changed(self, message):
raise NotImplementedError
class BasicClient(Client):
def _add_subset(self, message):
subset = message.subset
self.add_layer(subset)
def _update_subset(self, message):
subset = message.subset
self.update_layer(subset)
def _remove_subset(self, message):
subset = message.subset
self.remove_layer(subset)
def _remove_data(self, message):
self.remove_layer(message.data)
def _update_data(self, message):
self.update_layer(message.data)
def add_layer(self, layer):
if self.layer_present(layer):
return
if layer.data not in self.data:
raise TypeError("Data not in collection")
if isinstance(layer, Data):
self._do_add_data(layer)
for subset in layer.subsets:
self.add_layer(subset)
else:
if not self.layer_present(layer.data):
self.add_layer(layer.data)
else:
self._do_add_subset(layer)
self.update_layer(layer)
def update_layer(self, layer):
if not self.layer_present(layer):
return
if isinstance(layer, Subset):
self._do_update_subset(layer)
else:
self._do_update_data(layer)
def remove_layer(self, layer):
if not self.layer_present(layer):
return
if isinstance(layer, Data):
self._do_remove_data(layer)
for subset in layer.subsets:
self._do_remove_subset(subset)
else:
self._do_remove_subset(layer)
def _do_add_data(self, data):
raise NotImplementedError
def _do_add_subset(self, subset):
raise NotImplementedError
def _do_update_subset(self, subset):
raise NotImplementedError
def _do_update_data(self, data):
raise NotImplementedError
def _do_remove_subset(self, subset):
raise NotImplementedError
def _do_remove_data(self, data):
raise NotImplementedError
def layer_present(self, layer):
raise NotImplementedError
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/client.py",
"copies": "4",
"size": "5812",
"license": "bsd-3-clause",
"hash": 1220531006990243000,
"line_mean": 28.06,
"line_max": 69,
"alpha_frac": 0.5688231246,
"autogenerated": false,
"ratio": 4.767842493847416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7336665618447415,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.core.state import lookup_class_with_patches
from glue.config import settings
from glue.core import message as msg
from qtpy.QtWidgets import QMessageBox
from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import VolumeLayerArtist
from .layer_style_widget import VolumeLayerStyleWidget
from .viewer_state import Vispy3DVolumeViewerState
from ..scatter.layer_artist import ScatterLayerArtist
from ..scatter.layer_style_widget import ScatterLayerStyleWidget
from ..common import tools # noqa
from ..common import selection_tools # noqa
from . import volume_toolbar # noqa
try:
import OpenGL # flake8: noqa
except ImportError:
OPENGL_INSTALLED = False
else:
OPENGL_INSTALLED = True
class VispyVolumeViewer(BaseVispyViewer):
LABEL = "3D Volume Rendering"
_state_cls = Vispy3DVolumeViewerState
_layer_style_widget_cls = {VolumeLayerArtist: VolumeLayerStyleWidget,
ScatterLayerArtist: ScatterLayerStyleWidget}
tools = BaseVispyViewer.tools + ['vispy:lasso', 'vispy:rectangle',
'vispy:circle', 'volume3d:point']
def __init__(self, *args, **kwargs):
super(VispyVolumeViewer, self).__init__(*args, **kwargs)
if not OPENGL_INSTALLED:
self.close()
QMessageBox.critical(self, "Error",
"The PyOpenGL package is required for the "
"3D volume rendering viewer",
buttons=QMessageBox.Ok)
def add_data(self, data):
if data in self._layer_artist_container:
return True
if data.ndim == 1:
            if len(self._layer_artist_container) == 0:
                QMessageBox.critical(self, "Error",
                                     "Can only add a scatter plot overlay once a volume is present",
                                     buttons=QMessageBox.Ok)
                return False
# Assume that the user wants a scatter plot overlay
layer_artist = ScatterLayerArtist(layer=data, vispy_viewer=self)
self._vispy_widget._update_limits()
elif data.ndim == 3:
if len(self._layer_artist_container) > 0:
required_shape = self._layer_artist_container[0].shape
if data.shape != required_shape:
QMessageBox.critical(self, "Error",
"Shape of dataset ({0}) does not agree "
"with shape of existing datasets in volume "
"rendering ({1})".format(data.shape, required_shape),
buttons=QMessageBox.Ok)
return False
layer_artist = VolumeLayerArtist(layer=data, vispy_viewer=self)
else:
QMessageBox.critical(self, "Error",
"Data should be 1- or 3-dimensional ({0} dimensions found)".format(data.ndim),
buttons=QMessageBox.Ok)
return False
if len(self._layer_artist_container) == 0:
self.state.set_limits(*layer_artist.bbox)
self._layer_artist_container.append(layer_artist)
for subset in data.subsets:
self.add_subset(subset)
return True
def add_subset(self, subset):
if subset in self._layer_artist_container:
return
if subset.ndim == 1:
layer_artist = ScatterLayerArtist(layer=subset, vispy_viewer=self)
elif subset.ndim == 3:
layer_artist = VolumeLayerArtist(layer=subset, vispy_viewer=self)
else:
return
self._layer_artist_container.append(layer_artist)
def _add_subset(self, message):
self.add_subset(message.subset)
@classmethod
def __setgluestate__(cls, rec, context):
viewer = super(VispyVolumeViewer, cls).__setgluestate__(rec, context)
return viewer
def _update_appearance_from_settings(self, message):
super(VispyVolumeViewer, self)._update_appearance_from_settings(message)
if hasattr(self._vispy_widget, '_multivol'):
self._vispy_widget._multivol.set_background(settings.BACKGROUND_COLOR)
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/volume_viewer.py",
"copies": "1",
"size": "4382",
"license": "bsd-2-clause",
"hash": -9130946912821858000,
"line_mean": 34.6260162602,
"line_max": 118,
"alpha_frac": 0.5983569147,
"autogenerated": false,
"ratio": 4.342913776015857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006184971485689158,
"num_lines": 123
} |
from __future__ import absolute_import, division, print_function
from glue.core.util import disambiguate
from glue.core.message import (DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataAddComponentMessage)
from glue.core.registry import Registry
from glue.core.link_manager import LinkManager
from glue.core.data import Data
from glue.core.hub import Hub, HubListener
from glue.config import settings
from glue.utils import as_list
__all__ = ['DataCollection']
class DataCollection(HubListener):
"""The top-level object for interacting with datasets in Glue.
DataCollections have the following responsibilities:
* Providing a way to retrieve and store data
* Broadcasting messages when data are added or removed
* Keeping each managed data set's list of
:class:`~glue.core.component.DerivedComponent` instances up-to-date
* Creating the hub that all other objects should use to communicate
with one another (stored in ``self.hub``)
"""
def __init__(self, data=None):
"""
:param data: :class:`~glue.core.data.Data` object, or list of such objects
"""
super(DataCollection, self).__init__()
self._link_manager = LinkManager()
self._data = []
self.hub = None
self._subset_groups = []
self.register_to_hub(Hub())
self.extend(as_list(data or []))
self._sg_count = 0
@property
def data(self):
""" The :class:`~glue.core.data.Data` objects in the collection """
return self._data
def append(self, data):
""" Add a new dataset to this collection.
Appending emits a DataCollectionAddMessage.
It also updates the list of DerivedComponents that each
data set can work with.
:param data: :class:`~glue.core.data.Data` object to add
"""
if isinstance(data, list):
self.extend(data)
return
if data in self:
return
self._data.append(data)
if self.hub:
data.register_to_hub(self.hub)
for s in data.subsets:
s.register()
msg = DataCollectionAddMessage(self, data)
self.hub.broadcast(msg)
self._sync_link_manager()
def extend(self, data):
"""Add several new datasets to this collection
See :meth:`append` for more information
:param data: List of data objects to add
"""
[self.append(d) for d in data]
def remove(self, data):
""" Remove a data set from the collection
Emits a DataCollectionDeleteMessage
:param data: the object to remove
:type data: :class:`~glue.core.data.Data`
"""
if data not in self._data:
return
self._data.remove(data)
Registry().unregister(data, Data)
if self.hub:
msg = DataCollectionDeleteMessage(self, data)
self.hub.broadcast(msg)
def _sync_link_manager(self):
""" update the LinkManager, so all the DerivedComponents
for each data set are up-to-date
"""
# add any links in the data
for d in self._data:
for derived in d.derived_components:
self._link_manager.add_link(d.get_component(derived).link)
for link in d.coordinate_links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
@property
def links(self):
"""
Tuple of :class:`~glue.core.component_link.ComponentLink` objects.
"""
return tuple(self._link_manager.links)
def add_link(self, links):
"""Add one or more links to the data collection.
This will auto-update the components in each data set
:param links:
The links to add. A scalar or list of
:class:`~glue.core.component_link.ComponentLink`
instances, or a :class:`~glue.core.link_helpers.LinkCollection`
"""
self._link_manager.add_link(links)
for d in self._data:
self._link_manager.update_data_components(d)
def _merge_link(self, link):
pass
def set_links(self, links):
"""Override the links in the collection, and update data
objects as necessary.
:param links: The new links. An iterable of
:class:`~glue.core.component_link.ComponentLink` instances
"""
self._link_manager.clear()
for link in links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
def register_to_hub(self, hub):
""" Register managed data objects to a hub.
:param hub: The hub to register with
:type hub: :class:`~glue.core.hub.Hub`
"""
if self.hub is hub:
return
if self.hub is not None:
raise RuntimeError("Data Collection already registered "
"to a different Hub")
if not isinstance(hub, Hub):
raise TypeError("Input is not a Hub object: %s" % type(hub))
self.hub = hub
# re-assign all data, subset hub instances to this hub
for d in self._data:
d.register_to_hub(hub)
for s in d.subsets:
s.register()
hub.subscribe(self, DataAddComponentMessage,
lambda msg: self._sync_link_manager(),
filter=lambda x: x.sender in self._data)
def new_subset_group(self, label=None, subset_state=None):
"""
Create and return a new Subset Group.
:param label: The label to assign to the group
:type label: str
:param subset_state: The state to initialize the group with
:type subset_state: :class:`~glue.core.subset.SubsetState`
:returns: A new :class:`~glue.core.subset_group.SubsetGroup`
"""
from glue.core.subset_group import SubsetGroup
color = settings.SUBSET_COLORS[self._sg_count % len(settings.SUBSET_COLORS)]
self._sg_count += 1
label = label or "%i" % (self._sg_count)
result = SubsetGroup(color=color, label=label, subset_state=subset_state)
self._subset_groups.append(result)
result.register(self)
return result
def remove_subset_group(self, subset_grp):
"""
Remove an existing :class:`~glue.core.subset_group.SubsetGroup`
"""
if subset_grp not in self._subset_groups:
return
# remove from list first, so that group appears deleted
# by the time the first SubsetDelete message is broadcast
self._subset_groups.remove(subset_grp)
for s in subset_grp.subsets:
s.delete()
subset_grp.unregister(self.hub)
def merge(self, *data, **kwargs):
"""
Merge two or more datasets into a single dataset.
This has the following effects:
All components from all datasets are added to the first argument
All datasets except the first argument are removed from the collection
Any component name conflicts are disambiguated
The pixel and world components apart from the first argument are discarded
:note: All arguments must have the same shape
:param data: One or more :class:`~glue.core.data.Data` instances.
:returns: self
"""
if len(data) < 2:
raise ValueError("merge requires 2 or more arguments")
shp = data[0].shape
for d in data:
if d.shape != shp:
raise ValueError("All arguments must have the same shape")
label = kwargs.get('label', data[0].label)
master = Data(label=label)
self.append(master)
for d in data:
skip = d.pixel_component_ids + d.world_component_ids
for c in d.components:
if c in skip:
continue
if c in master.components: # already present (via a link)
continue
taken = [_.label for _ in master.components]
lbl = c.label
# Special-case 'PRIMARY', rename to data label
if lbl == 'PRIMARY':
lbl = d.label
# First-pass disambiguation, try component_data
if lbl in taken:
lbl = '%s_%s' % (lbl, d.label)
lbl = disambiguate(lbl, taken)
c._label = lbl
master.add_component(d.get_component(c), c)
self.remove(d)
return self
@property
def subset_groups(self):
"""
tuple of current :class:`Subset Groups <glue.core.subset_group.SubsetGroup>`
"""
return tuple(self._subset_groups)
def __contains__(self, obj):
return obj in self._data or obj in self.subset_groups
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __str__(self):
if len(self) == 1:
result = "DataCollection (1 data set)\n\t"
else:
result = "DataCollection (%i data sets)\n\t" % len(self)
result += '\n\t'.join("%3i: %s" % (i, d.label) for
i, d in enumerate(self))
return result
def __repr__(self):
return self.__str__()
def __bool__(self):
return True
def __nonzero__(self):
return True
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_collection.py",
"copies": "1",
"size": "9736",
"license": "bsd-3-clause",
"hash": 75180354344016770,
"line_mean": 30.8169934641,
"line_max": 84,
"alpha_frac": 0.5780608053,
"autogenerated": false,
"ratio": 4.220199393151279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298260198451279,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.external.echo import CallbackProperty, keep_in_sync
from glue.core.message import LayerArtistUpdatedMessage
from glue.viewers.common.state import LayerState
__all__ = ['VispyLayerState']
class VispyLayerState(LayerState):
"""
A base state object for all Vispy layers
"""
color = CallbackProperty()
alpha = CallbackProperty()
def __init__(self, **kwargs):
super(VispyLayerState, self).__init__(**kwargs)
self._sync_color = None
self._sync_alpha = None
self.add_callback('layer', self._layer_changed)
self._layer_changed()
self.add_global_callback(self._notify_layer_update)
def _notify_layer_update(self, **kwargs):
message = LayerArtistUpdatedMessage(self)
if self.layer is not None and self.layer.hub is not None:
self.layer.hub.broadcast(message)
def _layer_changed(self):
if self._sync_color is not None:
self._sync_color.stop_syncing()
if self._sync_alpha is not None:
self._sync_alpha.stop_syncing()
if self.layer is not None:
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/common/layer_state.py",
"copies": "2",
"size": "1448",
"license": "bsd-2-clause",
"hash": -318366996663089900,
"line_mean": 28.5510204082,
"line_max": 85,
"alpha_frac": 0.6408839779,
"autogenerated": false,
"ratio": 3.7416020671834627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5382486045083462,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.external.echo import CallbackProperty, keep_in_sync
from glue.core.state_objects import State
__all__ = ['VispyLayerState']
class VispyLayerState(State):
"""
A base state object for all Vispy layers
"""
layer = CallbackProperty()
visible = CallbackProperty(True)
zorder = CallbackProperty(0)
color = CallbackProperty()
alpha = CallbackProperty()
def __init__(self, **kwargs):
super(VispyLayerState, self).__init__(**kwargs)
self._sync_color = None
self._sync_alpha = None
self.add_callback('layer', self._layer_changed)
self._layer_changed()
def _layer_changed(self):
if self._sync_color is not None:
self._sync_color.stop_syncing()
if self._sync_alpha is not None:
self._sync_alpha.stop_syncing()
if self.layer is not None:
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/common/layer_state.py",
"copies": "1",
"size": "1211",
"license": "bsd-2-clause",
"hash": 7706980771562629000,
"line_mean": 26.5227272727,
"line_max": 85,
"alpha_frac": 0.6251032205,
"autogenerated": false,
"ratio": 3.7376543209876543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48627575414876545,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.external.echo import keep_in_sync
from glue.core.layer_artist import LayerArtistBase
from glue.viewers.matplotlib.state import DeferredDrawCallbackProperty
# TODO: should use the built-in class for this, though we don't need
# the _sync_style method, so just re-define here for now.
class MatplotlibLayerArtist(LayerArtistBase):
zorder = DeferredDrawCallbackProperty()
visible = DeferredDrawCallbackProperty()
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(MatplotlibLayerArtist, self).__init__(layer)
# Keep a reference to the layer (data or subset) and axes
self.axes = axes
self._viewer_state = viewer_state
# Set up a state object for the layer artist
self.layer = layer or layer_state.layer
self.state = layer_state or self._layer_state_cls(viewer_state=viewer_state,
layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
self.mpl_artists = []
self.zorder = self.state.zorder
self.visible = self.state.visible
self._sync_zorder = keep_in_sync(self, 'zorder', self.state, 'zorder')
self._sync_visible = keep_in_sync(self, 'visible', self.state, 'visible')
def clear(self):
for artist in self.mpl_artists:
try:
artist.set_visible(False)
except AttributeError: # can happen for e.g. errorbars
pass
def remove(self):
for artist in self.mpl_artists:
try:
artist.remove()
except ValueError: # already removed
pass
except TypeError: # can happen for e.g. errorbars
pass
except AttributeError: # can happen for Matplotlib 1.4
pass
self.mpl_artists[:] = []
def get_layer_color(self):
return self.state.color
def redraw(self):
self.axes.figure.canvas.draw()
def __gluestate__(self, context):
return dict(state=context.id(self.state))
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/matplotlib/layer_artist.py",
"copies": "2",
"size": "2239",
"license": "bsd-3-clause",
"hash": 6687584650179023000,
"line_mean": 33.4461538462,
"line_max": 84,
"alpha_frac": 0.6154533274,
"autogenerated": false,
"ratio": 4.0783242258652095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5693777553265209,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.utils.qt.mime import PyMimeData
__all__ = ['GlueItemWidget']
class GlueItemWidget(object):
"""
    A mixin for QtWidgets.QListWidget/GlueTreeWidget subclasses, providing
    drag+drop functionality.
"""
# Implementation detail: QXXWidgetItems are unhashable in PySide,
    # and cannot be used as dictionary keys. We hash on IDs instead.
SUPPORTED_MIME_TYPE = None
def __init__(self, parent=None):
super(GlueItemWidget, self).__init__(parent)
self._mime_data = {}
self.setDragEnabled(True)
def mimeTypes(self):
"""
Return the list of MIME Types supported for this object.
"""
types = [self.SUPPORTED_MIME_TYPE]
return types
def mimeData(self, selected_items):
"""
Return a list of MIME data associated with the each selected item.
Parameters
----------
selected_items : list
A list of ``QtWidgets.QListWidgetItems`` or ``QtWidgets.QTreeWidgetItems`` instances
Returns
-------
result : list
A list of MIME objects
"""
try:
data = [self.get_data(i) for i in selected_items]
except KeyError:
data = None
result = PyMimeData(data, **{self.SUPPORTED_MIME_TYPE: data})
# apparent bug in pyside garbage collects custom mime
# data, and crashes. Save result here to avoid
self._mime = result
return result
def get_data(self, item):
"""
Convenience method to fetch the data associated with a ``QxxWidgetItem``.
"""
# return item.data(Qt.UserRole)
return self._mime_data.get(id(item), None)
def set_data(self, item, data):
"""
Convenience method to set data associated with a ``QxxWidgetItem``.
"""
#item.setData(Qt.UserRole, data)
self._mime_data[id(item)] = data
def drop_data(self, item):
self._mime_data.pop(id(item))
@property
def data(self):
return self._mime_data
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/qt/mixins.py",
"copies": "2",
"size": "2139",
"license": "bsd-3-clause",
"hash": 7885272700538479000,
"line_mean": 27.1447368421,
"line_max": 96,
"alpha_frac": 0.597942964,
"autogenerated": false,
"ratio": 4.145348837209302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006170347921601843,
"num_lines": 76
} |
from __future__ import absolute_import, division, print_function
from glue.viewers.common.qt.data_viewer_with_state import DataViewerWithState
from glue.viewers.matplotlib.qt.widget import MplWidget
from glue.viewers.common.viz_client import init_mpl, update_appearance_from_settings
from glue.external.echo import delay_callback
from glue.utils import defer_draw
from glue.utils.decorators import avoid_circular
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.viewers.matplotlib.state import MatplotlibDataViewerState
from glue.core.command import ApplySubsetState
__all__ = ['MatplotlibDataViewer']
class MatplotlibDataViewer(DataViewerWithState):
_toolbar_cls = MatplotlibViewerToolbar
_state_cls = MatplotlibDataViewerState
def __init__(self, session, parent=None, wcs=None, state=None):
super(MatplotlibDataViewer, self).__init__(session, parent, state=state)
# Use MplWidget to set up a Matplotlib canvas inside the Qt window
self.mpl_widget = MplWidget()
self.setCentralWidget(self.mpl_widget)
# TODO: shouldn't have to do this
self.central_widget = self.mpl_widget
self.figure, self._axes = init_mpl(self.mpl_widget.canvas.fig, wcs=wcs)
self.state.add_callback('aspect', self.update_aspect)
self.update_aspect()
self.state.add_callback('x_min', self.limits_to_mpl)
self.state.add_callback('x_max', self.limits_to_mpl)
self.state.add_callback('y_min', self.limits_to_mpl)
self.state.add_callback('y_max', self.limits_to_mpl)
self.limits_to_mpl()
self.state.add_callback('x_log', self.update_x_log, priority=1000)
self.state.add_callback('y_log', self.update_y_log, priority=1000)
self.update_x_log()
self.axes.callbacks.connect('xlim_changed', self.limits_from_mpl)
self.axes.callbacks.connect('ylim_changed', self.limits_from_mpl)
self.axes.set_autoscale_on(False)
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def redraw(self):
self.figure.canvas.draw()
@defer_draw
def update_x_log(self, *args):
self.axes.set_xscale('log' if self.state.x_log else 'linear')
self.redraw()
@defer_draw
def update_y_log(self, *args):
self.axes.set_yscale('log' if self.state.y_log else 'linear')
self.redraw()
def update_aspect(self, aspect=None):
self.axes.set_aspect(self.state.aspect, adjustable='datalim')
@avoid_circular
def limits_from_mpl(self, *args):
with delay_callback(self.state, 'x_min', 'x_max', 'y_min', 'y_max'):
self.state.x_min, self.state.x_max = self.axes.get_xlim()
self.state.y_min, self.state.y_max = self.axes.get_ylim()
@avoid_circular
def limits_to_mpl(self, *args):
if self.state.x_min is not None and self.state.x_max is not None:
self.axes.set_xlim(self.state.x_min, self.state.x_max)
if self.state.y_min is not None and self.state.y_max is not None:
self.axes.set_ylim(self.state.y_min, self.state.y_max)
if self.state.aspect == 'equal':
# FIXME: for a reason I don't quite understand, dataLim doesn't
# get updated immediately here, which means that there are then
# issues in the first draw of the image (the limits are such that
# only part of the image is shown). We just set dataLim manually
# to avoid this issue.
self.axes.dataLim.intervalx = self.axes.get_xlim()
self.axes.dataLim.intervaly = self.axes.get_ylim()
# We then force the aspect to be computed straight away
self.axes.apply_aspect()
# And propagate any changes back to the state since we have the
# @avoid_circular decorator
with delay_callback(self.state, 'x_min', 'x_max', 'y_min', 'y_max'):
self.state.x_min, self.state.x_max = self.axes.get_xlim()
self.state.y_min, self.state.y_max = self.axes.get_ylim()
self.axes.figure.canvas.draw()
# TODO: shouldn't need this!
@property
def axes(self):
return self._axes
def _update_appearance_from_settings(self, message=None):
update_appearance_from_settings(self.axes)
self.redraw()
def get_layer_artist(self, cls, layer=None, layer_state=None):
return cls(self.axes, self.state, layer=layer, layer_state=layer_state)
def apply_roi(self, roi):
if len(self.layers) > 0:
subset_state = self._roi_to_subset_state(roi)
cmd = ApplySubsetState(data_collection=self._data,
subset_state=subset_state)
self._session.command_stack.do(cmd)
else:
# Make sure we force a redraw to get rid of the ROI
self.axes.figure.canvas.draw()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/matplotlib/qt/data_viewer.py",
"copies": "1",
"size": "4960",
"license": "bsd-3-clause",
"hash": -6686820566251339000,
"line_mean": 37.75,
"line_max": 84,
"alpha_frac": 0.6491935484,
"autogenerated": false,
"ratio": 3.4855938158819395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4634787364281939,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.util import update_ticks
from glue.core.roi import RangeROI
from glue.core import command
from glue.viewers.matplotlib.qt.data_viewer import MatplotlibDataViewer
from glue.viewers.histogram.qt.layer_style_editor import HistogramLayerStyleEditor
from glue.viewers.histogram.layer_artist import HistogramLayerArtist
from glue.viewers.histogram.qt.options_widget import HistogramOptionsWidget
from glue.viewers.histogram.state import HistogramViewerState
from glue.viewers.histogram.compat import update_histogram_viewer_state
__all__ = ['HistogramViewer']
class HistogramViewer(MatplotlibDataViewer):
LABEL = '1D Histogram'
_toolbar_cls = MatplotlibViewerToolbar
_layer_style_widget_cls = HistogramLayerStyleEditor
_state_cls = HistogramViewerState
_options_cls = HistogramOptionsWidget
_data_artist_cls = HistogramLayerArtist
_subset_artist_cls = HistogramLayerArtist
tools = ['select:xrange']
def __init__(self, session, parent=None, state=None):
super(HistogramViewer, self).__init__(session, parent, state=state)
self.state.add_callback('x_att', self._update_axes)
self.state.add_callback('x_log', self._update_axes)
self.state.add_callback('normalize', self._update_axes)
def _update_axes(self, *args):
if self.state.x_att is not None:
# Update ticks, which sets the labels to categories if components are categorical
update_ticks(self.axes, 'x', self.state._get_x_components(), False)
if self.state.x_log:
self.axes.set_xlabel('Log ' + self.state.x_att.label)
else:
self.axes.set_xlabel(self.state.x_att.label)
if self.state.normalize:
self.axes.set_ylabel('Normalized number')
else:
self.axes.set_ylabel('Number')
self.axes.figure.canvas.draw()
# TODO: move some of the ROI stuff to state class?
def _roi_to_subset_state(self, roi):
# TODO Does subset get applied to all data or just visible data?
bins = self.state.bins
x = roi.to_polygon()[0]
lo, hi = min(x), max(x)
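        # Snap the ROI limits outward to existing bin edges so that the
        # selection always covers whole bins.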
if lo >= bins.min():
lo = bins[bins <= lo].max()
if hi <= bins.max():
hi = bins[bins >= hi].min()
roi_new = RangeROI(min=lo, max=hi, orientation='x')
x_comp = self.state.x_att.parent.get_component(self.state.x_att)
return x_comp.subset_from_roi(self.state.x_att, roi_new, coord='x')
@staticmethod
def update_viewer_state(rec, context):
return update_histogram_viewer_state(rec, context)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/histogram/qt/data_viewer.py",
"copies": "1",
"size": "2818",
"license": "bsd-3-clause",
"hash": 1026471101702943100,
"line_mean": 34.225,
"line_max": 93,
"alpha_frac": 0.6788502484,
"autogenerated": false,
"ratio": 3.6035805626598467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4782430811059847,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from gridded.pysgrid.sgrid import SGrid
from gridded.pyugrid.ugrid import UGrid
import numpy as np
from gridded.utilities import get_dataset, gen_celltree_mask_from_center_mask
from six import string_types
class GridBase(object):
'''
Base object for grids to share common behavior
'''
_def_count = 0
def __init__(self,
filename=None,
*args,
**kwargs):
"""
Init common to all Grid types. This initializer will take all the kwargs of both
        pyugrid.UGrid and pysgrid.SGrid. See their documentation for details.
:param filename: Name of the file this grid was constructed from, if available.
"""
if 'name' in kwargs:
self.name = kwargs['name']
else:
self.name = self.__class__.__name__ + '_' + str(type(self)._def_count)
self.filename = filename
type(self)._def_count += 1
super(GridBase, self).__init__(**kwargs)
@classmethod
def from_netCDF(cls, *args, **kwargs):
kwargs['grid_type'] = cls
return Grid.from_netCDF(*args, **kwargs)
@classmethod
def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None,):
'''
This function is the top level 'search for attributes' function. If there are any
        attributes common to all potential grid types, they will be sought here.
This function returns a dict, which maps an attribute name to a netCDF4
Variable or numpy array object extracted from the dataset. When called from
Grid_U or Grid_S, this function should provide all the kwargs needed to
create a valid instance.
'''
gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables
gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()])
init_args = {}
gt = {}
init_args['filename'] = filename
node_attrs = ['node_lon', 'node_lat']
node_coord_names = [['node_lon', 'node_lat'],
['lon', 'lat'],
['lon_psi', 'lat_psi'],
['longitude', 'latitude']]
composite_node_names = ['nodes', 'node']
if grid_topology is None:
for n1, n2 in node_coord_names:
if n1 in gf_vars and n2 in gf_vars:
init_args[node_attrs[0]] = gf_vars[n1][:]
init_args[node_attrs[1]] = gf_vars[n2][:]
gt[node_attrs[0]] = n1
gt[node_attrs[1]] = n2
break
if node_attrs[0] not in init_args:
for n in composite_node_names:
if n in gf_vars:
v = gf_vars[n][:].reshape(-1, 2)
init_args[node_attrs[0]] = v[:, 0]
init_args[node_attrs[1]] = v[:, 1]
gt['node_coordinates'] = n
break
if node_attrs[0] not in init_args:
raise ValueError('Unable to find node coordinates.')
else:
for n, v in grid_topology.items():
if n in node_attrs:
init_args[n] = gf_vars[v][:]
if n in composite_node_names:
v = gf_vars[n][:].reshape(-1, 2)
init_args[node_attrs[0]] = v[:, 0]
init_args[node_attrs[1]] = v[:, 1]
return init_args, gt
@property
def shape(self):
return self.node_lon.shape
def __eq__(self, o):
if self is o:
return True
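        # Grids compare equal when their node and face arrays (where present
        # on both objects) have identical shapes and contents.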
for n in ('nodes', 'faces'):
if (hasattr(self, n) and
hasattr(o, n) and
getattr(self, n) is not None and
getattr(o, n) is not None):
s = getattr(self, n)
s2 = getattr(o, n)
if s.shape != s2.shape or np.any(s != s2):
return False
return True
def _write_grid_to_file(self, pth):
self.save_as_netcdf(pth)
def import_variable(self, variable, location='node'):
"""
Takes a Variable or VectorVariable and interpolates the data onto this grid.
        You may pass a location ('nodes', 'faces', 'edge1', 'edge2') and the
variable will be interpolated there if possible
If no location is passed, the variable will be interpolated to the
nodes of this grid. If the Variable's grid and this grid are the same, this
function will return the Variable unchanged.
If this grid covers area that the source grid does not, all values
        in this area will be masked. If regridding from cell centers to the nodes,
        the values of any border point not within the source grid will be equal to
        the value at the center of the border cell.
"""
raise NotImplementedError("GridBase cannot interpolate variables to itself")
class Grid_U(GridBase, UGrid):
@classmethod
def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None):
gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables
gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()])
# Get superset attributes
init_args, gt = super(Grid_U, cls)._find_required_grid_attrs(filename=filename,
dataset=dataset,
grid_topology=grid_topology)
face_attrs = ['faces']
face_var_names = ['faces', 'tris', 'nv', 'ele']
if grid_topology is None:
for n in face_var_names:
if n in gf_vars:
init_args[face_attrs[0]] = gf_vars[n][:]
gt[face_attrs[0]] = n
break
if face_attrs[0] not in init_args:
raise ValueError('Unable to find face connectivity array.')
else:
for n, v in grid_topology.items():
if n in face_attrs:
init_args[n] = gf_vars[v][:]
break
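        # A (3, N) connectivity array is treated as transposed, 1-based
        # indexing, so flip it to (N, 3) and shift to 0-based.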
if init_args['faces'].shape[0] == 3:
init_args['faces'] = np.ascontiguousarray(np.array(init_args['faces']).T - 1)
return init_args, gt
@classmethod
def gen_from_quads(cls, nodes):
if not len(nodes.shape) == 3:
raise ValueError('Nodes of a quad grid must be 2 dimensional')
lin_nodes = None
if isinstance(nodes, np.ma.MaskedArray):
lin_nodes = nodes.reshape(-1, 2)[nodes]
class Grid_S(GridBase, SGrid):
@classmethod
def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None):
# THESE ARE ACTUALLY ALL OPTIONAL. This should be migrated when optional attributes
# are dealt with
# Get superset attributes
gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables
gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()])
init_args, gt = super(Grid_S, cls)._find_required_grid_attrs(filename,
dataset=dataset,
grid_topology=grid_topology)
center_attrs = ['center_lon', 'center_lat']
edge1_attrs = ['edge1_lon', 'edge1_lat']
edge2_attrs = ['edge2_lon', 'edge2_lat']
node_mask = 'node_mask'
center_mask = 'center_mask'
edge1_mask = 'edge1_mask'
edge2_mask = 'edge2_mask'
center_coord_names = [['center_lon', 'center_lat'], ['lon_rho', 'lat_rho'], ['lonc', 'latc']]
edge1_coord_names = [['edge1_lon', 'edge1_lat'], ['lon_u', 'lat_u']]
edge2_coord_names = [['edge2_lon', 'edge2_lat'], ['lon_v', 'lat_v']]
node_mask_names = ['mask_psi']
center_mask_names = ['mask_rho']
edge1_mask_names = ['mask_u']
edge2_mask_names = ['mask_v']
if grid_topology is None:
for attr, names, maskattr, maskname in (zip((center_attrs, edge1_attrs, edge2_attrs),
(center_coord_names, edge1_coord_names, edge2_coord_names),
(center_mask, edge1_mask, edge2_mask),
(center_mask_names, edge1_mask_names, edge2_mask_names))):
for n1, n2 in names:
if n1 in gf_vars and n2 in gf_vars:
mask = False
#for n in maskname:
#if n in gf_vars:
#mask = gen_mask(gf_vars[n])
a1 = gf_vars[n1][:]
a2 = gf_vars[n2][:]
init_args[attr[0]] = a1
init_args[attr[1]] = a2
if maskname[0] in gf_vars:
init_args[maskattr] = gf_vars[maskname[0]]
gt[maskattr] = maskname[0]
gt[attr[0]] = n1
gt[attr[1]] = n2
break
if 'node_lon' in init_args and 'node_lat' in init_args:
mask = False
for name in node_mask_names:
if name in gf_vars:
init_args[node_mask] = gf_vars[name]
gt[node_mask] = name
else:
for n, v in grid_topology.items():
if n in center_attrs + edge1_attrs + edge2_attrs and v in gf_vars:
init_args[n] = gf_vars[v][:]
return init_args, gt
class Grid_R(GridBase):
def __init__(self,
node_lon=None,
node_lat=None,
grid_topology=None,
dimensions=None,
node_dimensions=None,
node_coordinates=None,
*args,
**kwargs):
self.node_lon = node_lon
self.node_lat = node_lat
self.grid_topology = grid_topology
self.dimensions = dimensions
self.node_dimensions = node_dimensions
self.node_coordinates = node_coordinates
        super(Grid_R, self).__init__(*args, **kwargs)
@classmethod
def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None):
# THESE ARE ACTUALLY ALL OPTIONAL. This should be migrated when optional attributes
# are dealt with
# Get superset attributes
gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables
gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()] )
init_args, gt = super(Grid_R, cls)._find_required_grid_attrs(filename,
dataset=dataset,
grid_topology=grid_topology)
# Grid_R only needs node_lon and node_lat. However, they must be a specific shape (1D)
node_lon = init_args['node_lon']
node_lat = init_args['node_lat']
if len(node_lon.shape) != 1:
raise ValueError('Too many dimensions in node_lon. Must be 1D, was {0}D'.format(len(node_lon.shape)))
if len(node_lat.shape) != 1:
raise ValueError('Too many dimensions in node_lat. Must be 1D, was {0}D'.format(len(node_lat.shape)))
return init_args, gt
@property
def nodes(self):
return np.stack((np.meshgrid(self.node_lon, self.node_lat)), axis=-1)
@property
def center_lon(self):
return (self.node_lon[0:-1] + self.node_lon[1:]) / 2
@property
def center_lat(self):
return (self.node_lat[0:-1] + self.node_lat[1:]) / 2
@property
def centers(self):
return np.stack((np.meshgrid(self.center_lon, self.center_lat)), axis=-1)
def locate_faces(self,
points):
"""
Returns the node grid indices, one per point.
Points that are not on the node grid will have an index of -1
If a single point is passed in, a single index will be returned.
        If a sequence of points is passed in, an array of indices will be returned.
:param points: The points that you want to locate -- (lon, lat). If the shape of point
is 1D, function will return a scalar index. If it is 2D, it will return
a 1D array of indices.
:type points: array-like containing one or more points: shape (2,) for one point,
shape (N, 2) for more than one point.
"""
points = np.asarray(points, dtype=np.float64)
just_one = (points.ndim == 1)
points = points.reshape(-1, 2)
lons = points[:, 0]
lats = points[:, 1]
lon_idxs = np.digitize(lons, self.node_lon) - 1
for i, n in enumerate(lon_idxs):
if n == len(self.node_lon) - 1:
lon_idxs[i] = -1
# if n == 0 and not lons[i] < self.node_lon.max() and not lons[i] >= self.node_lon.min():
# lon_idxs[i] = -1
lat_idxs = np.digitize(lats, self.node_lat) - 1
for i, n in enumerate(lat_idxs):
if n == len(self.node_lat) -1:
lat_idxs[i] = -1
# if n == 0 and not lats[i] < self.node_lat.max() and not lats[i] >= self.node_lat.min():
# lat_idxs[i] = -1
idxs = np.column_stack((lon_idxs, lat_idxs))
idxs[:,0] = np.where(idxs[:,1] == -1, -1, idxs[:,0])
idxs[:,1] = np.where(idxs[:,0] == -1, -1, idxs[:,1])
if just_one:
res = idxs[0]
return res
else:
return idxs
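    # Editor's sketch (not part of the original module), assuming GridBase's
    # constructor needs no extra required arguments. The coordinates below are
    # made up; the expected result follows from the np.digitize logic above.
    #
    #     import numpy as np
    #     g = Grid_R(node_lon=np.linspace(-10., 10., 21),
    #                node_lat=np.linspace(40., 50., 11))
    #     g.locate_faces(np.array([[-9.5, 40.5],
    #                              [100.0, 0.0]]))
    #     # -> array([[ 0,  0],
    #     #           [-1, -1]])    second point is off the node grid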
def interpolate_var_to_points(self,
points,
variable,
method='linear',
indices=None,
slices=None,
mask=None,
**kwargs):
try:
from scipy.interpolate import RegularGridInterpolator
except ImportError:
raise ImportError("The scipy package is required to use "
"Grid_R.interpolate_var_to_points\n"
" -- interpolating a regular grid")
points = np.asarray(points, dtype=np.float64)
just_one = (points.ndim == 1)
points = points.reshape(-1, 2)
if slices is not None:
variable = variable[slices]
if np.ma.isMA(variable):
variable = variable.filled(0) #eventually should use Variable fill value
x = self.node_lon if variable.shape[0] == len(self.node_lon) else self.node_lat
y = self.node_lat if x is self.node_lon else self.node_lon
interp_func = RegularGridInterpolator((x, y),
variable,
method=method,
bounds_error=False,
fill_value=0)
if x is self.node_lon:
vals = interp_func(points, method=method)
else:
vals = interp_func(points[:, ::-1], method=method)
if just_one:
return vals[0]
else:
return vals
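    # Editor's sketch (not part of the original module); requires scipy and
    # reuses the same small illustrative grid as the sketch above.
    #
    #     import numpy as np
    #     g = Grid_R(node_lon=np.linspace(-10., 10., 21),
    #                node_lat=np.linspace(40., 50., 11))
    #     var = np.arange(21 * 11, dtype=float).reshape(21, 11)   # (lon, lat)
    #     g.interpolate_var_to_points(np.array([[-9.5, 40.5]]), var)
    #     # -> array([ 6.])    bilinear value between the four surrounding nodes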
def infer_location(self, variable):
"""
fixme: should first look for "location" attribute.
But now we are checking variable dimensions to which part
of the grid it is on.
"""
shape = None
node_shape = self.nodes.shape[0:-1]
# centers_shape = self.centers.shape[0:-1]
try:
shape = np.array(variable.shape)
except:
return None # Variable has no shape attribute!
if len(variable.shape) < 2:
return None
difference = (shape[-2:] - node_shape).tolist()
if (difference == [1, 1] or difference == [-1, -1]) and self.center_lon is not None:
return 'center'
elif difference == [1, 0] and self.edge1_lon is not None:
return 'edge1'
elif difference == [0, 1] and self.edge2_lon is not None:
return 'edge2'
elif difference == [0, 0] and self.node_lon is not None:
return 'node'
else:
return None
class Grid(object):
'''
Factory class that generates grid objects. Also handles common
loading and parsing operations
'''
def __init__(self):
'''
Init common to all Grid types. This constructor will take all the kwargs of both
pyugrid.UGrid and pysgrid.SGrid. See their documentation for details
:param filename: Name of the file this grid was constructed from, if available.
'''
raise NotImplementedError("Grid is not meant to be instantiated. "
"Please use the from_netCDF function. "
"or initialize the type of grid you want directly")
@staticmethod
def _load_grid(filename, grid_type, dataset=None):
'''
Redirect to grid-specific loading routine.
'''
if issubclass(grid_type, UGrid):
return grid_type.from_ncfile(filename)
elif issubclass(grid_type, SGrid):
ds = get_dataset(filename, dataset)
g = grid_type.load_grid(ds)
g.filename = filename
return g
else:
return grid_type.from_ncfile(filename)
pass
@staticmethod
def from_netCDF(filename=None,
dataset=None,
grid_type=None,
grid_topology=None,
_default_types=(('ugrid', Grid_U),
('sgrid', Grid_S),
('rgrid', Grid_R)),
*args,
**kwargs):
'''
:param filename: File containing a grid
:param dataset: Takes precedence over filename, if provided.
:param grid_type: Must be provided if Dataset does not have a 'grid_type' attribute,
or valid topology variable
:param grid_topology: A dictionary mapping of grid attribute to variable name.
Takes precedence over discovered attributes
:param kwargs: All kwargs to SGrid, UGrid, or RGrid are valid, and take precedence
over all.
:returns: Instance of Grid_U, Grid_S, or Grid_R
'''
gf = dataset if filename is None else get_dataset(filename, dataset)
if gf is None:
raise ValueError('No filename or dataset provided')
cls = grid_type
if (grid_type is None or
isinstance(grid_type, string_types) or
not issubclass(grid_type, GridBase)):
cls = Grid._get_grid_type(gf, grid_type, grid_topology, _default_types)
# if grid_topology is passed in, don't look for the variable
if not grid_topology:
compliant = Grid._find_topology_var(None, gf)
else:
compliant = None
if compliant is not None:
c = Grid._load_grid(filename, cls, dataset)
c.grid_topology = compliant.__dict__
else:
init_args, gt = cls._find_required_grid_attrs(filename,
dataset=dataset,
grid_topology=grid_topology)
c = cls(**init_args)
c.grid_topology = gt
return c
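    # Editor's sketch (not part of the original module); 'my_model.nc' is a
    # placeholder filename.
    #
    #     grid = Grid.from_netCDF('my_model.nc')                     # type auto-detected
    #     grid = Grid.from_netCDF('my_model.nc', grid_type='sgrid')  # force Grid_S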
@staticmethod
def _get_grid_type(dataset,
grid_type=None,
grid_topology=None,
_default_types=(('ugrid', Grid_U),
('sgrid', Grid_S),
('rgrid', Grid_R))):
        # fixme: this logic should probably be deferred to
# the grid type code -- that is, ask each grid
# type if this dataset is its type.
#
# It also should be refactored to start with the standards
        # and maybe have a pedantic mode where it won't load non-standard
# files
if _default_types is None:
_default_types = dict()
else:
_default_types = dict(_default_types)
Grid_U = _default_types.get('ugrid', None)
Grid_S = _default_types.get('sgrid', None)
Grid_R = _default_types.get('rgrid', None)
sgrid_names = ['sgrid', 'pygrid_s', 'staggered', 'curvilinear', 'roms']
ugrid_names = ['ugrid', 'pygrid_u', 'triangular', 'unstructured']
rgrid_names = ['rgrid', 'regular', 'rectangular', 'rectilinear']
if grid_type is not None:
if grid_type.lower() in sgrid_names:
return Grid_S
elif grid_type.lower() in ugrid_names:
return Grid_U
elif grid_type.lower() in rgrid_names:
return Grid_R
else:
raise ValueError('Specified grid_type not recognized/supported')
if grid_topology is not None:
if ('faces' in grid_topology.keys() or
grid_topology.get('grid_type', 'notype').lower() in ugrid_names):
return Grid_U
elif grid_topology.get('grid_type', 'notype').lower() in rgrid_names:
return Grid_R
else:
return Grid_S
else:
# no topology, so search dataset for grid_type variable
if (hasattr(dataset, 'grid_type') and
dataset.grid_type in sgrid_names + ugrid_names):
if dataset.grid_type.lower() in ugrid_names:
return Grid_U
elif dataset.grid_type.lower() in rgrid_names:
return Grid_R
else:
return Grid_S
else:
# TODO: Determine an effective decision tree for picking if
# a topology variable is present
# no grid type explicitly specified. is a topology variable present?
topology = Grid._find_topology_var(None, dataset=dataset)
if topology is not None:
if (hasattr(topology, 'node_coordinates') and
not hasattr(topology, 'node_dimensions')):
return Grid_U
else:
return Grid_S
else:
# no topology variable either, so generate and try again.
# if no defaults are found, _gen_topology will raise an error
try:
u_init_args, u_gf_vars = Grid_U._find_required_grid_attrs(None, dataset)
return Grid_U
except ValueError:
try:
r_init_args, r_gf_vars = Grid_R._find_required_grid_attrs(None, dataset)
return Grid_R
except ValueError:
try:
s_init_args, s_gf_vars = Grid_S._find_required_grid_attrs(None, dataset)
except ValueError:
raise ValueError("Can not figure out what type of grid this is. "
"Try specifying the grid_topology attributes "
"or specifying the grid type")
return Grid_S
@staticmethod
def _find_topology_var(filename,
dataset=None):
gf = get_dataset(filename, dataset)
gts = []
for k, v in gf.variables.items():
if hasattr(v, 'cf_role') and 'topology' in v.cf_role:
gts.append(v)
# gts = gf.get_variables_by_attributes(cf_role=lambda t: t is not None and 'topology' in t)
if len(gts) != 0:
return gts[0]
else:
return None
| {
"repo_name": "NOAA-ORR-ERD/gridded",
"path": "gridded/grids.py",
"copies": "1",
"size": "24463",
"license": "unlicense",
"hash": -7558090765451816000,
"line_mean": 40.8886986301,
"line_max": 113,
"alpha_frac": 0.5102399542,
"autogenerated": false,
"ratio": 4.235283933518006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245523887718005,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .hub import Hub, HubListener
from .data import Data
from .link_manager import LinkManager
from .registry import Registry
from .visual import COLORS
from .message import (DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataAddComponentMessage)
from .util import disambiguate
from ..utils import as_list
__all__ = ['DataCollection']
class DataCollection(HubListener):
"""The top-level object for interacting with datasets in Glue.
DataCollections have the following responsibilities:
* Providing a way to retrieve and store data
* Broadcasting messages when data are added or removed
* Keeping each managed data set's list of
:class:`~glue.core.data.DerivedComponent` instances up-to-date
* Creating the hub that all other objects should use to communicate
with one another (stored in ``self.hub``)
"""
def __init__(self, data=None):
"""
:param data: :class:`~glue.core.data.Data` object, or list of such objects
"""
super(DataCollection, self).__init__()
self._link_manager = LinkManager()
self._data = []
self.hub = None
self._subset_groups = []
self.register_to_hub(Hub())
self.extend(as_list(data or []))
self._sg_count = 0
@property
def data(self):
""" The :class:`~glue.core.data.Data` objects in the collection """
return self._data
def append(self, data):
""" Add a new dataset to this collection.
Appending emits a DataCollectionAddMessage.
It also updates the list of DerivedComponents that each
data set can work with.
:param data: :class:`~glue.core.data.Data` object to add
"""
if isinstance(data, list):
self.extend(data)
return
if data in self:
return
self._data.append(data)
if self.hub:
data.register_to_hub(self.hub)
for s in data.subsets:
s.register()
msg = DataCollectionAddMessage(self, data)
self.hub.broadcast(msg)
self._sync_link_manager()
def extend(self, data):
"""Add several new datasets to this collection
See :meth:`append` for more information
:param data: List of data objects to add
"""
[self.append(d) for d in data]
def remove(self, data):
""" Remove a data set from the collection
Emits a DataCollectionDeleteMessage
:param data: the object to remove
:type data: :class:`~glue.core.data.Data`
"""
if data not in self._data:
return
self._data.remove(data)
Registry().unregister(data, Data)
if self.hub:
msg = DataCollectionDeleteMessage(self, data)
self.hub.broadcast(msg)
def _sync_link_manager(self):
""" update the LinkManager, so all the DerivedComponents
for each data set are up-to-date
"""
# add any links in the data
for d in self._data:
for derived in d.derived_components:
self._link_manager.add_link(d.get_component(derived).link)
for link in d.coordinate_links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
@property
def links(self):
"""
Tuple of :class:`~glue.core.component_link.ComponentLink` objects.
"""
return tuple(self._link_manager.links)
def add_link(self, links):
"""Add one or more links to the data collection.
This will auto-update the components in each data set
:param links:
The links to add. A scalar or list of
:class:`~glue.core.component_link.ComponentLink`
instances, or a :class:`~glue.core.link_helpers.LinkCollection`
"""
self._link_manager.add_link(links)
for d in self._data:
self._link_manager.update_data_components(d)
def _merge_link(self, link):
pass
def set_links(self, links):
"""Override the links in the collection, and update data
objects as necessary.
:param links: The new links. An iterable of
:class:`~glue.core.component_link.ComponentLink` instances
"""
self._link_manager.clear()
for link in links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
def register_to_hub(self, hub):
""" Register managed data objects to a hub.
:param hub: The hub to register with
:type hub: :class:`~glue.core.hub.Hub`
"""
if self.hub is hub:
return
if self.hub is not None:
raise RuntimeError("Data Collection already registered "
"to a different Hub")
if not isinstance(hub, Hub):
raise TypeError("Input is not a Hub object: %s" % type(hub))
self.hub = hub
# re-assign all data, subset hub instances to this hub
for d in self._data:
d.register_to_hub(hub)
for s in d.subsets:
s.register()
hub.subscribe(self, DataAddComponentMessage,
lambda msg: self._sync_link_manager(),
filter=lambda x: x.sender in self._data)
def new_subset_group(self, label=None, subset_state=None):
"""
Create and return a new Subset Group.
:param label: The label to assign to the group
:type label: str
:param subset_state: The state to initialize the group with
:type subset_state: :class:`~glue.core.subset.SubsetState`
:returns: A new :class:`~glue.core.subset_group.SubsetGroup`
"""
from .subset_group import SubsetGroup
color = COLORS[self._sg_count % len(COLORS)]
self._sg_count += 1
label = label or "%i" % (self._sg_count)
result = SubsetGroup(color=color, label=label, subset_state=subset_state)
self._subset_groups.append(result)
result.register(self)
return result
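    # Editor's sketch (not part of the original module); the label below is
    # invented.
    #
    #     grp = data_collection.new_subset_group(label='bright stars')
    #     grp.label                                  # -> 'bright stars'
    #     data_collection.remove_subset_group(grp)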
def remove_subset_group(self, subset_grp):
"""
Remove an existing :class:`~glue.core.subset_group.SubsetGroup`
"""
if subset_grp not in self._subset_groups:
return
# remove from list first, so that group appears deleted
# by the time the first SubsetDelete message is broadcast
self._subset_groups.remove(subset_grp)
for s in subset_grp.subsets:
s.delete()
subset_grp.unregister(self.hub)
def merge(self, *data):
"""
Merge two or more datasets into a single dataset.
This has the following effects:
All components from all datasets are added to the first argument
All datasets except the first argument are removed from the collection
Any component name conflicts are disambiguated
The pixel and world components apart from the first argument are discarded
:note: All arguments must have the same shape
:param data: One or more :class:`~glue.core.data.Data` instances.
:returns: self
"""
if len(data) < 2:
raise ValueError("merge requires 2 or more arguments")
shp = data[0].shape
for d in data:
if d.shape != shp:
raise ValueError("All arguments must have the same shape")
master = data[0]
for d in data[1:]:
skip = d.pixel_component_ids + d.world_component_ids
for c in d.components:
if c in skip:
continue
if c in master.components: # already present (via a link)
continue
taken = [_.label for _ in master.components]
lbl = c.label
# first-pass disambiguation, try component_data
# also special-case 'PRIMARY', rename to data label
if lbl in taken:
lbl = d.label if lbl == 'PRIMARY' else '%s_%s' % (lbl, d.label)
lbl = disambiguate(lbl, taken)
c._label = lbl
master.add_component(d.get_component(c), c)
self.remove(d)
return self
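    # Editor's sketch (not part of the original module); the Data construction
    # is simplified and the labels are invented.
    #
    #     from glue.core import Data, DataCollection
    #     d1 = Data(x=[1, 2, 3], label='d1')
    #     d2 = Data(y=[4, 5, 6], label='d2')
    #     dc = DataCollection([d1, d2])
    #     dc.merge(d1, d2)     # d2's components are folded into d1
    #     len(dc)              # -> 1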
@property
def subset_groups(self):
"""
tuple of current :class:`Subset Groups <glue.core.subset_group.SubsetGroup>`
"""
return tuple(self._subset_groups)
def __contains__(self, obj):
return obj in self._data or obj in self.subset_groups
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __str__(self):
if len(self) == 1:
result = "DataCollection (1 data set)\n\t"
else:
result = "DataCollection (%i data sets)\n\t" % len(self)
result += '\n\t'.join("%3i: %s" % (i, d.label) for
i, d in enumerate(self))
return result
def __repr__(self):
return self.__str__()
def __bool__(self):
return True
def __nonzero__(self):
return True
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_collection.py",
"copies": "1",
"size": "9476",
"license": "bsd-3-clause",
"hash": -2245078325687599600,
"line_mean": 30.9057239057,
"line_max": 84,
"alpha_frac": 0.5777754327,
"autogenerated": false,
"ratio": 4.230357142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308132575557143,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .hub import HubListener
from .data import Data
from .subset import Subset
from .data_collection import DataCollection
from .message import (DataUpdateMessage,
SubsetUpdateMessage,
SubsetCreateMessage,
SubsetDeleteMessage,
DataCollectionDeleteMessage,
NumericalDataChangedMessage)
__all__ = ['Client', 'BasicClient']
class Client(HubListener):
"""
Base class for interaction / visualization modules
Attributes
----------
data: DataCollection instance
The data associated with this client.
"""
def __init__(self, data):
"""
Create a new client object.
Parameters
----------
data: Data, DataCollection, or list of data
The primary data associated with this client.
Raises
------
TypeError: If the data input is the wrong type
"""
super(Client, self).__init__()
self._data = data
if not isinstance(data, DataCollection):
raise TypeError("Input data must be a DataCollection: %s"
% type(data))
@property
def data(self):
""" Returns the data collection """
return self._data
def register_to_hub(self, hub):
"""The main method to establish a link with a hub,
and set up event handlers. For common message types
Client subclasses at a minimum should override these methods
to provide functionality:
_add_subset
_update_subset
_remove_subset
_remove_data
Clients can also override register_to_hub to add additional
event handlers.
Attributes
----------
hub: The hub to subscribe to
"""
has_data = lambda x: x.sender.data in self._data
has_data_collection = lambda x: x.sender is self._data
hub.subscribe(self,
SubsetCreateMessage,
handler=self._add_subset,
filter=has_data)
hub.subscribe(self,
SubsetUpdateMessage,
handler=self._update_subset,
filter=has_data)
hub.subscribe(self,
SubsetDeleteMessage,
handler=self._remove_subset,
filter=has_data)
hub.subscribe(self,
DataUpdateMessage,
handler=self._update_data,
filter=has_data)
hub.subscribe(self,
NumericalDataChangedMessage,
handler=self._numerical_data_changed,
filter=has_data)
hub.subscribe(self,
DataCollectionDeleteMessage,
handler=self._remove_data,
filter=has_data_collection)
def _add_subset(self, message):
raise NotImplementedError
def _remove_data(self, message):
raise NotImplementedError
def _remove_subset(self, message):
raise NotImplementedError
def _update_data(self, message):
""" Default handler for DataMessage """
raise NotImplementedError
def _update_subset(self, message):
""" Default handler for SubsetUpdateMessage """
raise NotImplementedError
def apply_roi(self, roi):
raise NotImplementedError
def _numerical_data_changed(self, message):
raise NotImplementedError
class BasicClient(Client):
def _add_subset(self, message):
subset = message.subset
self.add_layer(subset)
def _update_subset(self, message):
subset = message.subset
self.update_layer(subset)
def _remove_subset(self, message):
subset = message.subset
self.remove_layer(subset)
def _remove_data(self, message):
self.remove_layer(message.data)
def _update_data(self, message):
self.update_layer(message.data)
def add_layer(self, layer):
if self.layer_present(layer):
return
if layer.data not in self.data:
raise TypeError("Data not in collection")
if isinstance(layer, Data):
self._do_add_data(layer)
for subset in layer.subsets:
self.add_layer(subset)
else:
if not self.layer_present(layer.data):
self.add_layer(layer.data)
else:
self._do_add_subset(layer)
self.update_layer(layer)
def update_layer(self, layer):
if not self.layer_present(layer):
return
if isinstance(layer, Subset):
self._do_update_subset(layer)
else:
self._do_update_data(layer)
def remove_layer(self, layer):
if not self.layer_present(layer):
return
if isinstance(layer, Data):
self._do_remove_data(layer)
for subset in layer.subsets:
self._do_remove_subset(subset)
else:
self._do_remove_subset(layer)
def _do_add_data(self, data):
raise NotImplementedError
def _do_add_subset(self, subset):
raise NotImplementedError
def _do_update_subset(self, subset):
raise NotImplementedError
def _do_update_data(self, data):
raise NotImplementedError
def _do_remove_subset(self, subset):
raise NotImplementedError
def _do_remove_data(self, data):
raise NotImplementedError
def layer_present(self, layer):
raise NotImplementedError
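# Editor's sketch (not part of the original module): a minimal concrete
# BasicClient that just tracks its layers in a set; the class name and the
# print calls are placeholders.
#
#     class PrintingClient(BasicClient):
#         def __init__(self, data):
#             super(PrintingClient, self).__init__(data)
#             self._layers = set()
#         def layer_present(self, layer):
#             return layer in self._layers
#         def _do_add_data(self, data):
#             self._layers.add(data)
#         def _do_add_subset(self, subset):
#             self._layers.add(subset)
#         def _do_update_data(self, data):
#             print("updated", data.label)
#         def _do_update_subset(self, subset):
#             print("updated", subset.label)
#         def _do_remove_data(self, data):
#             self._layers.discard(data)
#         def _do_remove_subset(self, subset):
#             self._layers.discard(subset)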
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/client.py",
"copies": "1",
"size": "5721",
"license": "bsd-3-clause",
"hash": 4271329927683681000,
"line_mean": 27.7487437186,
"line_max": 69,
"alpha_frac": 0.5708792169,
"autogenerated": false,
"ratio": 4.771476230191826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016792399759868682,
"num_lines": 199
} |
from __future__ import absolute_import, division, print_function
from . import IDataDescriptor, Capabilities
def blaze_func_iter(bfd, noiter_dims):
args = bfd.args
dim_size = 1
iters = []
for a, noiter in zip(args, noiter_dims):
if noiter:
iters.append(a)
else:
# TODO handle streaming dimension with no __len__
arg_dim_size = len(a)
if dim_size == 1:
dim_size = arg_dim_size
elif dim_size != arg_dim_size:
raise BroadcastError(('Cannot broadcast dimensions of ' +
'size %d and %d together') % (dim_size, arg_dim_size))
iters.append(a.__iter__())
# TODO continue...
class BlazeFuncDeprecatedDescriptor(IDataDescriptor):
"""
Data descriptor for blaze.bkernel.BlazeFunc
Attributes:
===========
kerneltree: blaze.bkernel.kernel_tree.KernelTree
deferred expression DAG/tree
outdshape: DataShape
result type
argmap: { blaze.bkernel.kernel_tree.Argument : Array }
Keeps track of concrete input arrays
"""
_args = None
deferred = True
def __init__(self, kerneltree, outdshape, argmap):
self.kerneltree = kerneltree
self.outdshape = outdshape
self.argmap = argmap
def _reset_args(self):
from blaze.compute.bkernel.kernel_tree import find_unique_args
unique_args = []
find_unique_args(self.kerneltree, unique_args)
self._args = [self.argmap[argument] for argument in unique_args]
@property
def capabilities(self):
"""The capabilities for the blaze function data descriptor."""
return Capabilities(
immutable = True,
deferred = True,
# persistency is not supported yet
persistent = False,
appendable = False,
remote = False,
)
@property
def args(self):
if self._args is None:
self._reset_args()
return self._args
@property
def isfused(self):
from blaze.compute.bkernel.kernel_tree import Argument
return all(isinstance(child, Argument) for child in self.kerneltree.children)
def fuse(self):
if not self.isfused:
return self.__class__(self.kerneltree.fuse(), self.outdshape)
else:
return self
def _printer(self):
return str(self.kerneltree)
@property
def dshape(self):
return self.outdshape
def __iter__(self):
# Figure out how the outermost dimension broadcasts, by
# subtracting the rank sizes of the blaze func elements from
# the argument dshape sizes
broadcast_dims = [len(a.dshape) - len(e.dshape)
for a, e in zip(self.args, self.kernel_elements)]
ndim = max(broadcast_dims)
if ndim > 1:
# Do a data descriptor-level broadcasting
            noiter_dims = [x == 0 for x in broadcast_dims]
return blaze_func_iter(self, noiter_dims)
elif ndim == 1:
# Do an element-level broadcasting
            raise NotImplementedError
else:
raise IndexError('Cannot iterate over a scalar')
def __getitem__(self, key):
raise NotImplementedError
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/datadescriptor/blaze_func_descriptor.py",
"copies": "2",
"size": "3329",
"license": "bsd-3-clause",
"hash": 5383006264740548000,
"line_mean": 29.5412844037,
"line_max": 86,
"alpha_frac": 0.5872634425,
"autogenerated": false,
"ratio": 4.099753694581281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012897538598303001,
"num_lines": 109
} |
from __future__ import absolute_import, division, print_function
from . import ops
from .groupby import DataArrayGroupBy, DatasetGroupBy
from .pycompat import OrderedDict, dask_array_type
RESAMPLE_DIM = '__resample_dim__'
class Resample(object):
"""An object that extends the `GroupBy` object with additional logic
for handling specialized re-sampling operations.
You should create a `Resample` object by using the `DataArray.resample` or
    `Dataset.resample` methods. The re-sampling dimension is fixed when the object is created.
See Also
--------
DataArray.resample
Dataset.resample
"""
def _upsample(self, method, *args, **kwargs):
"""Dispatch function to call appropriate up-sampling methods on
data.
This method should not be called directly; instead, use one of the
wrapper functions supplied by `Resample`.
Parameters
----------
method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest',
'interpolate'}
Method to use for up-sampling
See Also
--------
Resample.asfreq
Resample.pad
Resample.backfill
Resample.interpolate
"""
upsampled_index = self._full_index
# Drop non-dimension coordinates along the resampled dimension
for k, v in self._obj.coords.items():
if k == self._dim:
continue
if self._dim in v.dims:
self._obj = self._obj.drop(k)
if method == 'asfreq':
return self.mean(self._dim)
elif method in ['pad', 'ffill', 'backfill', 'bfill', 'nearest']:
kwargs = kwargs.copy()
kwargs.update(**{self._dim: upsampled_index})
return self._obj.reindex(method=method, *args, **kwargs)
elif method == 'interpolate':
return self._interpolate(*args, **kwargs)
else:
            raise ValueError('Specified method was "{}" but must be one of '
'"asfreq", "ffill", "bfill", or "interpolate"'
.format(method))
def asfreq(self):
"""Return values of original object at the new up-sampling frequency;
essentially a re-index with new times set to NaN.
"""
return self._upsample('asfreq')
def pad(self):
"""Forward fill new values at up-sampled frequency.
"""
return self._upsample('pad')
ffill = pad
def backfill(self):
"""Backward fill new values at up-sampled frequency.
"""
return self._upsample('backfill')
bfill = backfill
def nearest(self):
"""Take new values from nearest original coordinate to up-sampled
frequency coordinates.
"""
return self._upsample('nearest')
def interpolate(self, kind='linear'):
"""Interpolate up-sampled data using the original data
as knots.
Parameters
----------
kind : str {'linear', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic'}
Interpolation scheme to use
See Also
--------
scipy.interpolate.interp1d
"""
return self._interpolate(kind=kind)
def _interpolate(self, kind='linear'):
raise NotImplementedError
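# Editor's sketch (not part of the original module); the times and values are
# invented, and scipy is assumed for the interpolate() call.
#
#     import numpy as np
#     import pandas as pd
#     import xarray as xr
#     times = pd.date_range('2000-01-01', periods=4, freq='6H')
#     da = xr.DataArray(np.arange(4.), coords=[('time', times)])
#     da.resample(time='1D').mean()                  # down-sample: daily mean
#     da.resample(time='3H').ffill()                 # up-sample: forward fill
#     da.resample(time='3H').interpolate('linear')   # up-sample via interp1d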
class DataArrayResample(DataArrayGroupBy, Resample):
"""DataArrayGroupBy object specialized to time resampling operations over a
specified dimension
"""
def __init__(self, *args, **kwargs):
self._dim = kwargs.pop('dim', None)
self._resample_dim = kwargs.pop('resample_dim', None)
if self._dim == self._resample_dim:
raise ValueError("Proxy resampling dimension ('{}') "
"cannot have the same name as actual dimension "
"('{}')! ".format(self._resample_dim, self._dim))
super(DataArrayResample, self).__init__(*args, **kwargs)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
"""
combined = super(DataArrayResample, self).apply(
func, shortcut=shortcut, **kwargs)
# If the aggregation function didn't drop the original resampling
# dimension, then we need to do so before we can rename the proxy
# dimension we used.
if self._dim in combined.coords:
combined = combined.drop(self._dim)
if self._resample_dim in combined.dims:
combined = combined.rename({self._resample_dim: self._dim})
return combined
def _interpolate(self, kind='linear'):
"""Apply scipy.interpolate.interp1d along resampling dimension."""
from .dataarray import DataArray
from scipy.interpolate import interp1d
if isinstance(self._obj.data, dask_array_type):
raise TypeError(
"Up-sampling via interpolation was attempted on the the "
"variable '{}', but it is a dask array; dask arrays are not "
"yet supported in resample.interpolate(). Load into "
"memory with Dataset.load() before resampling."
.format(self._obj.data.name)
)
x = self._obj[self._dim].astype('float')
y = self._obj.data
axis = self._obj.get_axis_num(self._dim)
f = interp1d(x, y, kind=kind, axis=axis, bounds_error=True,
assume_sorted=True)
new_x = self._full_index.values.astype('float')
# construct new up-sampled DataArray
dummy = self._obj.copy()
dims = dummy.dims
# drop any existing non-dimension coordinates along the resampling
# dimension
coords = OrderedDict()
for k, v in dummy.coords.items():
# is the resampling dimension
if k == self._dim:
coords[self._dim] = self._full_index
# else, check if resampling dim is in coordinate dimensions
elif self._dim not in v.dims:
coords[k] = v
return DataArray(f(new_x), coords, dims, name=dummy.name,
attrs=dummy.attrs)
ops.inject_reduce_methods(DataArrayResample)
ops.inject_binary_ops(DataArrayResample)
class DatasetResample(DatasetGroupBy, Resample):
"""DatasetGroupBy object specialized to resampling a specified dimension
"""
def __init__(self, *args, **kwargs):
self._dim = kwargs.pop('dim', None)
self._resample_dim = kwargs.pop('resample_dim', None)
if self._dim == self._resample_dim:
raise ValueError("Proxy resampling dimension ('{}') "
"cannot have the same name as actual dimension "
"('{}')! ".format(self._resample_dim, self._dim))
super(DatasetResample, self).__init__(*args, **kwargs)
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the groups generated for
resampling and concatenate them together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
combined = self._combine(applied)
return combined.rename({self._resample_dim: self._dim})
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along the
pre-defined resampling dimension.
        Note that `dim` is set by default here and is ignored if passed by the user;
this ensures compatibility with the existing reduce interface.
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
return super(DatasetResample, self).reduce(
func, self._dim, keep_attrs, **kwargs)
def _interpolate(self, kind='linear'):
"""Apply scipy.interpolate.interp1d along resampling dimension."""
from .dataset import Dataset
from .variable import Variable
from scipy.interpolate import interp1d
old_times = self._obj[self._dim].astype(float)
new_times = self._full_index.values.astype(float)
data_vars = OrderedDict()
coords = OrderedDict()
# Apply the interpolation to each DataArray in our original Dataset
for name, variable in self._obj.variables.items():
if name in self._obj.coords:
if name == self._dim:
coords[self._dim] = self._full_index
elif self._dim not in variable.dims:
coords[name] = variable
else:
if isinstance(variable.data, dask_array_type):
raise TypeError(
"Up-sampling via interpolation was attempted on the "
"variable '{}', but it is a dask array; dask arrays "
"are not yet supprted in resample.interpolate(). Load "
"into memory with Dataset.load() before resampling."
.format(name)
)
axis = variable.get_axis_num(self._dim)
# We've previously checked for monotonicity along the
# re-sampling dimension (in __init__ via the GroupBy
# constructor), so we can avoid sorting the data again by
# passing 'assume_sorted=True'
f = interp1d(old_times, variable.data, kind=kind,
axis=axis, bounds_error=True,
assume_sorted=True)
interpolated = Variable(variable.dims, f(new_times))
data_vars[name] = interpolated
return Dataset(data_vars, coords)
ops.inject_reduce_methods(DatasetResample)
ops.inject_binary_ops(DatasetResample)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/resample.py",
"copies": "1",
"size": "13023",
"license": "apache-2.0",
"hash": -5081576379401345000,
"line_mean": 36.1025641026,
"line_max": 79,
"alpha_frac": 0.5866543807,
"autogenerated": false,
"ratio": 4.629576964095272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5716231344795273,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from . import twisted
from ._core import (
Certificate,
DHParameters,
Key,
RSAPrivateKey,
parse,
parse_file,
)
from .twisted import (
certificateOptionsFromFiles as certificateOptionsFromFilesOriginal,
certificateOptionsFromPEMs as certificateOptionsFromPEMsOriginal,
)
__version__ = "15.1.0.dev0"
__author__ = "Hynek Schlawack"
__license__ = "MIT"
__description__ = "Easy PEM file parsing in Python."
__uri__ = "https://pem.readthedocs.org/"
__email__ = "[email protected]"
_DEPRECATION_WARNING = (
"Calling {func} from the pem package is deprecated as of pem 15.0.0. "
"Please use pem.twisted.{func} instead."
)
def certificateOptionsFromFiles(*a, **kw):
"""
Deprecated function. Please use pem.twisted.certificateOptionsFromFiles.
"""
import warnings
warnings.warn(
_DEPRECATION_WARNING.format(func="certificateOptionsFromFiles"),
DeprecationWarning
)
return certificateOptionsFromFilesOriginal(*a, **kw)
def certificateOptionsFromPEMs(*a, **kw):
"""
Deprecated function. Please use pem.twisted.certificateOptionsFromPEMs.
"""
import warnings
warnings.warn(
_DEPRECATION_WARNING.format(func="certificateOptionsFromPEMs"),
DeprecationWarning
)
return certificateOptionsFromPEMsOriginal(*a, **kw)
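# Editor's sketch (not part of the original module); 'chain.pem' is a
# placeholder path.
#
#     import pem
#     objs = pem.parse_file('chain.pem')   # -> e.g. [RSAPrivateKey(...), Certificate(...)]
#
#     from pem.twisted import certificateOptionsFromFiles
#     ctx_factory = certificateOptionsFromFiles('chain.pem')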
__all__ = [
"Certificate",
"DHParameters",
"Key",
"RSAPrivateKey",
"certificateOptionsFromFiles",
"certificateOptionsFromPEMs",
"parse",
"parse_file",
"twisted",
]
| {
"repo_name": "leandrotoledo/pem",
"path": "pem/__init__.py",
"copies": "1",
"size": "1596",
"license": "mit",
"hash": 105649644403248600,
"line_mean": 23.1818181818,
"line_max": 77,
"alpha_frac": 0.6810776942,
"autogenerated": false,
"ratio": 3.7116279069767444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48927056011767445,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from imps.core import Sorter
def test_smarkets_style():
input = '''from __future__ import absolute_import, division, print_function
import ast
import configparser
import os
import StringIO
import sys
from functools import *
from os import path
import flake8
import pytest
from flake8.defaults import NOQA_INLINE_REGEXP, STATISTIC_NAMES
from flake8.exceptions import *
from pytest import *
from pytest import capture
from pytest import compat, config
from common.interfaces import Config
from common.rest.decorators import jsonify
from han.db import Database
from winners.server.db_access import (
acknowledge_winner_exposure_for_market,
get_acknowledged_winner_exposures_for_market,
)
from . import A
from . import B
from .A import A
from .B import B
from .. import A
from .. import B
from ..A import A
from ..B import B
'''
assert Sorter('s', 80, ['common', 'winners', 'han']).sort(input) == input
def test_smarkets_style_from_import_capitals_are_not_lowered():
input = '''from __future__ import absolute_import, division, print_function
from imps.strings import AAAA
from imps.strings import get_doc_string, strip_to_module_name, strip_to_module_name_from_import
from imps.strings import ZZZZ
'''
# Possible alternative:
# output = '''from __future__ import absolute_import, division, print_function
#
# from imps.strings import (
# AAAA,
# get_doc_string,
# strip_to_module_name,
# strip_to_module_name_from_import
# ZZZZ,
# )
# '''
assert Sorter('s', max_line_length=110).sort(input) == input
def test_newlines_reduced():
s = Sorter('s', 80, ['local'])
input = """import io
import sys
import A
"""
output = """import io
import sys
import A
"""
assert s.sort(input) == output
def test_no_new_line_between_same_type():
s = Sorter(type='s', max_line_length=110, indent=" ")
input_str = """
from __future__ import absolute_import, division, print_function
import re
from collections import OrderedDict
"""
correct = """from __future__ import absolute_import, division, print_function
import re
from collections import OrderedDict
"""
assert s.sort(input_str) == correct
| {
"repo_name": "bootandy/imps",
"path": "imps/tests/test_smarkets.py",
"copies": "1",
"size": "2242",
"license": "apache-2.0",
"hash": 2559477037607645700,
"line_mean": 21.42,
"line_max": 95,
"alpha_frac": 0.7100802855,
"autogenerated": false,
"ratio": 3.402124430955994,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9610471717386545,
"avg_score": 0.00034659981388970515,
"num_lines": 100
} |
from __future__ import absolute_import, division, print_function
from imps.stdlib import get_paths, LOCAL, STDLIB, strip_to_first_module, THIRDPARTY
def test_strip_to_first_module():
assert strip_to_first_module('from alpha.beta import squid') == 'alpha'
assert strip_to_first_module('import sys') == 'sys'
assert strip_to_first_module('import sys, io') == 'sys'
assert strip_to_first_module('from sys import stdin') == 'sys'
assert strip_to_first_module('from . import A') == '.'
assert strip_to_first_module('from ..B import A') == '..B'
def test_path_std():
assert get_paths('import sys', []) == STDLIB
assert get_paths('import io', []) == STDLIB
assert get_paths('from contextlib import *', []) == STDLIB
def test_path_local():
assert get_paths('import a_local_path', ['a_local_path']) == LOCAL
assert get_paths('import a_local_path.submodule', ['a_local_path']) == LOCAL
def test_path_third():
assert get_paths('import pytest', []) == THIRDPARTY
assert get_paths('import flask.abort', []) == THIRDPARTY
    assert get_paths('from six import sax', []) == THIRDPARTY
| {
"repo_name": "bootandy/imps",
"path": "imps/tests/test_stdlib.py",
"copies": "1",
"size": "1130",
"license": "apache-2.0",
"hash": 3827987076141693000,
"line_mean": 37.9655172414,
"line_max": 83,
"alpha_frac": 0.6584070796,
"autogenerated": false,
"ratio": 3.3333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491740412933334,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from inspect import getargspec
from ..external.qt.QtGui import (QWidget, QHBoxLayout, QVBoxLayout,
QLabel, QLineEdit)
from ..external.qt.QtGui import QSpacerItem, QSizePolicy
from .. import core
from ..compat.collections import OrderedDict
from .qtutil import load_ui, is_pyside
def function_label(function):
""" Provide a label for a function
:param function: A member from the glue.config.link_function registry
"""
name = function.function.__name__
args = getargspec(function.function)[0]
args = ', '.join(args)
output = function.output_labels
output = ', '.join(output)
label = "Link from %s to %s" % (args, output)
return label
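# Editor's sketch (not part of the original module); the function name below
# is invented.
#
#     # given a registry member wrapping
#     #     def volume(width, height, depth): ...
#     # with output_labels=('volume',),
#     function_label(member)   # -> 'Link from width, height, depth to volume'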
def helper_label(helper):
""" Provide a label for a link helper
:param helper: A member from the glue.config.link_helper registry
"""
return helper.info
class ArgumentWidget(QWidget):
def __init__(self, argument, parent=None):
super(ArgumentWidget, self).__init__(parent)
self.layout = QHBoxLayout()
self.layout.setContentsMargins(1, 0, 1, 1)
self.setLayout(self.layout)
label = QLabel(argument)
self._label = label
self._component_id = None
self.layout.addWidget(label)
self.editor = QLineEdit()
self.editor.setReadOnly(True)
try:
self.editor.setPlaceholderText("Drag a component from above")
except AttributeError: # feature added in Qt 4.7
pass
self.layout.addWidget(self.editor)
self.setAcceptDrops(True)
@property
def component_id(self):
return self._component_id
@component_id.setter
def component_id(self, cid):
self._component_id = cid
self.editor.setText(str(cid))
@property
def label(self):
return self._label.text()
@label.setter
def label(self, label):
self._label.setText(label)
@property
def editor_text(self):
return self.editor.text()
def clear(self):
self.component_id = None
self.editor.clear()
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('application/py_instance'):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
obj = event.mimeData().data('application/py_instance')
if isinstance(obj, list):
obj = obj[0]
if not isinstance(obj, core.data.ComponentID):
event.ignore()
return
self.component_id = obj
event.accept()
class LinkEquation(QWidget):
""" Interactively define ComponentLinks from existing functions
This widget inspects the calling signatures of helper functions,
and presents the user with an interface for assigning componentIDs
to the input and output arguments. It also generates ComponentLinks
from this information.
ComponentIDs are assigned to arguments via drag and drop. This
widget is used within the LinkEditor dialog
Usage::
widget = LinkEquation()
"""
def __init__(self, parent=None):
super(LinkEquation, self).__init__(parent)
from ..config import link_function, link_helper
# Set up mapping of function/helper name -> function/helper tuple. For the helpers, we use the 'display' name if available.
def get_name(item):
if hasattr(item, 'display') and item.display is not None:
return item.display
else:
return item.__name__
f = [f for f in link_function.members if len(f.output_labels) == 1]
self._functions = OrderedDict((get_name(l[0]), l) for l in
f + link_helper.members)
self._argument_widgets = []
self.spacer = None
self._output_widget = ArgumentWidget("")
# pyqt4 can't take self as second argument here
# for some reason. Manually embed
self._ui = load_ui('link_equation', None)
l = QHBoxLayout()
l.addWidget(self._ui)
self.setLayout(l)
self._init_widgets()
self._populate_function_combo()
self._connect()
self._setup_editor()
def set_result_visible(self, state):
self._ui.output_canvas.setVisible(state)
self._ui.output_label.setVisible(state)
def is_helper(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkHelper'
def is_function(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkFunction'
def _init_widgets(self):
layout = QVBoxLayout()
layout.setSpacing(1)
self._ui.input_canvas.setLayout(layout)
layout = QVBoxLayout()
layout.setContentsMargins(1, 0, 1, 1)
self._ui.output_canvas.setLayout(layout)
layout.addWidget(self._output_widget)
spacer = QSpacerItem(5, 5, QSizePolicy.Minimum, QSizePolicy.Expanding)
layout.addItem(spacer)
@property
def add_button(self):
return self._ui.addButton
@property
def signature(self):
""" Returns the ComponentIDs assigned to the input and output arguments
:rtype: tuple of (input, output). Input is a list of ComponentIDs.
output is a ComponentID
"""
inp = [a.component_id for a in self._argument_widgets]
out = self._output_widget.component_id
return inp, out
@signature.setter
def signature(self, inout):
inp, out = inout
for i, a in zip(inp, self._argument_widgets):
a.component_id = i
self._output_widget.component_id = out
@property
def function(self):
""" The currently-selected function
:rtype: A function or helper tuple
"""
fname = str(self._ui.function.currentText())
func = self._functions[fname]
return func
@function.setter
def function(self, val):
if hasattr(val[0], 'display') and val[0].display is not None:
name = val[0].display
else:
name = val[0].__name__
pos = self._ui.function.findText(name)
if pos < 0:
raise KeyError("No function or helper found %s" % [val])
self._ui.function.setCurrentIndex(pos)
def links(self):
""" Create ComponentLinks from the state of the widget
:rtype: list of ComponentLinks that can be created.
If no links can be created (e.g. because of missing input),
the empty list is returned
"""
inp, out = self.signature
if self.is_function():
using = self.function.function
if not all(inp) or not out:
return []
link = core.component_link.ComponentLink(inp, out, using)
return [link]
if self.is_helper():
helper = self.function.helper
if not all(inp):
return []
return helper(*inp)
def _update_add_enabled(self):
state = True
for a in self._argument_widgets:
state = state and a.component_id is not None
if self.is_function():
state = state and self._output_widget.component_id is not None
self._ui.addButton.setEnabled(state)
def _connect(self):
signal = self._ui.function.currentIndexChanged
signal.connect(self._setup_editor)
signal.connect(self._update_add_enabled)
self._output_widget.editor.textChanged.connect(
self._update_add_enabled)
def clear_inputs(self):
for w in self._argument_widgets:
w.clear()
self._output_widget.clear()
def _setup_editor(self):
if self.is_function():
self._setup_editor_function()
else:
self._setup_editor_helper()
def _setup_editor_function(self):
""" Prepare the widget for the active function."""
assert self.is_function()
self.set_result_visible(True)
func = self.function.function
args = getargspec(func)[0]
label = function_label(self.function)
self._ui.info.setText(label)
self._output_widget.label = self.function.output_labels[0]
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QSpacerItem(5, 5, QSizePolicy.Minimum,
QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _setup_editor_helper(self):
"""Setup the editor for the selected link helper"""
assert self.is_helper()
self.set_result_visible(False)
label = helper_label(self.function)
args = self.function.input_labels
self._ui.info.setText(label)
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QSpacerItem(5, 5, QSizePolicy.Minimum,
QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _add_argument_widget(self, argument):
""" Create and add a single argument widget to the input canvas
        :param argument: The argument name (string)
"""
widget = ArgumentWidget(argument)
widget.editor.textChanged.connect(self._update_add_enabled)
self._ui.input_canvas.layout().addWidget(widget)
self._argument_widgets.append(widget)
def _clear_input_canvas(self):
""" Remove all widgets from the input canvas """
layout = self._ui.input_canvas.layout()
for a in self._argument_widgets:
layout.removeWidget(a)
a.close()
if not is_pyside():
# PySide crashing here
layout.removeItem(self.spacer)
self._argument_widgets = []
def _populate_function_combo(self):
""" Add name of functions to function combo box """
self._ui.function.clear()
for f in self._functions:
self._ui.function.addItem(f)
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/link_equation.py",
"copies": "1",
"size": "10206",
"license": "bsd-3-clause",
"hash": -197317345852547940,
"line_mean": 31.1955835962,
"line_max": 131,
"alpha_frac": 0.6034685479,
"autogenerated": false,
"ratio": 4.116982654296087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5220451202196087,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from io import TextIOBase
import sys
import threading
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
def start_thread(runnable):
threading.Thread(target=lambda: runnable.run()).start()
class ConsoleInputStream(TextIOBase):
"""Receives input in on_input in one thread (non-blocking), and provides a read interface in
another thread (blocking). Reads will return bytes in Python 2 or unicode in Python 3.
"""
def __init__(self, task):
TextIOBase.__init__(self)
self.task = task
self.queue = Queue()
self.buffer = ""
self.eof = False
@property
def encoding(self):
return "UTF-8"
@property
def errors(self):
return "strict" # UTF-8 encoding should never fail.
def readable(self):
return True
def on_input(self, input):
if self.eof:
raise ValueError("Can't add more input after EOF")
if input is None:
self.eof = True
self.queue.put(input)
def read(self, size=None):
if size is not None and size < 0:
size = None
buffer = self.buffer
while (self.queue is not None) and ((size is None) or (len(buffer) < size)):
if self.queue.empty():
self.task.onInputState(True)
input = self.queue.get()
self.task.onInputState(False)
if input is None: # EOF
self.queue = None
else:
buffer += input
result = buffer if (size is None) else buffer[:size]
self.buffer = buffer[len(result):]
return result.encode(self.encoding, self.errors) if (sys.version_info[0] < 3) else result
def readline(self, size=None):
if size is not None and size < 0:
size = None
chars = []
while (size is None) or (len(chars) < size):
c = self.read(1)
if not c:
break
chars.append(c)
if c == "\n":
break
return "".join(chars)
class ConsoleOutputStream(TextIOBase):
"""Passes each write to the underlying stream, and also to the given method (which must take a
single String argument) on the given Task object.
"""
def __init__(self, task, method_name, stream):
TextIOBase.__init__(self)
self.stream = stream
self.method = getattr(task, method_name)
@property
def encoding(self):
return self.stream.encoding
@property
def errors(self):
return self.stream.errors
def writable(self):
return True
def write(self, s):
if sys.version_info[0] < 3 and isinstance(s, str):
u = s.decode(self.encoding, self.errors)
else:
u = s
self.method(u)
return self.stream.write(s)
def flush(self):
self.stream.flush()
| {
"repo_name": "fyookball/electrum",
"path": "android/app/src/main/python/chaquopy/utils/console.py",
"copies": "1",
"size": "2993",
"license": "mit",
"hash": 5238465816921857000,
"line_mean": 26.712962963,
"line_max": 98,
"alpha_frac": 0.5760106916,
"autogenerated": false,
"ratio": 4.0610583446404345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137069036240435,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
from collections import Iterator
from datashape import dshape, Record, DataShape, Mono
from datashape.predicates import isdimension
from toolz import partition_all, partial, map, compose, first
from ..dispatch import dispatch
from ..compatibility import _strtypes
def validate(schema, item):
try:
nd.array(item, dtype=str(schema))
return True
except:
return False
@dispatch((DataShape, Mono), object)
def coerce(dshape, item):
return coerce(str(dshape), item)
@dispatch(_strtypes, object)
def coerce(dshape, item):
try:
return nd.as_py(nd.array(item, dtype=dshape), tuple=True)
except ValueError as e:
raise ValueError("DataShape mismatch.\n"
"DyND failed to parse data with the following datashape: %s\n"
"Produced the following error: %s\n"
"Consider providing a more general datashape with "
"keyword dshape=" % (dshape, e.args[0]))
@dispatch(_strtypes, Iterator)
def coerce(dshape, item):
blocks = partition_all(1024, item)
return chain.from_iterable(map(partial(coerce, dshape), blocks))
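# Editor's sketch (not part of the original module); requires the dynd-python
# package imported above, and the exact container types returned depend on
# nd.as_py.
#
#     coerce('3 * int32', [1, 2, 3])                          # -> [1, 2, 3]
#     coerce('2 * {x: int32, y: int32}', [(1, 2), (3, 4)])    # -> [(1, 2), (3, 4)]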
def coerce_to_ordered(ds, data):
""" Coerce data with dicts into an ordered ND collection
>>> from datashape import dshape
>>> coerce_to_ordered('{x: int, y: int}', {'x': 1, 'y': 2})
(1, 2)
>>> coerce_to_ordered('var * {x: int, y: int}',
... [{'x': 1, 'y': 2}, {'x': 10, 'y': 20}])
((1, 2), (10, 20))
Idempotent
>>> coerce_to_ordered('var * {x: int, y: int}',
... ((1, 2), (10, 20)))
((1, 2), (10, 20))
"""
if isinstance(ds, _strtypes):
ds = dshape(ds)
if isinstance(ds[0], Record):
if isinstance(data, (list, tuple)):
return data
rec = ds[0]
return tuple(coerce_to_ordered(rec[name], data[name])
for name in rec.names)
if isdimension(ds[0]):
return tuple(coerce_to_ordered(ds.subarray(1), row)
for row in data)
return data
def coerce_record_to_row(schema, rec):
"""
>>> from datashape import dshape
>>> schema = dshape('{x: int, y: int}')
>>> coerce_record_to_row(schema, {'x': 1, 'y': 2})
[1, 2]
Idempotent
>>> coerce_record_to_row(schema, [1, 2])
[1, 2]
"""
if isinstance(rec, (tuple, list)):
return rec
return [rec[name] for name in schema[0].names]
def coerce_row_to_dict(schema, row):
"""
>>> from datashape import dshape
>>> schema = dshape('{x: int, y: int}')
>>> coerce_row_to_dict(schema, (1, 2)) # doctest: +SKIP
{'x': 1, 'y': 2}
Idempotent
>>> coerce_row_to_dict(schema, {'x': 1, 'y': 2}) # doctest: +SKIP
{'x': 1, 'y': 2}
"""
if isinstance(row, dict):
return row
return dict((name, item) for name, item in zip(schema[0].names, row))
def ordered_index(ind, ds):
""" Transform a named index into an ordered one
>>> ordered_index(1, '3 * int')
1
>>> ordered_index('name', '{name: string, amount: int}')
0
>>> ordered_index((0, 0), '3 * {x: int, y: int}')
(0, 0)
>>> ordered_index([0, 1], '3 * {x: int, y: int}')
[0, 1]
>>> ordered_index(([0, 1], 'x'), '3 * {x: int, y: int}')
([0, 1], 0)
>>> ordered_index((0, 'x'), '3 * {x: int, y: int}')
(0, 0)
>>> ordered_index((0, [0, 1]), '3 * {x: int, y: int}')
(0, [0, 1])
>>> ordered_index((0, ['x', 'y']), '3 * {x: int, y: int}')
(0, [0, 1])
"""
if isinstance(ds, _strtypes):
ds = dshape(ds)
if isinstance(ind, (int, slice)):
return ind
if isinstance(ind, list):
return [ordered_index(i, ds) for i in ind]
if isinstance(ind, _strtypes) and isinstance(ds[0], Record):
return ds[0].names.index(ind)
if isinstance(ind, tuple) and not ind:
return ()
if isdimension(ds[0]):
return (ind[0],) + tupleit(ordered_index(ind[1:], ds.subshape[0]))
if isinstance(ind, tuple):
return ((ordered_index(ind[0], ds),)
+ tupleit(ordered_index(ind[1:], ds.subshape[0])))
    raise NotImplementedError("Rule for ind: %s, ds: %s not found"
% (str(ind), str(ds)))
def tupleit(x):
if not isinstance(x, tuple):
return (x,)
else:
return x
def tuplify(x):
if isinstance(x, (tuple, list, Iterator)):
return tuple(map(tuplify, x))
else:
return x
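# Editor's example (illustrative, not in the original module): tuplify
# recursively freezes nested lists/iterators into tuples, e.g.
# tuplify([1, [2, [3]]]) returns (1, (2, (3,))).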
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
def sort_dtype_items(items, names):
return sorted(items, key=compose(names.index, first))
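# Editor's example (illustrative): sort_dtype_items reorders (name, dtype)
# pairs to match a target column order, e.g.
# sort_dtype_items([('y', 'int32'), ('x', 'float64')], ['x', 'y'])
# returns [('x', 'float64'), ('y', 'int32')].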
| {
"repo_name": "vitan/blaze",
"path": "blaze/data/utils.py",
"copies": "1",
"size": "4949",
"license": "bsd-3-clause",
"hash": -8610963531535163000,
"line_mean": 25.8967391304,
"line_max": 78,
"alpha_frac": 0.5508183471,
"autogenerated": false,
"ratio": 3.2473753280839897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42981936751839894,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
from collections import Iterator
from datashape import dshape, Record
from datashape.predicates import isunit, isdimension
from toolz import partition_all, partial, map
from ..compatibility import _strtypes
def validate(schema, item):
try:
nd.array(item, dtype=str(schema))
return True
except:
return False
def coerce(dshape, item):
if isinstance(item, Iterator):
blocks = partition_all(1000, item)
return chain.from_iterable(map(partial(coerce, dshape), blocks))
return nd.as_py(nd.array(item, dtype=str(dshape)), tuple=True)
def coerce_to_ordered(ds, data):
""" Coerce data with dicts into an ordered ND collection
>>> from datashape import dshape
>>> coerce_to_ordered('{x: int, y: int}', {'x': 1, 'y': 2})
(1, 2)
>>> coerce_to_ordered('var * {x: int, y: int}',
... [{'x': 1, 'y': 2}, {'x': 10, 'y': 20}])
((1, 2), (10, 20))
Idempotent
>>> coerce_to_ordered('var * {x: int, y: int}',
... ((1, 2), (10, 20)))
((1, 2), (10, 20))
"""
if isinstance(ds, _strtypes):
ds = dshape(ds)
if isinstance(ds[0], Record):
if isinstance(data, (list, tuple)):
return data
rec = ds[0]
return tuple(coerce_to_ordered(rec[name], data[name])
for name in rec.names)
if isdimension(ds[0]):
return tuple(coerce_to_ordered(ds.subarray(1), row)
for row in data)
return data
def coerce_record_to_row(schema, rec):
"""
>>> from datashape import dshape
>>> schema = dshape('{x: int, y: int}')
>>> coerce_record_to_row(schema, {'x': 1, 'y': 2})
[1, 2]
Idempotent
>>> coerce_record_to_row(schema, [1, 2])
[1, 2]
"""
if isinstance(rec, (tuple, list)):
return rec
return [rec[name] for name in schema[0].names]
def coerce_row_to_dict(schema, row):
"""
>>> from datashape import dshape
>>> schema = dshape('{x: int, y: int}')
>>> coerce_row_to_dict(schema, (1, 2)) # doctest: +SKIP
{'x': 1, 'y': 2}
Idempotent
>>> coerce_row_to_dict(schema, {'x': 1, 'y': 2}) # doctest: +SKIP
{'x': 1, 'y': 2}
"""
if isinstance(row, dict):
return row
return dict((name, item) for name, item in zip(schema[0].names, row))
def ordered_index(ind, ds):
""" Transform a named index into an ordered one
>>> ordered_index(1, '3 * int')
1
>>> ordered_index('name', '{name: string, amount: int}')
0
>>> ordered_index((0, 0), '3 * {x: int, y: int}')
(0, 0)
>>> ordered_index([0, 1], '3 * {x: int, y: int}')
[0, 1]
>>> ordered_index(([0, 1], 'x'), '3 * {x: int, y: int}')
([0, 1], 0)
>>> ordered_index((0, 'x'), '3 * {x: int, y: int}')
(0, 0)
>>> ordered_index((0, [0, 1]), '3 * {x: int, y: int}')
(0, [0, 1])
>>> ordered_index((0, ['x', 'y']), '3 * {x: int, y: int}')
(0, [0, 1])
"""
if isinstance(ds, _strtypes):
ds = dshape(ds)
if isinstance(ind, (int, slice)):
return ind
if isinstance(ind, list):
return [ordered_index(i, ds) for i in ind]
if isinstance(ind, _strtypes) and isinstance(ds[0], Record):
return ds[0].names.index(ind)
if isinstance(ind, tuple) and not ind:
return ()
if isdimension(ds[0]):
return (ind[0],) + tupleit(ordered_index(ind[1:], ds.subshape[0]))
if isinstance(ind, tuple):
return ((ordered_index(ind[0], ds),)
+ tupleit(ordered_index(ind[1:], ds.subshape[0])))
    raise NotImplementedError("Rule for ind: %s, ds: %s not found"
% (str(ind), str(ds)))
def tupleit(x):
if not isinstance(x, tuple):
return (x,)
else:
return x
def tuplify(x):
if isinstance(x, (tuple, list, Iterator)):
return tuple(map(tuplify, x))
else:
return x
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/utils.py",
"copies": "1",
"size": "4043",
"license": "bsd-3-clause",
"hash": -974982947258924700,
"line_mean": 26.8827586207,
"line_max": 74,
"alpha_frac": 0.5426663369,
"autogenerated": false,
"ratio": 3.2036450079239303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42463113448239304,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
import datashape
from datashape.internal_utils import IndexCallable
from datashape import discover
from functools import partial
from ..dispatch import dispatch
from blaze.expr import Projection, Field
from blaze.expr import Expr, UnaryOp
from .utils import validate, coerce, coerce_to_ordered, ordered_index
from ..utils import partition_all
__all__ = ['DataDescriptor', 'discover', 'compute_up']
def isdimension(ds):
return isinstance(ds, (datashape.Var, datashape.Fixed))
class DataDescriptor(object):
"""
Standard interface to data storage
Data descriptors provide read and write access to common data storage
systems like csv, json, HDF5, and SQL.
They provide Pythonic iteration over these resources as well as efficient
chunked access with DyND arrays.
Data Descriptors implement the following methods:
__iter__ - iterate over storage, getting results as Python objects
chunks - iterate over storage, getting results as DyND arrays
extend - insert new data into storage (if possible.)
Consumes a sequence of core Python objects
extend_chunks - insert new data into storage (if possible.)
Consumes a sequence of DyND arrays
as_dynd - load entire dataset into memory as a DyND array
"""
def extend(self, rows):
""" Extend data with many rows
"""
rows = iter(rows)
row = next(rows)
rows = chain([row], rows)
if not validate(self.schema, row):
raise ValueError('Invalid data:\n\t %s \nfor dshape \n\t%s' %
(str(row), self.schema))
if isinstance(row, dict):
rows = map(partial(coerce_to_ordered, self.schema), rows)
self._extend(rows)
def extend_chunks(self, chunks):
def dtype_of(chunk):
return str(len(chunk) * self.schema)
self._extend_chunks((nd.array(chunk, type=dtype_of(chunk))
for chunk in chunks))
def _extend_chunks(self, chunks):
self.extend((row for chunk in chunks
for row in nd.as_py(chunk, tuple=True)))
def chunks(self, **kwargs):
def dshape(chunk):
return str(len(chunk) * self.dshape.subshape[0])
for chunk in self._chunks(**kwargs):
yield nd.array(chunk, type=dshape(chunk))
def _chunks(self, blen=100):
return partition_all(blen, iter(self))
def as_dynd(self):
return self.dynd[:]
def as_py(self):
if isdimension(self.dshape[0]):
return tuple(self)
else:
return tuple(nd.as_py(self.as_dynd(), tuple=True))
def __array__(self):
return nd.as_numpy(self.as_dynd())
def __getitem__(self, key):
return self.get_py(key)
@property
def dynd(self):
return IndexCallable(self.get_dynd)
def get_py(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_py'):
result = self._get_py(key)
elif hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
return coerce(subshape, result)
def get_dynd(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
elif hasattr(self, '_get_py'):
result = nd.array(self._get_py(key), type=str(subshape))
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
# Currently nd.array(result, type=discover(result)) is oddly slower
# than just nd.array(result) , even though no type coercion should be
# necessary. As a short-term solution we check if this is the case and
# short-circuit the `type=` call
# This check can be deleted once these two run at similar speeds
ds_result = discover(result)
if (subshape == ds_result or
(isdimension(subshape[0]) and isdimension(ds_result[0]) and
             subshape.subshape[0] == ds_result.subshape[0])):
return nd.array(result)
else:
return nd.array(result, type=str(subshape))
def __iter__(self):
if not isdimension(self.dshape[0]):
raise TypeError("Data Descriptor not iterable, has dshape %s" %
self.dshape)
schema = self.dshape.subshape[0]
try:
seq = self._iter()
except NotImplementedError:
seq = iter(nd.as_py(self.as_dynd(), tuple=True))
if not isdimension(self.dshape[0]):
yield coerce(self.dshape, nd.as_py(self.as_dynd(), tuple=True))
else:
for block in partition_all(100, seq):
x = coerce(len(block) * schema, block)
for row in x:
yield row
def _iter(self):
raise NotImplementedError()
_dshape = None
@property
def dshape(self):
return datashape.dshape(self._dshape or datashape.Var() * self.schema)
_schema = None
@property
def schema(self):
if self._schema:
return datashape.dshape(self._schema)
if isdimension(self.dshape[0]):
return self.dshape.subarray(1)
raise TypeError('Datashape is not indexable to schema\n%s' %
self.dshape)
@property
def columns(self):
rec = self.schema[0]
if isinstance(rec, datashape.Record):
return rec.names
else:
raise TypeError('Columns attribute only valid on tabular '
'datashapes of records, got %s' % self.dshape)
@dispatch((Expr, UnaryOp), DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return compute_up(t, iter(ddesc)) # use Python streaming by default
@dispatch(Projection, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields]
@dispatch(Field, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields[0]]
@dispatch(DataDescriptor)
def discover(dd):
return dd.dshape
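# Editor's sketch (hypothetical, not part of the original module): a minimal
# in-memory descriptor. Subclasses only need to supply ``_iter`` and,
# optionally, ``_extend``; the base class handles validation, coercion and
# indexing.
#
#     class ListDescriptor(DataDescriptor):
#         def __init__(self, data, schema):
#             self.data = list(data)
#             self._schema = schema
#         def _iter(self):
#             return iter(self.data)
#         def _extend(self, rows):
#             self.data.extend(rows)
#
#     dd = ListDescriptor([(1, 'Alice'), (2, 'Bob')], '{id: int32, name: string}')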
| {
"repo_name": "vitan/blaze",
"path": "blaze/data/core.py",
"copies": "1",
"size": "6508",
"license": "bsd-3-clause",
"hash": -5320380126648248000,
"line_mean": 31.8686868687,
"line_max": 79,
"alpha_frac": 0.6040258144,
"autogenerated": false,
"ratio": 3.885373134328358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4989398948728358,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
import datashape
from datashape.internal_utils import IndexCallable
from .utils import validate, coerce, coerce_to_ordered, ordered_index
from ..utils import partition_all
__all__ = ['DataDescriptor', 'discover', 'compute']
def isdimension(ds):
return isinstance(ds, (datashape.Var, datashape.Fixed))
class DataDescriptor(object):
"""
Standard interface to data storage
Data descriptors provide read and write access to common data storage
systems like csv, json, HDF5, and SQL.
They provide Pythonic iteration over these resources as well as efficient
chunked access with DyND arrays.
Data Descriptors implement the following methods:
__iter__ - iterate over storage, getting results as Python objects
chunks - iterate over storage, getting results as DyND arrays
extend - insert new data into storage (if possible.)
Consumes a sequence of core Python objects
extend_chunks - insert new data into storage (if possible.)
Consumes a sequence of DyND arrays
as_dynd - load entire dataset into memory as a DyND array
"""
def extend(self, rows):
""" Extend data with many rows
"""
if not self.appendable or self.immutable:
raise TypeError('Data Descriptor not appendable')
rows = iter(rows)
row = next(rows)
if not validate(self.schema, row):
raise ValueError('Invalid data:\n\t %s \nfor dshape \n\t%s' %
(str(row), self.schema))
self._extend(chain([row], rows))
def extend_chunks(self, chunks):
if not self.appendable or self.immutable:
raise TypeError('Data Descriptor not appendable')
def dtype_of(chunk):
return str(len(chunk) * self.schema)
self._extend_chunks((nd.array(chunk, dtype=dtype_of(chunk))
for chunk in chunks))
def _extend_chunks(self, chunks):
self.extend((row for chunk in chunks
for row in nd.as_py(chunk, tuple=True)))
def chunks(self, **kwargs):
def dshape(chunk):
return str(len(chunk) * self.dshape.subshape[0])
chunks = self._chunks(**kwargs)
return (nd.array(chunk, dtype=dshape(chunk)) for chunk in chunks)
def _chunks(self, blen=100):
return partition_all(blen, iter(self))
def as_dynd(self):
return self.dynd[:]
def as_py(self):
if isdimension(self.dshape[0]):
return tuple(self)
else:
return tuple(nd.as_py(self.as_dynd(), tuple=True))
def __array__(self):
return nd.as_numpy(self.as_dynd())
@property
def py(self):
return IndexCallable(self.get_py)
@property
def dynd(self):
return IndexCallable(self.get_dynd)
def get_py(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_py'):
result = self._get_py(key)
elif hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
return coerce(subshape, result)
def get_dynd(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
elif hasattr(self, '_get_py'):
result = nd.array(self._get_py(key), type=str(subshape))
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
return nd.array(result, type=str(subshape))
def __iter__(self):
if not isdimension(self.dshape[0]):
raise TypeError("Data Descriptor not iterable, has dshape %s" %
self.dshape)
schema = self.dshape.subshape[0]
try:
seq = self._iter()
except NotImplementedError:
seq = iter(nd.as_py(self.as_dynd(), tuple=True))
if not isdimension(self.dshape[0]):
yield coerce(self.dshape, nd.as_py(self.as_dynd(), tuple=True))
else:
for block in partition_all(100, seq):
x = coerce(len(block) * schema, block)
for row in x:
yield row
def _iter(self):
raise NotImplementedError()
_dshape = None
@property
def dshape(self):
return datashape.dshape(self._dshape or datashape.Var() * self.schema)
_schema = None
@property
def schema(self):
if self._schema:
return datashape.dshape(self._schema)
if isdimension(self.dshape[0]):
return self.dshape.subarray(1)
raise TypeError('Datashape is not indexable to schema\n%s' %
self.dshape)
@property
def columns(self):
rec = self.schema[0]
if isinstance(rec, datashape.Record):
return rec.names
else:
raise TypeError('Columns attribute only valid on tabular '
'datashapes of records, got %s' % self.dshape)
from ..dispatch import dispatch
from blaze.expr.table import Join, TableExpr
from blaze.expr.core import Expr
@dispatch((Join, Expr), DataDescriptor)
def compute(t, ddesc):
return compute(t, iter(ddesc)) # use Python streaming by default
@dispatch(DataDescriptor)
def discover(dd):
return dd.dshape
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/core.py",
"copies": "1",
"size": "5728",
"license": "bsd-3-clause",
"hash": 4043698314402395000,
"line_mean": 31.5454545455,
"line_max": 78,
"alpha_frac": 0.6014315642,
"autogenerated": false,
"ratio": 3.947622329427981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007041293617998163,
"num_lines": 176
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
import datashape
from .utils import validate, coerce
from ..utils import partition_all
__all__ = ['DataDescriptor']
def isdimension(ds):
return isinstance(ds, (datashape.Var, datashape.Fixed))
class DataDescriptor(object):
"""
Standard interface to data storage
Data descriptors provide read and write access to common data storage
systems like csv, json, HDF5, and SQL.
They provide Pythonic iteration over these resources as well as efficient
chunked access with DyND arrays.
Data Descriptors implement the following methods:
__iter__ - iterate over storage, getting results as Python objects
chunks - iterate over storage, getting results as DyND arrays
extend - insert new data into storage (if possible.)
Consumes a sequence of core Python objects
extend_chunks - insert new data into storage (if possible.)
Consumes a sequence of DyND arrays
as_dynd - load entire dataset into memory as a DyND array
"""
def extend(self, rows):
""" Extend data with many rows
"""
if not self.appendable or self.immutable:
raise TypeError('Data Descriptor not appendable')
rows = iter(rows)
row = next(rows)
if not validate(self.schema, row):
raise ValueError('Invalid data:\n\t %s \nfor dshape \n\t%s' %
(str(row), self.schema))
self._extend(chain([row], rows))
def extend_chunks(self, chunks):
if not self.appendable or self.immutable:
raise TypeError('Data Descriptor not appendable')
self._extend_chunks((nd.array(chunk) for chunk in chunks))
def _extend_chunks(self, chunks):
self.extend((row for chunk in chunks for row in nd.as_py(chunk)))
def chunks(self, **kwargs):
def dshape(chunk):
return str(len(chunk) * self.dshape.subarray(1))
chunks = self._chunks(**kwargs)
return (nd.array(chunk, dtype=dshape(chunk)) for chunk in chunks)
def _chunks(self, blen=100):
return partition_all(blen, iter(self))
def getattr(self, name):
raise NotImplementedError('this data descriptor does not support attribute access')
def as_dynd(self):
return nd.array(self.as_py(), dtype=str(self.dshape))
def as_py(self):
if isdimension(self.dshape[0]):
return list(self)
else:
return nd.as_py(self.as_dynd())
def __array__(self):
return nd.as_numpy(self.as_dynd())
def __getitem__(self, key):
if hasattr(self, '_getitem'):
return coerce(self.schema, self._getitem(key))
else:
return self.as_dynd()[key]
def __iter__(self):
try:
for row in self._iter():
yield coerce(self.schema, row)
except NotImplementedError:
py = nd.as_py(self.as_dynd())
if isdimension(self.dshape[0]):
for row in py:
yield row
else:
yield py
def _iter(self):
raise NotImplementedError()
_dshape = None
@property
def dshape(self):
return datashape.dshape(self._dshape or datashape.Var() * self.schema)
_schema = None
@property
def schema(self):
if self._schema:
return datashape.dshape(self._schema)
if isdimension(self.dshape[0]):
return self.dshape.subarray(1)
raise TypeError('Datashape is not indexable to schema\n%s' %
self.dshape)
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/core.py",
"copies": "1",
"size": "3681",
"license": "bsd-3-clause",
"hash": -7048420654411449000,
"line_mean": 30.1949152542,
"line_max": 91,
"alpha_frac": 0.6145069275,
"autogenerated": false,
"ratio": 4.058434399117972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5172941326617971,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
from .utils_test import add, inc # noqa: F401
def ishashable(x):
""" Is x hashable?
Examples
--------
>>> ishashable(1)
True
>>> ishashable([1])
False
"""
try:
hash(x)
return True
except TypeError:
return False
def istask(x):
""" Is x a runnable task?
A task is a tuple with a callable first argument
Examples
--------
>>> inc = lambda x: x + 1
>>> istask((inc, 1))
True
>>> istask(1)
False
"""
return type(x) is tuple and x and callable(x[0])
def has_tasks(dsk, x):
"""Whether ``x`` has anything to compute.
Returns True if:
- ``x`` is a task
- ``x`` is a key in ``dsk``
- ``x`` is a list that contains any tasks or keys
"""
if istask(x):
return True
try:
if x in dsk:
return True
except:
pass
if isinstance(x, list):
for i in x:
if has_tasks(dsk, i):
return True
return False
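# Editor's examples (illustrative): given dsk = {'x': 1},
# has_tasks(dsk, (inc, 'x')) is True (a task), has_tasks(dsk, ['x', 2]) is
# True (a list containing a key), and has_tasks(dsk, 2) is False (plain data).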
def preorder_traversal(task):
"""A generator to preorder-traverse a task."""
for item in task:
if istask(item):
for i in preorder_traversal(item):
yield i
elif isinstance(item, list):
yield list
for i in preorder_traversal(item):
yield i
else:
yield item
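# Editor's note (illustrative): tasks are visited parent-first, so
# list(preorder_traversal((add, 1, (inc, 2)))) yields add, 1, inc, 2 in that
# order; a nested list is announced by yielding the ``list`` constructor
# before its elements.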
def _get_nonrecursive(d, x, maxdepth=1000):
# Non-recursive. DAG property is checked upon reaching maxdepth.
_list = lambda *args: list(args)
# We construct a nested hierarchy of tuples to mimic the execution stack
# of frames that Python would maintain for a recursive implementation.
# A frame is associated with a single task from a Dask.
# A frame tuple has three elements:
# 1) The function for the task.
# 2) The arguments for the task (typically keys in the Dask).
# Arguments are stored in reverse order, and elements are popped
# as they are evaluated.
# 3) The calculated results of the arguments from (2).
stack = [(lambda x: x, [x], [])]
while True:
func, args, results = stack[-1]
if not args:
val = func(*results)
if len(stack) == 1:
return val
stack.pop()
stack[-1][2].append(val)
continue
elif maxdepth and len(stack) > maxdepth:
cycle = getcycle(d, x)
if cycle:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
maxdepth = None
key = args.pop()
if isinstance(key, list):
stack.append((_list, list(key[::-1]), []))
continue
elif ishashable(key) and key in d:
args.append(d[key])
continue
elif istask(key):
stack.append((key[0], list(key[:0:-1]), []))
else:
results.append(key)
def _get_recursive(d, x):
# recursive, no cycle detection
if isinstance(x, list):
return [_get_recursive(d, k) for k in x]
elif ishashable(x) and x in d:
return _get_recursive(d, d[x])
elif istask(x):
func, args = x[0], x[1:]
args2 = [_get_recursive(d, k) for k in args]
return func(*args2)
else:
return x
def get(d, x, recursive=False):
""" Get value from Dask
Examples
--------
>>> inc = lambda x: x + 1
>>> d = {'x': 1, 'y': (inc, 'x')}
>>> get(d, 'x')
1
>>> get(d, 'y')
2
"""
_get = _get_recursive if recursive else _get_nonrecursive
if isinstance(x, list):
return tuple(get(d, k) for k in x)
elif x in d:
return _get(d, x)
raise KeyError("{0} is not a key in the graph".format(x))
def get_dependencies(dsk, key=None, task=None, as_list=False):
""" Get the immediate tasks on which this task depends
Examples
--------
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> get_dependencies(dsk, 'x')
set([])
>>> get_dependencies(dsk, 'y')
set(['x'])
>>> get_dependencies(dsk, 'z') # doctest: +SKIP
set(['x', 'y'])
>>> get_dependencies(dsk, 'w') # Only direct dependencies
set(['z'])
>>> get_dependencies(dsk, 'a') # Ignore non-keys
set(['x'])
>>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly
set(['x'])
"""
if key is not None:
arg = dsk[key]
elif task is not None:
arg = task
else:
raise ValueError("Provide either key or task")
result = []
work = [arg]
while work:
new_work = []
for w in work:
typ = type(w)
if typ is tuple and w and callable(w[0]): # istask(w)
new_work += w[1:]
elif typ is list:
new_work += w
elif typ is dict:
new_work += w.values()
else:
try:
if w in dsk:
result.append(w)
except TypeError: # not hashable
pass
work = new_work
return result if as_list else set(result)
def get_deps(dsk):
""" Get dependencies and dependents from dask dask graph
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> dependencies
{'a': set([]), 'c': set(['b']), 'b': set(['a'])}
>>> dependents
{'a': set(['b']), 'c': set([]), 'b': set(['c'])}
"""
dependencies = {k: get_dependencies(dsk, task=v)
for k, v in dsk.items()}
dependents = reverse_dict(dependencies)
return dependencies, dependents
def flatten(seq):
"""
>>> list(flatten([1]))
[1]
>>> list(flatten([[1, 2], [1, 2]]))
[1, 2, 1, 2]
>>> list(flatten([[[1], [2]], [[1], [2]]]))
[1, 2, 1, 2]
>>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
[(1, 2), (1, 2)]
>>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
[1, 2, 3, 4]
"""
if isinstance(seq, str):
yield seq
else:
for item in seq:
if isinstance(item, list):
for item2 in flatten(item):
yield item2
else:
yield item
def reverse_dict(d):
"""
>>> a, b, c = 'abc'
>>> d = {a: [b, c], b: [c]}
>>> reverse_dict(d) # doctest: +SKIP
    {'a': set([]), 'b': set(['a']), 'c': set(['a', 'b'])}
"""
terms = list(d.keys()) + list(chain.from_iterable(d.values()))
result = {t: set() for t in terms}
for k, vals in d.items():
for val in vals:
result[val].add(k)
return result
def subs(task, key, val):
""" Perform a substitution on a task
Examples
--------
>>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP
(inc, 1)
"""
type_task = type(task)
if not (type_task is tuple and task and callable(task[0])): # istask(task):
try:
if type_task is type(key) and task == key:
return val
except Exception:
pass
if type_task is list:
return [subs(x, key, val) for x in task]
return task
newargs = []
for arg in task[1:]:
type_arg = type(arg)
if type_arg is tuple and arg and callable(arg[0]): # istask(task):
arg = subs(arg, key, val)
elif type_arg is list:
arg = [subs(x, key, val) for x in arg]
elif type_arg is type(key) and arg == key:
arg = val
newargs.append(arg)
return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False, dependencies=None):
# Stack-based depth-first search traversal. This is based on Tarjan's
# method for topological sorting (see wikipedia for pseudocode)
if keys is None:
keys = dsk
elif not isinstance(keys, list):
keys = [keys]
if not returncycle:
ordered = []
# Nodes whose descendents have been completely explored.
# These nodes are guaranteed to not be part of a cycle.
completed = set()
# All nodes that have been visited in the current traversal. Because
# we are doing depth-first search, going "deeper" should never result
# in visiting a node that has already been seen. The `seen` and
# `completed` sets are mutually exclusive; it is okay to visit a node
# that has already been added to `completed`.
seen = set()
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
for key in keys:
if key in completed:
continue
nodes = [key]
while nodes:
# Keep current node on the stack until all descendants are visited
cur = nodes[-1]
if cur in completed:
# Already fully traversed descendants of cur
nodes.pop()
continue
seen.add(cur)
# Add direct descendants of cur to nodes stack
next_nodes = []
for nxt in dependencies[cur]:
if nxt not in completed:
if nxt in seen:
# Cycle detected!
cycle = [nxt]
while nodes[-1] != nxt:
cycle.append(nodes.pop())
cycle.append(nodes.pop())
cycle.reverse()
if returncycle:
return cycle
else:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
next_nodes.append(nxt)
if next_nodes:
nodes.extend(next_nodes)
else:
# cur has no more descendants to explore, so we're done with it
if not returncycle:
ordered.append(cur)
completed.add(cur)
seen.remove(cur)
nodes.pop()
if returncycle:
return []
return ordered
def toposort(dsk, dependencies=None):
""" Return a list of keys of dask sorted in topological order."""
return _toposort(dsk, dependencies=dependencies)
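# Editor's example (illustrative):
# toposort({'a': 1, 'b': (inc, 'a'), 'c': (add, 'a', 'b')}) returns
# ['a', 'b', 'c'], the only order in which each key appears after its
# dependencies.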
def getcycle(d, keys):
""" Return a list of nodes that form a cycle if Dask is not a DAG.
Returns an empty list if no cycle is found.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
>>> getcycle(d, 'x')
['x', 'z', 'y', 'x']
See Also
--------
isdag
"""
return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
""" Does Dask form a directed acyclic graph when calculating keys?
``keys`` may be a single key or list of keys.
Examples
--------
>>> inc = lambda x: x + 1
>>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
True
>>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
False
See Also
--------
getcycle
"""
return not getcycle(d, keys)
class literal(object):
"""A small serializable object to wrap literal values without copying"""
__slots__ = ('data',)
def __init__(self, data):
self.data = data
def __repr__(self):
return 'literal<type=%s>' % type(self.data).__name__
def __reduce__(self):
return (literal, (self.data,))
def __call__(self):
return self.data
def quote(x):
""" Ensure that this value remains this value in a dask graph
    Some values in a dask graph take on special meaning. Sometimes we want to
ensure that our data is not interpreted but remains literal.
>>> quote((add, 1, 2)) # doctest: +SKIP
(literal<type=tuple>,)
"""
if istask(x) or type(x) is list:
return (literal(x),)
return x
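# Editor's example (illustrative): get({'x': (add, 1, 2)}, 'x') evaluates the
# task and returns 3, whereas get({'x': quote((add, 1, 2))}, 'x') returns the
# tuple (add, 1, 2) unevaluated, because the wrapped literal is simply called
# to recover the original data.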
| {
"repo_name": "cpcloud/dask",
"path": "dask/core.py",
"copies": "1",
"size": "12150",
"license": "bsd-3-clause",
"hash": -1031776024656334800,
"line_mean": 25.6447368421,
"line_max": 84,
"alpha_frac": 0.5109465021,
"autogenerated": false,
"ratio": 3.7592821782178216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.976875422039178,
"avg_score": 0.00029489198520846264,
"num_lines": 456
} |
from __future__ import absolute_import, division, print_function
from itertools import count
from functools import wraps
from collections import Iterator
import numpy as np
from toolz import merge, merge_sorted
from .core import Array
from ..base import tokenize
from .. import sharedict
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
if not len(a):
return None
if isinstance(q, Iterator):
q = list(q)
if str(a.dtype) == 'category':
result = np.percentile(a.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.categories, a.ordered)
if np.issubdtype(a.dtype, np.datetime64):
a2 = a.astype('i8')
result = np.percentile(a2, q, interpolation=interpolation)
return result.astype(a.dtype)
if not np.issubdtype(a.dtype, np.number):
interpolation = 'nearest'
return np.percentile(a, q, interpolation=interpolation)
names = ('percentile-%d' % i for i in count(1))
def percentile(a, q, interpolation='linear'):
""" Approximate percentile of 1-D array
See numpy.percentile for more information
"""
if not a.ndim == 1:
raise NotImplementedError(
"Percentiles only implemented for 1-d arrays")
q = np.array(q)
token = tokenize(a, list(q), interpolation)
name = 'percentile_chunk-' + token
    dsk = dict(((name, i), (_percentile, key, q, interpolation))
for i, key in enumerate(a._keys()))
name2 = 'percentile-' + token
dsk2 = {(name2, 0): (merge_percentiles, q, [q] * len(a.chunks[0]),
sorted(dsk), a.chunks[0], interpolation)}
dtype = a.dtype
if np.issubdtype(dtype, np.integer):
dtype = (np.array([], dtype=dtype) / 0.5).dtype
dsk = merge(dsk, dsk2)
dsk = sharedict.merge(a.dask, (name2, dsk))
return Array(dsk, name2, chunks=((len(q),),), dtype=dtype)
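# Editor's usage sketch (assumes dask.array is importable; values are
# approximate because per-chunk percentiles are combined by merge_percentiles
# below):
#
#     import numpy as np
#     import dask.array as da
#     x = da.from_array(np.random.random(1000), chunks=250)
#     da.percentile(x, [25, 50, 75]).compute()  # close to np.percentile(x.compute(), [25, 50, 75])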
def merge_percentiles(finalq, qs, vals, Ns, interpolation='lower'):
""" Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of numpy.arrays
Percentiles calculated on different sets of data.
vals : sequence of numpy.arrays
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see numpy.percentile.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = np.array(finalq)
qs = list(map(list, qs))
vals = list(vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
raise ValueError("No non-trivial arrays found")
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if str(vals[0].dtype) == 'category':
result = merge_percentiles(finalq, qs, [v.codes for v in vals], Ns, interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = 'nearest'
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError('qs, vals, and Ns parameters must be the same length')
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty(len(q))
count[1:] = np.diff(q)
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
# >95% of the time in this function is spent in `merge_sorted` below.
# An alternative that uses numpy sort is shown. It is sometimes
# comparable to, but typically slower than, `merge_sorted`.
#
# >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))
# >>> A.sort(0, kind='mergesort')
combined_vals_counts = merge_sorted(*map(zip, vals, counts))
combined_vals, combined_counts = zip(*combined_vals_counts)
combined_vals = np.array(combined_vals)
combined_counts = np.array(combined_counts)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == 'linear':
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side='left')
right = np.searchsorted(combined_q, desired_q, side='right') - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == 'lower':
rv = combined_vals[lower]
elif interpolation == 'higher':
rv = combined_vals[upper]
elif interpolation == 'midpoint':
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == 'nearest':
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError("interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'")
return rv
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/percentile.py",
"copies": "1",
"size": "6350",
"license": "bsd-3-clause",
"hash": -1907418161750873600,
"line_mean": 35.9186046512,
"line_max": 90,
"alpha_frac": 0.6207874016,
"autogenerated": false,
"ratio": 3.6747685185185186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47955559201185183,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import count, product
from functools import partial
from toolz import curry
import numpy as np
from .core import Array, normalize_chunks
from .numpy_compat import full
names = ('wrapped_%d' % i for i in count(1))
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def wrap_func_size_as_kwarg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' in kwargs and 'size' not in kwargs:
kwargs['size'] = kwargs.pop('shape')
if 'size' not in kwargs:
args, size = args[:-1], args[-1]
else:
size = kwargs.pop('size')
if not isinstance(size, (tuple, list)):
size = (size,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, size)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
kw = kwargs.copy(); kw['size'] = (0,)
dtype = func(*args, **kw).dtype
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in chunks])
sizes = product(*chunks)
if not kwargs:
vals = ((func,) + args + (size,) for size in sizes)
else:
vals = ((partial(func, *args, size=size, **kwargs),) for size in sizes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, shape)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
func = partial(func, dtype=dtype, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
f = partial(wrap_func, func, **kwargs)
f.__doc__ = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument chunks=(...)
Original signature follows below.
""" % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
full = w(full)
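# Editor's usage sketch (illustrative): the wrapped constructors accept the
# same arguments as their NumPy counterparts plus a required chunks= keyword:
#
#     x = ones((10, 10), chunks=(5, 5))  # a 2x2 grid of 5x5 float64 blocks
#     x.chunks == ((5, 5), (5, 5))       # True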
| {
"repo_name": "freeman-lab/dask",
"path": "dask/array/wrap.py",
"copies": "5",
"size": "2996",
"license": "bsd-3-clause",
"hash": -9073903511037532000,
"line_mean": 25.052173913,
"line_max": 79,
"alpha_frac": 0.599799733,
"autogenerated": false,
"ratio": 3.4279176201372996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.65277173531373,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import count, product
from toolz import curry
from .core import Array
import numpy as np
names = ('wrapped_%d' % i for i in count(1))
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def blockdims_from_blockshape(shape, blockshape):
"""
Convert blockshape to dimensions along each axis
>>> blockdims_from_blockshape((30, 30), (10, 10))
((10, 10, 10), (10, 10, 10))
>>> blockdims_from_blockshape((30, 30), (12, 12))
((12, 12, 6), (12, 12, 6))
"""
return tuple(map(tuple, map(dims_from_size, shape, blockshape)))
def wrap_func_size_as_kwarg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' in kwargs and 'size' not in kwargs:
kwargs['size'] = kwargs.pop('shape')
if 'size' not in kwargs:
args, size = args[:-1], args[-1]
else:
size = kwargs.pop('size')
if not isinstance(size, (tuple, list)):
size = (size,)
blockshape = kwargs.pop('blockshape', None)
blockdims = kwargs.pop('blockdims', None)
name = kwargs.pop('name', None)
if not blockdims and blockshape:
blockdims = blockdims_from_blockshape(size, blockshape)
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in blockdims])
sizes = product(*blockdims)
if not kwargs:
vals = ((func,) + args + (size,) for size in sizes)
else:
vals = ((curry(func, *args, size=size, **kwargs),) for size in sizes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, shape=size, blockdims=blockdims)
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
blockshape = kwargs.pop('blockshape', None)
blockdims = kwargs.pop('blockdims', None)
name = kwargs.pop('name', None)
if not blockdims and blockshape:
blockdims = blockdims_from_blockshape(shape, blockshape)
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in blockdims])
shapes = product(*blockdims)
    if kwargs:
        func = curry(func, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, shape=shape, blockdims=blockdims)
@curry
def wrap(wrap_func, func):
f = curry(wrap_func, func)
f.__doc__ = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument blockshape=(...) or blockdims=(...).
Original signature follows below.
""" % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones)
zeros = w(np.zeros)
empty = w(np.empty)
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/wrap.py",
"copies": "1",
"size": "3237",
"license": "bsd-3-clause",
"hash": -2869510694894797000,
"line_mean": 25.975,
"line_max": 77,
"alpha_frac": 0.609206055,
"autogenerated": false,
"ratio": 3.393081761006289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4502287816006289,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import count, product
from toolz import curry
import numpy as np
from .core import Array, normalize_chunks
from .numpy_compat import full
names = ('wrapped_%d' % i for i in count(1))
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def wrap_func_size_as_kwarg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' in kwargs and 'size' not in kwargs:
kwargs['size'] = kwargs.pop('shape')
if 'size' not in kwargs:
args, size = args[:-1], args[-1]
else:
size = kwargs.pop('size')
if not isinstance(size, (tuple, list)):
size = (size,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, size)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
kw = kwargs.copy(); kw['size'] = (0,)
dtype = func(*args, **kw).dtype
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in chunks])
sizes = product(*chunks)
if not kwargs:
vals = ((func,) + args + (size,) for size in sizes)
else:
vals = ((curry(func, *args, size=size, **kwargs),) for size in sizes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np.random function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, shape)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
name = name or next(names)
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
func = curry(func, dtype=dtype, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
f = curry(wrap_func, func, **kwargs)
f.__doc__ = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument chunks=(...)
Original signature follows below.
""" % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
full = w(full)
| {
"repo_name": "minrk/dask",
"path": "dask/array/wrap.py",
"copies": "4",
"size": "2960",
"license": "bsd-3-clause",
"hash": 1470628939049209600,
"line_mean": 24.9649122807,
"line_max": 77,
"alpha_frac": 0.5962837838,
"autogenerated": false,
"ratio": 3.3944954128440368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019069412662090006,
"num_lines": 114
} |
from __future__ import absolute_import, division, print_function
from itertools import cycle
from operator import itemgetter, add
from toolz import unique, groupby, accumulate, pluck
import bokeh.plotting as bp
from bokeh.io import _state
from bokeh.palettes import brewer
from bokeh.models import HoverTool, LinearAxis, Range1d
from ..utils import funcname
from ..core import istask
from ..compatibility import apply
def unquote(expr):
if istask(expr):
if expr[0] in (tuple, list, set):
return expr[0](map(unquote, expr[1]))
elif expr[0] == dict and expr[1][0] == list:
return dict(map(unquote, expr[1][1]))
return expr
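# Editor's examples (illustrative): unquote((list, [1, 2, 3])) rebuilds the
# quoted literal and returns [1, 2, 3]; unquote((dict, (list, [['a', 1]])))
# returns {'a': 1}; anything that is not a task passes through unchanged.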
def pprint_task(task, keys, label_size=60):
"""Return a nicely formatted string for a task.
Parameters
----------
task:
Value within dask graph to render as text
keys: iterable
List of keys within dask graph
label_size: int (optional)
Maximum size of output label, defaults to 60
Examples
--------
>>> from operator import add, mul
>>> dsk = {'a': 1,
... 'b': 2,
... 'c': (add, 'a', 'b'),
... 'd': (add, (mul, 'a', 'b'), 'c'),
... 'e': (sum, ['a', 'b', 5]),
... 'f': (add,),
... 'g': []}
>>> pprint_task(dsk['c'], dsk)
'add(_, _)'
>>> pprint_task(dsk['d'], dsk)
'add(mul(_, _), _)'
>>> pprint_task(dsk['e'], dsk)
'sum([_, _, *])'
>>> pprint_task(dsk['f'], dsk)
'add()'
>>> pprint_task(dsk['g'], dsk)
'[]'
"""
if istask(task):
func = task[0]
if func is apply:
head = funcname(task[1])
tail = ')'
args = unquote(task[2]) if len(task) > 2 else ()
kwargs = unquote(task[3]) if len(task) > 3 else {}
else:
if hasattr(func, 'funcs'):
head = '('.join(funcname(f) for f in func.funcs)
tail = ')'*len(func.funcs)
else:
head = funcname(task[0])
tail = ')'
args = task[1:]
kwargs = {}
if args or kwargs:
label_size2 = int((label_size - len(head) - len(tail)) //
(len(args) + len(kwargs)))
pprint = lambda t: pprint_task(t, keys, label_size2)
if args:
if label_size2 > 5:
args = ', '.join(pprint(t) for t in args)
else:
args = '...'
else:
args = ''
if kwargs:
if label_size2 > 5:
kwargs = ', ' + ', '.join('{0}={1}'.format(k, pprint(v))
for k, v in sorted(kwargs.items()))
else:
kwargs = ', ...'
else:
kwargs = ''
return '{0}({1}{2}{3}'.format(head, args, kwargs, tail)
elif isinstance(task, list):
if not task:
return '[]'
elif len(task) > 3:
result = pprint_task(task[:3], keys, label_size)
return result[:-1] + ', ...]'
else:
label_size2 = int((label_size - 2 - 2*len(task)) // len(task))
args = ', '.join(pprint_task(t, keys, label_size2) for t in task)
return '[{0}]'.format(args)
else:
try:
if task in keys:
return '_'
else:
return '*'
except TypeError:
return '*'
def get_colors(palette, funcs):
"""Get a dict mapping funcs to colors from palette.
Parameters
----------
palette : string
Name of the palette. Must be a key in bokeh.palettes.brewer
funcs : iterable
Iterable of function names
"""
unique_funcs = list(sorted(unique(funcs)))
n_funcs = len(unique_funcs)
palette_lookup = brewer[palette]
keys = list(palette_lookup.keys())
low, high = min(keys), max(keys)
if n_funcs > high:
colors = cycle(palette_lookup[high])
elif n_funcs < low:
colors = palette_lookup[low]
else:
colors = palette_lookup[n_funcs]
color_lookup = dict(zip(unique_funcs, colors))
return [color_lookup[n] for n in funcs]
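# Editor's example (illustrative, requires bokeh): get_colors('Blues',
# ['add', 'mul', 'add']) returns three hex colors from the Blues brewer
# palette, with the first and last entries equal because they share a
# function name.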
def visualize(profilers, file_path=None, show=True, save=True, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
If multiple profilers are passed in, the plots are stacked vertically.
Parameters
----------
profilers : profiler or list
Profiler or list of profilers.
file_path : string, optional
Name of the plot output file.
show : boolean, optional
If True (default), the plot is opened in a browser.
save : boolean, optional
If True (default), the plot is saved to disk.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
if not _state._notebook:
file_path = file_path or "profile.html"
bp.output_file(file_path)
if not isinstance(profilers, list):
profilers = [profilers]
figs = [prof._plot(**kwargs) for prof in profilers]
# Stack the plots
if len(figs) == 1:
p = figs[0]
else:
top = figs[0]
for f in figs[1:]:
f.x_range = top.x_range
f.title = None
f.min_border_top = 20
        for f in figs[:-1]:
f.xaxis.axis_label = None
f.min_border_bottom = 20
for f in figs:
f.min_border_left = 75
f.min_border_right = 75
p = bp.gridplot([[f] for f in figs])
if show:
bp.show(p)
if file_path and save:
bp.save(p)
return p
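# Editor's usage sketch (assumes dask.diagnostics is importable;
# my_dask_collection is a hypothetical dask object):
#
#     from dask.diagnostics import Profiler, ResourceProfiler
#     with Profiler() as prof, ResourceProfiler() as rprof:
#         result = my_dask_collection.compute()
#     visualize([prof, rprof], file_path='profile.html', show=False)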
def plot_tasks(results, dsk, palette='GnBu', label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of Profiler.results
dsk : dict
The dask graph being profiled.
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
keys, tasks, starts, ends, ids = zip(*results)
id_group = groupby(itemgetter(4), results)
timings = dict((k, [i.end_time - i.start_time for i in v]) for (k, v) in
id_group.items())
id_lk = dict((t[0], n) for (n, t) in enumerate(sorted(timings.items(),
key=itemgetter(1), reverse=True)))
left = min(starts)
right = max(ends)
p = bp.figure(y_range=[str(i) for i in range(len(id_lk))],
x_range=[0, right - left], **defaults)
data = {}
data['width'] = width = [e - s for (s, e) in zip(starts, ends)]
data['x'] = [w/2 + s - left for (w, s) in zip(width, starts)]
data['y'] = [id_lk[i] + 1 for i in ids]
data['function'] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
data['color'] = get_colors(palette, funcs)
data['key'] = [str(i) for i in keys]
source = bp.ColumnDataSource(data=data)
p.rect(source=source, x='x', y='y', height=1, width='width',
color='color', line_color='gray')
else:
p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10],
**defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Worker ID"
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Key:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
</div>
"""
hover.point_policy = 'follow_mouse'
return p
def plot_resources(results, palette='GnBu', **kwargs):
"""Plot resource usage in a bokeh plot.
Parameters
----------
results : sequence
Output of ResourceProfiler.results
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by plot_resources.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
t, mem, cpu = zip(*results)
left, right = min(t), max(t)
t = [i - left for i in t]
p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults)
else:
t = mem = cpu = []
p = bp.figure(y_range=(0, 100), x_range=(0, 10), **defaults)
colors = brewer[palette][6]
p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU')
p.yaxis.axis_label = "% CPU"
p.extra_y_ranges = {'memory': Range1d(start=0, end=(max(mem) if mem else 100))}
p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4,
legend='Memory')
p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'),
'right')
p.xaxis.axis_label = "Time (s)"
return p
def plot_cache(results, dsk, start_time, metric_name, palette='GnBu',
label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of CacheProfiler.results
dsk : dict
The dask graph being profiled.
start_time : float
Start time of the profile.
metric_name : string
Metric used to measure cache size
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,wheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
starts, ends = list(zip(*results))[3:]
tics = list(sorted(unique(starts + ends)))
groups = groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
data = {}
for k, vals in groups.items():
cnts = dict.fromkeys(tics, 0)
for v in vals:
cnts[v.cache_time] += v.metric
cnts[v.free_time] -= v.metric
data[k] = list(accumulate(add, pluck(1, sorted(cnts.items()))))
tics = [i - start_time for i in tics]
p = bp.figure(x_range=[0, max(tics)], **defaults)
for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
p.line('x', 'y', line_color=color, line_width=3,
source=bp.ColumnDataSource({'x': tics, 'y': val,
'label': [key for i in val]}))
else:
p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Cache Size ({0})".format(metric_name)
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
</div>
"""
return p
| {
"repo_name": "pombredanne/dask",
"path": "dask/diagnostics/profile_visualize.py",
"copies": "1",
"size": "12738",
"license": "bsd-3-clause",
"hash": -7523364113404990000,
"line_mean": 32.171875,
"line_max": 87,
"alpha_frac": 0.5440414508,
"autogenerated": false,
"ratio": 3.611567904734902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9653090671775296,
"avg_score": 0.0005037367519211524,
"num_lines": 384
} |
from __future__ import absolute_import, division, print_function
from itertools import cycle
from operator import itemgetter, add
from ..utils import funcname, import_required
from ..core import istask
from ..compatibility import apply
_BOKEH_MISSING_MSG = ("Diagnostics plots require `bokeh` to be installed")
_TOOLZ_MISSING_MSG = ("Diagnostics plots require `toolz` to be installed")
def unquote(expr):
if istask(expr):
if expr[0] in (tuple, list, set):
return expr[0](map(unquote, expr[1]))
elif (expr[0] == dict and
isinstance(expr[1], list) and
isinstance(expr[1][0], list)):
return dict(map(unquote, expr[1]))
return expr
def pprint_task(task, keys, label_size=60):
"""Return a nicely formatted string for a task.
Parameters
----------
task:
Value within dask graph to render as text
keys: iterable
List of keys within dask graph
label_size: int (optional)
Maximum size of output label, defaults to 60
Examples
--------
>>> from operator import add, mul
>>> dsk = {'a': 1,
... 'b': 2,
... 'c': (add, 'a', 'b'),
... 'd': (add, (mul, 'a', 'b'), 'c'),
... 'e': (sum, ['a', 'b', 5]),
... 'f': (add,),
... 'g': []}
>>> pprint_task(dsk['c'], dsk)
'add(_, _)'
>>> pprint_task(dsk['d'], dsk)
'add(mul(_, _), _)'
>>> pprint_task(dsk['e'], dsk)
'sum([_, _, *])'
>>> pprint_task(dsk['f'], dsk)
'add()'
>>> pprint_task(dsk['g'], dsk)
'[]'
"""
if istask(task):
func = task[0]
if func is apply:
head = funcname(task[1])
tail = ')'
args = unquote(task[2]) if len(task) > 2 else ()
kwargs = unquote(task[3]) if len(task) > 3 else {}
else:
if hasattr(func, 'funcs'):
head = '('.join(funcname(f) for f in func.funcs)
tail = ')'*len(func.funcs)
else:
head = funcname(task[0])
tail = ')'
args = task[1:]
kwargs = {}
if args or kwargs:
label_size2 = int((label_size - len(head) - len(tail)) //
(len(args) + len(kwargs)))
pprint = lambda t: pprint_task(t, keys, label_size2)
if args:
if label_size2 > 5:
args = ', '.join(pprint(t) for t in args)
else:
args = '...'
else:
args = ''
if kwargs:
if label_size2 > 5:
kwargs = ', ' + ', '.join('{0}={1}'.format(k, pprint(v))
for k, v in sorted(kwargs.items()))
else:
kwargs = ', ...'
else:
kwargs = ''
return '{0}({1}{2}{3}'.format(head, args, kwargs, tail)
elif isinstance(task, list):
if not task:
return '[]'
elif len(task) > 3:
result = pprint_task(task[:3], keys, label_size)
return result[:-1] + ', ...]'
else:
label_size2 = int((label_size - 2 - 2*len(task)) // len(task))
args = ', '.join(pprint_task(t, keys, label_size2) for t in task)
return '[{0}]'.format(args)
else:
try:
if task in keys:
return '_'
else:
return '*'
except TypeError:
return '*'
def get_colors(palette, funcs):
"""Get a dict mapping funcs to colors from palette.
Parameters
----------
palette : string
Name of the palette. Must be a key in bokeh.palettes.brewer
funcs : iterable
Iterable of function names
"""
palettes = import_required('bokeh.palettes', _BOKEH_MISSING_MSG)
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
unique_funcs = list(sorted(tz.unique(funcs)))
n_funcs = len(unique_funcs)
palette_lookup = palettes.brewer[palette]
keys = list(palette_lookup.keys())
low, high = min(keys), max(keys)
if n_funcs > high:
colors = cycle(palette_lookup[high])
elif n_funcs < low:
colors = palette_lookup[low]
else:
colors = palette_lookup[n_funcs]
color_lookup = dict(zip(unique_funcs, colors))
return [color_lookup[n] for n in funcs]
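# Illustrative sketch (not part of the original module): get_colors assigns
# one color per input name and reuses the same color for repeated names, so
# tasks produced by the same function are shaded consistently. Assumes bokeh
# and toolz are installed; the names below are made up.
def _example_get_colors():
    names = ['add', 'mul', 'add', 'sum']
    colors = get_colors('YlGnBu', names)
    assert colors[0] == colors[2]  # both 'add' entries share a color
    return colors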
def visualize(profilers, file_path=None, show=True, save=True, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
If multiple profilers are passed in, the plots are stacked vertically.
Parameters
----------
profilers : profiler or list
Profiler or list of profilers.
file_path : string, optional
Name of the plot output file.
show : boolean, optional
If True (default), the plot is opened in a browser.
save : boolean, optional
If True (default), the plot is saved to disk.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.io import _state
if not _state._notebook:
file_path = file_path or "profile.html"
bp.output_file(file_path)
if not isinstance(profilers, list):
profilers = [profilers]
figs = [prof._plot(**kwargs) for prof in profilers]
# Stack the plots
if len(figs) == 1:
p = figs[0]
else:
top = figs[0]
for f in figs[1:]:
f.x_range = top.x_range
f.title = None
f.min_border_top = 20
f.plot_height -= 30
for f in figs[:-1]:
f.xaxis.axis_label = None
f.min_border_bottom = 20
f.plot_height -= 30
for f in figs:
f.min_border_left = 75
f.min_border_right = 75
p = bp.gridplot([[f] for f in figs])
if show:
bp.show(p)
if file_path and save:
bp.save(p)
return p
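# Hedged usage sketch (not part of the original module): profile a small dask
# array computation and stack the task and resource plots. Assumes dask,
# bokeh and psutil are available; the workload and figure defaults are
# arbitrary.
def _example_visualize():
    from dask.diagnostics import Profiler, ResourceProfiler
    import dask.array as da
    x = da.random.random((2000, 2000), chunks=(500, 500))
    with Profiler() as prof, ResourceProfiler(dt=0.25) as rprof:
        x.dot(x.T).sum().compute()
    # show=False / save=False keep the example free of side effects
    return visualize([prof, rprof], show=False, save=False)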
def _get_figure_keywords():
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
o = bp.Figure.properties()
o.add('tools')
return o
def plot_tasks(results, dsk, palette='YlGnBu', label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of Profiler.results
dsk : dict
The dask graph being profiled.
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.models import HoverTool
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
keys, tasks, starts, ends, ids = zip(*results)
id_group = tz.groupby(itemgetter(4), results)
timings = dict((k, [i.end_time - i.start_time for i in v]) for (k, v) in
id_group.items())
id_lk = dict((t[0], n) for (n, t) in enumerate(sorted(timings.items(),
key=itemgetter(1), reverse=True)))
left = min(starts)
right = max(ends)
p = bp.figure(y_range=[str(i) for i in range(len(id_lk))],
x_range=[0, right - left], **defaults)
data = {}
data['width'] = width = [e - s for (s, e) in zip(starts, ends)]
data['x'] = [w/2 + s - left for (w, s) in zip(width, starts)]
data['y'] = [id_lk[i] + 1 for i in ids]
data['function'] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
data['color'] = get_colors(palette, funcs)
data['key'] = [str(i) for i in keys]
source = bp.ColumnDataSource(data=data)
p.rect(source=source, x='x', y='y', height=1, width='width',
color='color', line_color='gray')
else:
p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10],
**defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Worker ID"
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Key:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
</div>
"""
hover.point_policy = 'follow_mouse'
return p
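# Hedged sketch (not part of the original module): build a tiny graph by hand,
# run it under Profiler, and feed the recorded (key, task, start, end, worker)
# tuples plus the graph into plot_tasks. Assumes dask, toolz and bokeh.
def _example_plot_tasks():
    from operator import add, mul
    from dask.diagnostics import Profiler
    from dask.threaded import get
    dsk = {'a': 1, 'b': 2, 'c': (add, 'a', 'b'), 'd': (mul, 'c', 10)}
    with Profiler() as prof:
        get(dsk, 'd')
    return plot_tasks(prof.results, dsk)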
def plot_resources(results, palette='YlGnBu', **kwargs):
"""Plot resource usage in a bokeh plot.
Parameters
----------
results : sequence
Output of ResourceProfiler.results
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by plot_resources.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.palettes import brewer
from bokeh.models import LinearAxis, Range1d
defaults = dict(title="Profile Results",
tools="save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
t, mem, cpu = zip(*results)
left, right = min(t), max(t)
t = [i - left for i in t]
p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults)
else:
t = mem = cpu = []
p = bp.figure(y_range=(0, 100), x_range=(0, 10), **defaults)
colors = brewer[palette][6]
p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU')
p.yaxis.axis_label = "% CPU"
p.extra_y_ranges = {'memory': Range1d(start=(min(mem) if mem else 0),
end=(max(mem) if mem else 100))}
p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4,
legend='Memory')
p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'),
'right')
p.xaxis.axis_label = "Time (s)"
return p
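# Hedged sketch (not part of the original module): ResourceProfiler samples
# (time, memory, cpu) tuples, which feed straight into plot_resources.
# Assumes dask, psutil and bokeh are installed; the workload is arbitrary.
def _example_plot_resources():
    from dask.diagnostics import ResourceProfiler
    import dask.array as da
    with ResourceProfiler(dt=0.25) as rprof:
        da.random.random((2000, 2000), chunks=(500, 500)).sum().compute()
    return plot_resources(rprof.results)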
def plot_cache(results, dsk, start_time, metric_name, palette='YlGnBu',
label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of CacheProfiler.results
dsk : dict
The dask graph being profiled.
start_time : float
Start time of the profile.
metric_name : string
Metric used to measure cache size
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.models import HoverTool
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,wheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
starts, ends = list(zip(*results))[3:]
tics = list(sorted(tz.unique(starts + ends)))
groups = tz.groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
data = {}
for k, vals in groups.items():
cnts = dict.fromkeys(tics, 0)
for v in vals:
cnts[v.cache_time] += v.metric
cnts[v.free_time] -= v.metric
data[k] = [0] + list(tz.accumulate(add, tz.pluck(1, sorted(cnts.items()))))
tics = [0] + [i - start_time for i in tics]
p = bp.figure(x_range=[0, max(tics)], **defaults)
for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
p.line('x', 'y', line_color=color, line_width=3,
source=bp.ColumnDataSource({'x': tics, 'y': val,
'label': [key for i in val]}))
else:
p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
p.yaxis.axis_label = "Cache Size ({0})".format(metric_name)
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
</div>
"""
return p
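# Hedged sketch (not part of the original module): CacheProfiler records when
# task results enter and leave the opportunistic cache; plot_cache needs the
# recorded events, the graph, the profile start time and the metric name.
# Assumes dask, toolz and bokeh; 'count' is the profiler's default metric.
def _example_plot_cache():
    from timeit import default_timer
    from dask.diagnostics import CacheProfiler
    import dask.array as da
    x = da.ones((1000, 1000), chunks=(250, 250))
    y = x.dot(x.T).sum()
    start = default_timer()
    with CacheProfiler() as cprof:
        y.compute()
    return plot_cache(cprof.results, dict(y.dask), start, 'count')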
| {
"repo_name": "cowlicks/dask",
"path": "dask/diagnostics/profile_visualize.py",
"copies": "2",
"size": "13680",
"license": "bsd-3-clause",
"hash": -2443241040388121600,
"line_mean": 32.6117936118,
"line_max": 87,
"alpha_frac": 0.548245614,
"autogenerated": false,
"ratio": 3.5849056603773586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133151274377359,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import ifilter, imap, islice
import json
import re
import sys
import traceback
import urllib
from urlparse import urljoin, urlparse, urlunparse
import urlnorm
from dossier.fc import FeatureCollection, StringCounter
from dossier.models.etl.interface import ETL, html_to_fc, mk_content_id
class Scrapy(ETL):
@staticmethod
def detect_url_prefix(filelike):
# This is a heuristic based on my observation of the data. This assumes
# that all data is coming from a single domain and that all discovered
# relative links have exactly the same prefix. I'm dubious. ---AG
#
# The heuristic is to find an absolute URL and a relative URL, then
# discover the prefix.
#
# If one could not be detected `None` is returned.
def find_prefix():
for uabs in uabss:
for urel in urels:
prefix = prefix_of(uabs, urel)
if prefix is not None:
return prefix
return None
def prefix_of(uabs, urel):
if len(uabs) >= len(urel) and uabs[-len(urel):] == urel:
return uabs[:-len(urel)]
return None
def add_url(url):
if url is None:
return
url = urlunparse(urlparse(url)._replace(query=''))
if re.search('^http', url):
uabss.add(url)
else:
urels.add(url.lstrip('.'))
uabss, urels = set(), set()
for row in ifilter(None, imap(json_maybe_loads, filelike)):
if row.get('_type') == 'ForumPostItem':
add_url(row.get('thread_link'))
add_url(row.get('author', {}).get('link'))
elif row.get('_type') == 'CcaItem':
add_url(row.get('url'))
prefix = find_prefix()
if prefix is not None:
return prefix
return None
def __init__(self, filelike, url_prefix=None):
self.rows = ifilter(None, imap(json_maybe_loads, filelike))
self.url_prefix = url_prefix
def cids_and_fcs(self, mapper, limit=5):
posts = ifilter(lambda d: d.get('_type') == 'ForumPostItem', self.rows)
return mapper(from_forum_post,
islice(imap(self.sanitize, posts), limit))
def sanitize(self, post):
for n in ['link', 'avatar']:
if n not in post['author']:
continue
post['author'][n] = self.sanitize_url(post['author'][n])
if 'thread_link' in post:
post['thread_link'] = self.sanitize_url(post['thread_link'])
return post
def sanitize_url(self, url):
url = normurl(url)
if self.url_prefix is None:
return url
if re.search('^http', url):
return url
return normurl(urljoin(self.url_prefix, url))
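# Illustrative sketch (not part of the original module): exercises the prefix
# heuristic above on two made-up forum rows, one with an absolute thread link
# and one with a relative link that shares the same suffix.
def _example_detect_url_prefix():
    rows = [
        json.dumps({'_type': 'ForumPostItem', 'author': {},
                    'thread_link': 'http://forum.example.com/threads/42'}),
        json.dumps({'_type': 'ForumPostItem', 'author': {},
                    'thread_link': 'threads/42'}),
    ]
    # The shared suffix yields the prefix 'http://forum.example.com/'
    return Scrapy.detect_url_prefix(rows)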
def from_forum_post(row):
cid = forum_post_id(row)
try:
fc = html_to_fc(row['content'].strip(),
url=row['thread_link'],
timestamp=forum_post_timestamp(row),
other_features=forum_post_features(row))
except:
fc = None
print('Could not create FC for %s:' % cid, file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
return cid, fc
def forum_post_features(row):
fc = FeatureCollection()
for k in row['author']:
fc['post_author_' + k] = row['author'][k]
if 'image_urls' in row:
fc['image_url'] = StringCounter()
for image_url in row['image_urls']:
fc['image_url'][image_url] += 1
others = ['parent_id', 'thread_id', 'thread_link', 'thread_name', 'title']
for k in others:
if k in row:
fc['post_' + k] = uni(row[k])
return fc
def forum_post_id(row):
ticks = forum_post_timestamp(row)
abs_url = row['thread_link']
author = row['author'].get('username', 'unknown')
return mk_content_id('|'.join(map(urlquote, [ticks, abs_url, author])))
def forum_post_timestamp(row):
return str(int(row['created_at']) / 1000)
def urlquote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return urllib.quote(s, safe='~')
def normurl(url):
try:
return urlnorm.norm(url)
except urlnorm.InvalidUrl:
return urlnorm.norm_path('', url)
def uni(s):
if isinstance(s, str):
return unicode(s, 'utf-8')
return s
def json_maybe_loads(s):
try:
d = json.loads(s)
except:
return None
if 'thread_link' not in d:
return None
return d
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/etl/scrapy.py",
"copies": "1",
"size": "4702",
"license": "mit",
"hash": 6823929727462572000,
"line_mean": 28.949044586,
"line_max": 79,
"alpha_frac": 0.5586984262,
"autogenerated": false,
"ratio": 3.7287866772402856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9782177247813959,
"avg_score": 0.0010615711252653928,
"num_lines": 157
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from distutils.version import LooseVersion
import numpy as np
from .. import Variable, coding, conventions
from ..core import indexing
from ..core.pycompat import OrderedDict, integer_types, iteritems
from ..core.utils import FrozenOrderedDict, HiddenKeyDict
from .common import AbstractWritableDataStore, ArrayWriter, BackendArray
# need some special secret attributes to tell us the dimensions
_DIMENSION_KEY = '_ARRAY_DIMENSIONS'
# zarr attributes have to be serializable as json
# many xarray datasets / variables have numpy arrays and values
# these functions handle encoding / decoding of such items
def _encode_zarr_attr_value(value):
if isinstance(value, np.ndarray):
encoded = value.tolist()
# this checks if it's a scalar number
elif isinstance(value, np.generic):
encoded = value.item()
else:
encoded = value
return encoded
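# Quick illustration (not part of the backend): numpy scalars and arrays are
# turned into plain Python objects that zarr's JSON-backed attrs can store.
def _example_encode_attr_values():
    assert _encode_zarr_attr_value(np.float64(2.5)) == 2.5
    assert _encode_zarr_attr_value(np.arange(3)) == [0, 1, 2]
    return _encode_zarr_attr_value('plain values pass through unchanged')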
class ZarrArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
self.dtype = dtype
def get_array(self):
return self.datastore.ds[self.variable_name]
def __getitem__(self, key):
array = self.get_array()
if isinstance(key, indexing.BasicIndexer):
return array[key.tuple]
elif isinstance(key, indexing.VectorizedIndexer):
return array.vindex[indexing._arrayize_vectorized_indexer(
key.tuple, self.shape).tuple]
else:
assert isinstance(key, indexing.OuterIndexer)
return array.oindex[key.tuple]
# if self.ndim == 0:
# could possibly have a work-around for 0d data here
def _determine_zarr_chunks(enc_chunks, var_chunks, ndim):
"""
Given encoding chunks (possibly None) and variable chunks (possibly None)
"""
# zarr chunk spec:
# chunks : int or tuple of ints, optional
# Chunk shape. If not provided, will be guessed from shape and dtype.
# if there are no chunks in encoding and the variable data is a numpy
# array, then we let zarr use its own heuristics to pick the chunks
if var_chunks is None and enc_chunks is None:
return None
# if there are no chunks in encoding but there are dask chunks, we try to
# use the same chunks in zarr
# However, zarr chunks needs to be uniform for each array
# http://zarr.readthedocs.io/en/latest/spec/v1.html#chunks
# while dask chunks can be variable sized
# http://dask.pydata.org/en/latest/array-design.html#chunks
if var_chunks and enc_chunks is None:
all_var_chunks = list(product(*var_chunks))
first_var_chunk = all_var_chunks[0]
# all but the last chunk have to match exactly
for this_chunk in all_var_chunks[:-1]:
if this_chunk != first_var_chunk:
raise ValueError(
"Zarr requires uniform chunk sizes excpet for final chunk."
" Variable %r has incompatible chunks. Consider "
"rechunking using `chunk()`." % (var_chunks,))
# last chunk is allowed to be smaller
last_var_chunk = all_var_chunks[-1]
for len_first, len_last in zip(first_var_chunk, last_var_chunk):
if len_last > len_first:
raise ValueError(
"Final chunk of Zarr array must be smaller than first. "
"Variable %r has incompatible chunks. Consider rechunking "
"using `chunk()`." % var_chunks)
return first_var_chunk
# from here on, we are dealing with user-specified chunks in encoding
# zarr allows chunks to be an integer, in which case it uses the same chunk
# size on each dimension.
# Here we re-implement this expansion ourselves. That makes the logic of
# checking chunk compatibility easier
if isinstance(enc_chunks, integer_types):
enc_chunks_tuple = ndim * (enc_chunks,)
else:
enc_chunks_tuple = tuple(enc_chunks)
if len(enc_chunks_tuple) != ndim:
raise ValueError("zarr chunks tuple %r must have same length as "
"variable.ndim %g" %
(enc_chunks_tuple, ndim))
for x in enc_chunks_tuple:
if not isinstance(x, int):
raise TypeError("zarr chunks must be an int or a tuple of ints. "
"Instead found %r" % (enc_chunks_tuple,))
# if there are chunks in encoding and the variable data is a numpy array,
# we use the specified chunks
if var_chunks is None:
return enc_chunks_tuple
# the hard case
# DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk
# this avoids the need to get involved in zarr synchronization / locking
# From zarr docs:
# "If each worker in a parallel computation is writing to a separate
# region of the array, and if region boundaries are perfectly aligned
# with chunk boundaries, then no synchronization is required."
# TODO: incorporate synchronizer to allow writes from multiple dask
# threads
if var_chunks and enc_chunks_tuple:
for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks):
for dchunk in dchunks:
if dchunk % zchunk:
raise NotImplementedError(
"Specified zarr chunks %r would overlap multiple dask "
"chunks %r. This is not implemented in xarray yet. "
" Consider rechunking the data using "
"`chunk()` or specifying different chunks in encoding."
% (enc_chunks_tuple, var_chunks))
return enc_chunks_tuple
raise AssertionError(
"We should never get here. Function logic must be wrong.")
def _get_zarr_dims_and_attrs(zarr_obj, dimension_key):
    # Zarr arrays do not have dimensions. To get around this problem, we add
# an attribute that specifies the dimension. We have to hide this attribute
# when we send the attributes to the user.
# zarr_obj can be either a zarr group or zarr array
try:
dimensions = zarr_obj.attrs[dimension_key]
except KeyError:
raise KeyError("Zarr object is missing the attribute `%s`, which is "
"required for xarray to determine variable dimensions."
% (dimension_key))
attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key])
return dimensions, attributes
def _extract_zarr_variable_encoding(variable, raise_on_invalid=False):
encoding = variable.encoding.copy()
valid_encodings = set(['chunks', 'compressor', 'filters',
'cache_metadata'])
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError('unexpected encoding parameters for zarr '
'backend: %r' % invalid)
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
chunks = _determine_zarr_chunks(encoding.get('chunks'), variable.chunks,
variable.ndim)
encoding['chunks'] = chunks
return encoding
# Function below is copied from conventions.encode_cf_variable.
# The only change is to raise an error for object dtypes.
def encode_zarr_variable(var, needs_copy=True, name=None):
"""
Converts an Variable into an Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
var = conventions.encode_cf_variable(var, name=name)
# zarr allows unicode, but not variable-length strings, so it's both
# simpler and more compact to always encode as UTF-8 explicitly.
# TODO: allow toggling this explicitly via dtype in encoding.
coder = coding.strings.EncodedStringCoder(allows_unicode=False)
var = coder.encode(var, name=name)
var = coding.strings.ensure_fixed_length_bytes(var)
return var
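# Minimal sketch (not part of the backend; behavior assumed from the CF
# encoding steps above): a plain numeric Variable passes through the encoding
# chain essentially unchanged, while string data would additionally be cast
# to fixed-length bytes as described in the comments above.
def _example_encode_zarr_variable():
    v = Variable(('x',), np.arange(3.0))
    return encode_zarr_variable(v, name='x')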
class ZarrStore(AbstractWritableDataStore):
"""Store for reading and writing data via zarr
"""
@classmethod
def open_group(cls, store, mode='r', synchronizer=None, group=None,
writer=None):
import zarr
min_zarr = '2.2'
if LooseVersion(zarr.__version__) < min_zarr: # pragma: no cover
raise NotImplementedError("Zarr version %s or greater is "
"required by xarray. See zarr "
"installation "
"http://zarr.readthedocs.io/en/stable/"
"#installation" % min_zarr)
zarr_group = zarr.open_group(store=store, mode=mode,
synchronizer=synchronizer, path=group)
return cls(zarr_group, writer=writer)
def __init__(self, zarr_group, writer=None):
self.ds = zarr_group
self._read_only = self.ds.read_only
self._synchronizer = self.ds.synchronizer
self._group = self.ds.path
if writer is None:
# by default, we should not need a lock for writing zarr because
# we do not (yet) allow overlapping chunks during write
zarr_writer = ArrayWriter(lock=False)
else:
zarr_writer = writer
# do we need to define attributes for all of the opener keyword args?
super(ZarrStore, self).__init__(zarr_writer)
def open_store_variable(self, name, zarr_array):
data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))
dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array,
_DIMENSION_KEY)
attributes = OrderedDict(attributes)
encoding = {'chunks': zarr_array.chunks,
'compressor': zarr_array.compressor,
'filters': zarr_array.filters}
# _FillValue needs to be in attributes, not encoding, so it will get
# picked up by decode_cf
        if zarr_array.fill_value is not None:
attributes['_FillValue'] = zarr_array.fill_value
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in self.ds.arrays())
def get_attrs(self):
attributes = OrderedDict(self.ds.attrs.asdict())
return attributes
def get_dimensions(self):
dimensions = OrderedDict()
for k, v in self.ds.arrays():
try:
for d, s in zip(v.attrs[_DIMENSION_KEY], v.shape):
if d in dimensions and dimensions[d] != s:
raise ValueError(
'found conflicting lengths for dimension %s '
'(%d != %d)' % (d, s, dimensions[d]))
dimensions[d] = s
except KeyError:
raise KeyError("Zarr object is missing the attribute `%s`, "
"which is required for xarray to determine "
"variable dimensions." % (_DIMENSION_KEY))
return dimensions
def set_dimensions(self, variables, unlimited_dims=None):
if unlimited_dims is not None:
raise NotImplementedError(
"Zarr backend doesn't know how to handle unlimited dimensions")
def set_attributes(self, attributes):
self.ds.attrs.put(attributes)
def encode_variable(self, variable):
variable = encode_zarr_variable(variable)
return variable
def encode_attribute(self, a):
return _encode_zarr_attr_value(a)
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
attrs = variable.attrs.copy()
dims = variable.dims
dtype = variable.dtype
shape = variable.shape
fill_value = attrs.pop('_FillValue', None)
if variable.encoding == {'_FillValue': None} and fill_value is None:
variable.encoding = {}
encoding = _extract_zarr_variable_encoding(
variable, raise_on_invalid=check_encoding)
encoded_attrs = OrderedDict()
# the magic for storing the hidden dimension data
encoded_attrs[_DIMENSION_KEY] = dims
for k, v in iteritems(attrs):
encoded_attrs[k] = self.encode_attribute(v)
zarr_array = self.ds.create(name, shape=shape, dtype=dtype,
fill_value=fill_value, **encoding)
zarr_array.attrs.put(encoded_attrs)
return zarr_array, variable.data
def store(self, variables, attributes, *args, **kwargs):
AbstractWritableDataStore.store(self, variables, attributes,
*args, **kwargs)
def sync(self, compute=True):
self.delayed_store = self.writer.sync(compute=compute)
def open_zarr(store, group=None, synchronizer=None, auto_chunk=True,
decode_cf=True, mask_and_scale=True, decode_times=True,
concat_characters=True, decode_coords=True,
drop_variables=None):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
    group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
auto_chunk : bool, optional
Whether to automatically create dask chunks corresponding to each
variable's zarr chunks. If False, zarr array data will lazily convert
to numpy arrays upon access.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = 'r'
zarr_store = ZarrStore.open_group(store, mode=mode,
synchronizer=synchronizer,
group=group)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
if auto_chunk:
# adapted from Dataset.Chunk()
def maybe_chunk(name, var):
from dask.base import tokenize
chunks = var.encoding.get('chunks')
if (var.ndim > 0) and (chunks is not None):
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = 'zarr-%s' % token2
return var.chunk(chunks, name=name2, lock=None)
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v))
for k, v in ds.variables.items()])
return ds._replace_vars_and_dims(variables)
else:
return ds
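# Hedged round-trip sketch (not part of the backend): write a small dataset
# with Dataset.to_zarr into an in-memory dict store, then read it back with
# open_zarr. Assumes xarray and zarr are installed; names are arbitrary.
def _example_open_zarr_roundtrip():
    import xarray as xr
    store = {}  # any MutableMapping can serve as a zarr store
    xr.Dataset({'foo': (('x',), np.arange(6))}).to_zarr(store)
    return xr.open_zarr(store, auto_chunk=False)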
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/zarr.py",
"copies": "1",
"size": "18357",
"license": "apache-2.0",
"hash": -2325744561222545000,
"line_mean": 38.9065217391,
"line_max": 79,
"alpha_frac": 0.6195456774,
"autogenerated": false,
"ratio": 4.380100214745884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 460
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from functools import partial
from toolz import curry
import numpy as np
from ..base import tokenize
from .core import Array, normalize_chunks
from .numpy_compat import full
def dims_from_size(size, blocksize):
"""
>>> list(dims_from_size(30, 8))
[8, 8, 8, 6]
"""
result = (blocksize,) * (size // blocksize)
if size % blocksize:
result = result + (size % blocksize,)
return result
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if 'shape' not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop('shape')
if not isinstance(shape, (tuple, list)):
shape = (shape,)
chunks = kwargs.pop('chunks', None)
chunks = normalize_chunks(chunks, shape)
name = kwargs.pop('name', None)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
name = name or 'wrapped-' + tokenize(func, shape, chunks, dtype, args, kwargs)
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
func = partial(func, dtype=dtype, **kwargs)
vals = ((func,) + (s,) + args for s in shapes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@curry
def wrap(wrap_func, func, **kwargs):
f = partial(wrap_func, func, **kwargs)
f.__doc__ = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also requires a
keyword argument chunks=(...)
Original signature follows below.
""" % {'name': func.__name__} + func.__doc__
f.__name__ = 'blocked_' + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype='f8')
zeros = w(np.zeros, dtype='f8')
empty = w(np.empty, dtype='f8')
full = w(full)
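# Small usage sketch (not part of the original module): the wrapped creators
# accept the same arguments as their numpy counterparts plus a ``chunks=``
# keyword; dtype defaults can be overridden per call. Shapes are arbitrary.
def _example_wrapped_creation():
    x = ones((10, 10), chunks=(5, 5))       # 2 x 2 grid of 5 x 5 blocks
    y = zeros(10, chunks=5, dtype='i4')
    return x.chunks, y.dtype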
| {
"repo_name": "clarkfitzg/dask",
"path": "dask/array/wrap.py",
"copies": "4",
"size": "1978",
"license": "bsd-3-clause",
"hash": 4193349603082930700,
"line_mean": 24.6883116883,
"line_max": 82,
"alpha_frac": 0.6203235592,
"autogenerated": false,
"ratio": 3.4520069808027922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6072330540002793,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from math import ceil
from numbers import Number
from operator import getitem, add, itemgetter
import numpy as np
from toolz import merge, accumulate, pluck, memoize
from ..base import tokenize
from ..compatibility import long
colon = slice(None, None, None)
def sanitize_index(ind):
""" Sanitize the elements for indexing along one axis
>>> sanitize_index([2, 3, 5])
[2, 3, 5]
>>> sanitize_index([True, False, True, False])
[0, 2]
>>> sanitize_index(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index(np.array([False, True, True]))
[1, 2]
>>> type(sanitize_index(np.int32(0)))
<type 'int'>
>>> sanitize_index(1.0)
1
>>> sanitize_index(0.5)
Traceback (most recent call last):
...
IndexError: Bad index. Must be integer-like: 0.5
"""
if isinstance(ind, Number):
ind2 = int(ind)
if ind2 != ind:
raise IndexError("Bad index. Must be integer-like: %s" % ind)
else:
return ind2
if hasattr(ind, 'tolist'):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
if isinstance(ind, list):
return [sanitize_index(i) for i in ind]
if isinstance(ind, slice):
return slice(sanitize_index(ind.start),
sanitize_index(ind.stop),
sanitize_index(ind.step))
if ind is None:
return ind
try:
return sanitize_index(np.array(ind).tolist())
except:
raise TypeError("Invalid index type", type(ind), ind)
def slice_array(out_name, in_name, blockdims, index):
"""
Master function for array slicing
This function makes a new dask that slices blocks along every
dimension and aggregates (via cartesian product) each dimension's
slices so that the resulting block slices give the same results
as the original slice on the original structure
Index must be a tuple. It may contain the following types
int, slice, list (at most one list), None
Parameters
----------
in_name - string
This is the dask variable name that will be used as input
out_name - string
This is the dask variable output name
    blockdims - iterable of iterables of integers (block sizes per dimension)
index - iterable of integers, slices, lists, or None
Returns
-------
Dict where the keys are tuples of
(out_name, dim_index[, dim_index[, ...]])
and the values are
(function, (in_name, dim_index, dim_index, ...),
(slice(...), [slice()[,...]])
Also new blockdims with shapes of each block
((10, 10, 10, 10), (20, 20))
Examples
--------
>>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
... (slice(10, 35),)) # doctest: +SKIP
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), (slice(10, 20),)),
('y', 1): (getitem, ('x', 1), (slice(0, 15),))}
>>> blockdims # doctest: +SKIP
((10, 15),)
See Also
--------
This function works by successively unwrapping cases and passing down
through a sequence of functions.
slice_with_newaxis - handle None/newaxis case
slice_wrap_lists - handle fancy indexing with lists
slice_slices_and_integers - handle everything else
"""
index = replace_ellipsis(len(blockdims), index)
index = tuple(map(sanitize_index, index))
blockdims = tuple(map(tuple, blockdims))
# x[:, :, :] - Punt and return old value
if all(index == slice(None, None, None) for index in index):
suffixes = product(*[range(len(bd)) for bd in blockdims])
dsk = dict(((out_name,) + s, (in_name,) + s)
for s in suffixes)
return dsk, blockdims
# Add in missing colons at the end as needed. x[5] -> x[5, :, :]
missing = len(blockdims) - (len(index) - index.count(None))
index += (slice(None, None, None),) * missing
# Pass down to next function
dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
bd_out = tuple(map(tuple, bd_out))
return dsk_out, bd_out
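# Runnable version of the SKIPped docstring example above (not part of the
# original module): slicing 10:35 out of five blocks of 20 touches only the
# first two blocks and yields block sizes (10, 15).
def _example_slice_array():
    dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
                                 (slice(10, 35),))
    assert blockdims == ((10, 15),)
    return sorted(dsk)  # [('y', 0), ('y', 1)]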
def slice_with_newaxes(out_name, in_name, blockdims, index):
"""
Handle indexing with Nones
Strips out Nones then hands off to slice_wrap_lists
"""
# Strip Nones from index
index2 = tuple([ind for ind in index if ind is not None])
where_none = [i for i, ind in enumerate(index) if ind is None]
where_none_orig = list(where_none)
for i, x in enumerate(where_none):
n = sum(isinstance(ind, int) for ind in index[:x])
if n:
where_none[i] -= n
# Pass down and do work
dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
if where_none:
expand = expander(where_none)
expand_orig = expander(where_none_orig)
# Insert ",0" into the key: ('x', 2, 3) -> ('x', 0, 2, 0, 3)
dsk2 = {(out_name,) + expand(k[1:], 0):
(v[:2] + (expand_orig(v[2], None),))
for k, v in dsk.items()
if k[0] == out_name}
# Add back intermediate parts of the dask that weren't the output
dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})
# Insert (1,) into blockdims: ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))
blockdims3 = expand(blockdims2, (1,))
return dsk3, blockdims3
else:
return dsk, blockdims2
def slice_wrap_lists(out_name, in_name, blockdims, index):
"""
Fancy indexing along blocked array dasks
Handles index of type list. Calls slice_slices_and_integers for the rest
See Also
--------
take - handle slicing with lists ("fancy" indexing)
slice_slices_and_integers - handle slicing with slices and integers
"""
shape = tuple(map(sum, blockdims))
assert all(isinstance(i, (slice, list, int, long)) for i in index)
if not len(blockdims) == len(index):
raise IndexError("Too many indices for array")
for bd, i in zip(blockdims, index):
check_index(i, sum(bd))
# Change indices like -1 to 9
index2 = posify_index(shape, index)
# Do we have more than one list in the index?
where_list = [i for i, ind in enumerate(index) if isinstance(ind, list)]
if len(where_list) > 1:
raise NotImplementedError("Don't yet support nd fancy indexing")
# Is the single list an empty list? In this case just treat it as a zero
# length slice
if where_list and not index[where_list[0]]:
index2 = list(index2)
index2[where_list.pop()] = slice(0, 0, 1)
index2 = tuple(index2)
# No lists, hooray! just use slice_slices_and_integers
if not where_list:
return slice_slices_and_integers(out_name, in_name, blockdims, index2)
# Replace all lists with full slices [3, 1, 0] -> slice(None, None, None)
index_without_list = tuple(slice(None, None, None)
if isinstance(i, list) else i
for i in index2)
# lists and full slices. Just use take
if all(isinstance(i, list) or i == slice(None, None, None)
for i in index2):
axis = where_list[0]
blockdims2, dsk3 = take(out_name, in_name, blockdims,
index2[where_list[0]], axis=axis)
# Mixed case. Both slices/integers and lists. slice/integer then take
else:
# Do first pass without lists
tmp = 'slice-' + tokenize((out_name, in_name, blockdims, index))
dsk, blockdims2 = slice_slices_and_integers(tmp, in_name, blockdims, index_without_list)
# After collapsing some axes due to int indices, adjust axis parameter
axis = where_list[0]
axis2 = axis - sum(1 for i, ind in enumerate(index2)
if i < axis and isinstance(ind, (int, long)))
# Do work
blockdims2, dsk2 = take(out_name, tmp, blockdims2, index2[axis],
axis=axis2)
dsk3 = merge(dsk, dsk2)
return dsk3, blockdims2
def slice_slices_and_integers(out_name, in_name, blockdims, index):
"""
Dask array indexing with slices and integers
See Also
--------
_slice_1d
"""
shape = tuple(map(sum, blockdims))
for dim, ind in zip(shape, index):
if np.isnan(dim) and ind != slice(None, None, None):
raise ValueError("Arrays chunk sizes are unknown: %s", shape)
assert all(isinstance(ind, (slice, int, long)) for ind in index)
assert len(index) == len(blockdims)
# Get a list (for each dimension) of dicts{blocknum: slice()}
block_slices = list(map(_slice_1d, shape, blockdims, index))
sorted_block_slices = [sorted(i.items()) for i in block_slices]
# (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))
# (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
out_names = list(product([out_name],
*[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
for d, i in zip(block_slices, index)
if not isinstance(i, (int, long))]))
all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))
dsk_out = {out_name: (getitem, in_name, slices)
for out_name, in_name, slices
in zip(out_names, in_names, all_slices)}
new_blockdims = [new_blockdim(d, db, i)
for d, i, db in zip(shape, index, blockdims)
if not isinstance(i, (int, long))]
return dsk_out, new_blockdims
def _slice_1d(dim_shape, lengths, index):
"""Returns a dict of {blocknum: slice}
This function figures out where each slice should start in each
block for a single dimension. If the slice won't return any elements
in the block, that block will not be in the output.
Parameters
----------
dim_shape - the number of elements in this dimension.
This should be a positive, non-zero integer
    lengths - a sequence of the block lengths in this dimension
      Each entry should be a positive, non-zero integer
index - a description of the elements in this dimension that we want
This might be an integer, a slice(), or an Ellipsis
Returns
-------
dictionary where the keys are the integer index of the blocks that
should be sliced and the values are the slices
Examples
--------
Trivial slicing
>>> _slice_1d(100, [60, 40], slice(None, None, None))
{0: slice(None, None, None), 1: slice(None, None, None)}
100 length array cut into length 20 pieces, slice 0:35
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
{0: slice(None, None, None), 1: slice(0, 15, 1)}
Support irregular blocks and various slices
>>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
{0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}
Support step sizes
>>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
{0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40)) # step > blocksize
{0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}
Also support indexing single elements
>>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
{1: 5}
And negative slicing
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
{0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
{0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
{4: slice(-1, -12, -3)}
"""
if isinstance(index, (int, long)):
i = 0
ind = index
lens = list(lengths)
while ind >= lens[0]:
i += 1
ind -= lens.pop(0)
return {i: ind}
assert isinstance(index, slice)
if index == colon:
return {k: colon for k in range(len(lengths))}
step = index.step or 1
if step > 0:
start = index.start or 0
stop = index.stop if index.stop is not None else dim_shape
else:
start = index.start or dim_shape - 1
start = dim_shape - 1 if start >= dim_shape else start
stop = -(dim_shape + 1) if index.stop is None else index.stop
# posify start and stop
if start < 0:
start += dim_shape
if stop < 0:
stop += dim_shape
d = dict()
if step > 0:
for i, length in enumerate(lengths):
if start < length and stop > 0:
d[i] = slice(start, min(stop, length), step)
start = (start - length) % step
else:
start = start - length
stop -= length
else:
rstart = start # running start
chunk_boundaries = list(accumulate(add, lengths))
for i, chunk_stop in reversed(list(enumerate(chunk_boundaries))):
# create a chunk start and stop
if i == 0:
chunk_start = 0
else:
chunk_start = chunk_boundaries[i - 1]
# if our slice is in this chunk
if (chunk_start <= rstart < chunk_stop) and (rstart > stop):
d[i] = slice(rstart - chunk_stop,
max(chunk_start - chunk_stop - 1,
stop - chunk_stop),
step)
# compute the next running start point,
offset = (rstart - (chunk_start - 1)) % step
rstart = chunk_start + offset - 1
# replace 0:20:1 with : if appropriate
for k, v in d.items():
if v == slice(0, lengths[k], 1):
d[k] = slice(None, None, None)
if not d: # special case x[:0]
d[0] = slice(0, 0, 1)
return d
def partition_by_size(sizes, seq):
"""
>>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])
[[1, 5, 9], [2, 19], [5]]
"""
seq = np.array(seq)
right = np.cumsum(sizes)
locations = np.searchsorted(seq, right)
locations = [0] + locations.tolist()
left = [0] + right.tolist()
return [(seq[locations[i]:locations[i + 1]] - left[i]).tolist()
for i in range(len(locations) - 1)]
def issorted(seq):
""" Is sequence sorted?
>>> issorted([1, 2, 3])
True
>>> issorted([3, 1, 2])
False
"""
if not seq:
return True
x = seq[0]
for elem in seq[1:]:
if elem < x:
return False
x = elem
return True
def take_sorted(outname, inname, blockdims, index, axis=0):
""" Index array with sorted list index
Forms a dask for the following case
x[:, [1, 3, 5, 10], ...]
where the index, ``[1, 3, 5, 10]`` is sorted in non-decreasing order.
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
>>> blockdims
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 1): (getitem, ('x', 2), ([7],))}
See Also
--------
take - calls this function
"""
sizes = blockdims[axis] # the blocksizes on the axis that we care about
index_lists = partition_by_size(sizes, sorted(index))
where_index = [i for i, il in enumerate(index_lists) if il]
index_lists = [il for il in index_lists if il]
dims = [range(len(bd)) for bd in blockdims]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in blockdims]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
blockdims2 = list(blockdims)
blockdims2[axis] = tuple(map(len, index_lists))
return tuple(blockdims2), dict(zip(keys, values))
def take(outname, inname, blockdims, index, axis=0):
""" Index array with an iterable of index
Handles a single index by a single list
Mimics ``np.take``
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
>>> blockdims
((4,),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))],
0),
(2, 0, 4, 1))}
When list is sorted we retain original block structure
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
>>> blockdims
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 1): (getitem, ('x', 2), ([7],))}
"""
if issorted(index):
return take_sorted(outname, inname, blockdims, index, axis)
n = len(blockdims)
sizes = blockdims[axis] # the blocksizes on the axis that we care about
index_lists = partition_by_size(sizes, sorted(index))
dims = [[0] if axis == i else list(range(len(bd)))
for i, bd in enumerate(blockdims)]
keys = list(product([outname], *dims))
rev_index = list(map(sorted(index).index, index))
vals = [(getitem, (np.concatenate,
[(getitem, ((inname, ) + d[:axis] + (i, ) + d[axis + 1:]),
((colon, ) * axis + (IL, ) + (colon, ) * (n - axis - 1)))
for i, IL in enumerate(index_lists) if IL], axis),
((colon, ) * axis + (rev_index, ) + (colon, ) * (n - axis - 1)))
for d in product(*dims)]
blockdims2 = list(blockdims)
blockdims2[axis] = (len(index), )
return tuple(blockdims2), dict(zip(keys, vals))
def posify_index(shape, ind):
""" Flip negative indices around to positive ones
>>> posify_index(10, 3)
3
>>> posify_index(10, -3)
7
>>> posify_index(10, [3, -3])
[3, 7]
>>> posify_index((10, 20), (3, -3))
(3, 17)
>>> posify_index((10, 20), (3, [3, 4, -3]))
(3, [3, 4, 17])
"""
if isinstance(ind, tuple):
return tuple(map(posify_index, shape, ind))
if isinstance(ind, (int, long)):
if ind < 0:
return ind + shape
else:
return ind
if isinstance(ind, list):
return [i + shape if i < 0 else i for i in ind]
return ind
@memoize
def _expander(where):
if not where:
def expand(seq, val):
return seq
return expand
else:
decl = """def expand(seq, val):
return ({left}) + tuple({right})
"""
left = []
j = 0
for i in range(max(where) + 1):
if i in where:
left.append("val, ")
else:
left.append("seq[%d], " % j)
j += 1
right = "seq[%d:]" % j
left = "".join(left)
decl = decl.format(**locals())
ns = {}
exec(compile(decl, "<dynamic>", "exec"), ns, ns)
return ns['expand']
def expander(where):
"""Create a function to insert value at many locations in sequence.
>>> expander([0, 2])(['a', 'b', 'c'], 'z')
('z', 'a', 'z', 'b', 'c')
"""
return _expander(tuple(where))
def new_blockdim(dim_shape, lengths, index):
"""
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))
[10, 5, 10, 5, 15]
>>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])
[4]
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))
[16, 5, 10, 5, 4]
"""
if index == slice(None, None, None):
return lengths
if isinstance(index, list):
return [len(index)]
assert not isinstance(index, (int, long))
pairs = sorted(_slice_1d(dim_shape, lengths, index).items(),
key=itemgetter(0))
slices = [slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc
for i, slc in pairs]
if isinstance(index, slice) and index.step and index.step < 0:
slices = slices[::-1]
return [int(ceil((1. * slc.stop - slc.start) / slc.step)) for slc in slices]
def replace_ellipsis(n, index):
""" Replace ... with slices, :, : ,:
>>> replace_ellipsis(4, (3, Ellipsis, 2))
(3, slice(None, None, None), slice(None, None, None), 2)
>>> replace_ellipsis(2, (Ellipsis, None))
(slice(None, None, None), slice(None, None, None), None)
"""
# Careful about using in or index because index may contain arrays
isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]
extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)
if not isellipsis:
return index
else:
loc = isellipsis[0]
return (index[:loc] + (slice(None, None, None),) * extra_dimensions +
index[loc + 1:])
def check_index(ind, dimension):
""" Check validity of index for a given dimension
Examples
--------
>>> check_index(3, 5)
>>> check_index(5, 5)
Traceback (most recent call last):
...
IndexError: Index is not smaller than dimension 5 >= 5
>>> check_index(6, 5)
Traceback (most recent call last):
...
IndexError: Index is not smaller than dimension 6 >= 5
>>> check_index(-1, 5)
>>> check_index(-6, 5)
Traceback (most recent call last):
...
IndexError: Negative index is not greater than negative dimension -6 <= -5
>>> check_index([1, 2], 5)
>>> check_index([6, 3], 5)
Traceback (most recent call last):
...
IndexError: Index out of bounds 5
>>> check_index(slice(0, 3), 5)
"""
if isinstance(ind, list):
x = np.array(ind)
if (x >= dimension).any() or (x < -dimension).any():
raise IndexError("Index out of bounds %s" % dimension)
elif isinstance(ind, slice):
return
elif ind >= dimension:
raise IndexError("Index is not smaller than dimension %d >= %d" %
(ind, dimension))
elif ind < -dimension:
msg = "Negative index is not greater than negative dimension %d <= -%d"
raise IndexError(msg % (ind, dimension))
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/slicing.py",
"copies": "1",
"size": "22671",
"license": "bsd-3-clause",
"hash": 4678590991876652000,
"line_mean": 30.8412921348,
"line_max": 119,
"alpha_frac": 0.5539676238,
"autogenerated": false,
"ratio": 3.417910447761194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9470179690501153,
"avg_score": 0.00033967621200811264,
"num_lines": 712
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from math import ceil
from numbers import Number
from operator import getitem, add
import numpy as np
from toolz import merge, first, accumulate
from ..base import tokenize
from ..compatibility import long
def sanitize_index(ind):
""" Sanitize the elements for indexing along one axis
>>> sanitize_index([2, 3, 5])
[2, 3, 5]
>>> sanitize_index([True, False, True, False])
[0, 2]
>>> sanitize_index(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index(np.array([False, True, True]))
[1, 2]
>>> type(sanitize_index(np.int32(0)))
<type 'int'>
>>> sanitize_index(1.0)
1
>>> sanitize_index(0.5)
Traceback (most recent call last):
...
IndexError: Bad index. Must be integer-like: 0.5
"""
if isinstance(ind, Number):
ind2 = int(ind)
if ind2 != ind:
raise IndexError("Bad index. Must be integer-like: %s" % ind)
else:
return ind2
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
if isinstance(ind, list):
return [sanitize_index(i) for i in ind]
if isinstance(ind, slice):
return slice(sanitize_index(ind.start),
sanitize_index(ind.stop),
sanitize_index(ind.step))
if ind is None:
return ind
raise IndexError("Invalid index", i)
def slice_array(out_name, in_name, blockdims, index):
"""
Master function for array slicing
This function makes a new dask that slices blocks along every
dimension and aggregates (via cartesian product) each dimension's
slices so that the resulting block slices give the same results
as the original slice on the original structure
Index must be a tuple. It may contain the following types
int, slice, list (at most one list), None
Parameters
----------
in_name - string
This is the dask variable name that will be used as input
out_name - string
This is the dask variable output name
    blockdims - iterable of iterables of integers (block sizes per dimension)
index - iterable of integers, slices, lists, or None
Returns
-------
Dict where the keys are tuples of
(out_name, dim_index[, dim_index[, ...]])
and the values are
(function, (in_name, dim_index, dim_index, ...),
(slice(...), [slice()[,...]])
Also new blockdims with shapes of each block
((10, 10, 10, 10), (20, 20))
Examples
--------
>>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
... (slice(10, 35),)) # doctest: +SKIP
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), (slice(10, 20),)),
('y', 1): (getitem, ('x', 1), (slice(0, 15),))}
>>> blockdims # doctest: +SKIP
((10, 15),)
See Also
--------
This function works by successively unwrapping cases and passing down
through a sequence of functions.
slice_with_newaxis - handle None/newaxis case
slice_wrap_lists - handle fancy indexing with lists
slice_slices_and_integers - handle everything else
"""
index = replace_ellipsis(len(blockdims), index)
index = tuple(map(sanitize_index, index))
blockdims = tuple(map(tuple, blockdims))
# x[:, :, :] - Punt and return old value
if all(index == slice(None, None, None) for index in index):
suffixes = product(*[range(len(bd)) for bd in blockdims])
dsk = dict(((out_name,) + s, (in_name,) + s)
for s in suffixes)
return dsk, blockdims
# Add in missing colons at the end as needed. x[5] -> x[5, :, :]
missing = len(blockdims) - (len(index) - index.count(None))
index += (slice(None, None, None),) * missing
# Pass down to next function
dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
bd_out = tuple(map(tuple, bd_out))
return dsk_out, bd_out
def slice_with_newaxes(out_name, in_name, blockdims, index):
"""
Handle indexing with Nones
Strips out Nones then hands off to slice_wrap_lists
"""
# Strip Nones from index
index2 = tuple([ind for ind in index if ind is not None])
where_none = [i for i, ind in enumerate(index) if ind is None]
# Pass down and do work
dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
# Insert ",0" into the key: ('x', 2, 3) -> ('x', 0, 2, 0, 3)
dsk2 = dict(((out_name,) + insert_many(k[1:], where_none, 0),
(v[:2] + (insert_many(v[2], where_none, None),)))
for k, v in dsk.items()
if k[0] == out_name)
# Add back intermediate parts of the dask that weren't the output
dsk3 = merge(dsk2, dict((k, v) for k, v in dsk.items() if k[0] != out_name))
# Insert (1,) into blockdims: ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))
blockdims3 = insert_many(blockdims2, where_none, (1,))
return dsk3, blockdims3
def slice_wrap_lists(out_name, in_name, blockdims, index):
"""
Fancy indexing along blocked array dasks
Handles index of type list. Calls slice_slices_and_integers for the rest
See Also
--------
take - handle slicing with lists ("fancy" indexing)
slice_slices_and_integers - handle slicing with slices and integers
"""
shape = tuple(map(sum, blockdims))
assert all(isinstance(i, (slice, list, int, long)) for i in index)
# Change indices like -1 to 9
index2 = posify_index(shape, index)
# Do we have more than one list in the index?
where_list = [i for i, ind in enumerate(index) if isinstance(ind, list)]
if len(where_list) > 1:
raise NotImplementedError("Don't yet support nd fancy indexing")
# Replace all lists with full slices [3, 1, 0] -> slice(None, None, None)
index_without_list = tuple(slice(None, None, None)
if isinstance(i, list)
else i
for i in index2)
# No lists, hooray! just use slice_slices_and_integers
if index2 == index_without_list:
return slice_slices_and_integers(out_name, in_name, blockdims, index2)
# lists and full slices. Just use take
if all(isinstance(i, list) or i == slice(None, None, None)
for i in index2):
axis = where_list[0]
blockdims2, dsk3 = take(out_name, in_name, blockdims,
index2[where_list[0]], axis=axis)
# Mixed case. Both slices/integers and lists. slice/integer then take
else:
# Do first pass without lists
tmp = 'slice-' + tokenize((out_name, in_name, blockdims, index))
dsk, blockdims2 = slice_slices_and_integers(tmp, in_name, blockdims, index_without_list)
# After collapsing some axes due to int indices, adjust axis parameter
axis = where_list[0]
axis2 = axis - sum(1 for i, ind in enumerate(index2)
if i < axis and isinstance(ind, (int, long)))
# Do work
blockdims2, dsk2 = take(out_name, tmp, blockdims2, index2[axis],
axis=axis2)
dsk3 = merge(dsk, dsk2)
return dsk3, blockdims2
def slice_slices_and_integers(out_name, in_name, blockdims, index):
"""
Dask array indexing with slices and integers
See Also
--------
_slice_1d
"""
shape = tuple(map(sum, blockdims))
assert all(isinstance(ind, (slice, int, long)) for ind in index)
assert len(index) == len(blockdims)
# Get a list (for each dimension) of dicts{blocknum: slice()}
block_slices = list(map(_slice_1d, shape, blockdims, index))
# (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
in_names = list(product([in_name], *[sorted(i.keys()) for i in block_slices]))
# (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
out_names = list(product([out_name],
*[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
for d, i in zip(block_slices, index)
if not isinstance(i, (int, long))]))
all_slices = list(product(*[i.values() for i in block_slices]))
dsk_out = dict((out_name, (getitem, in_name, slices))
for out_name, in_name, slices
in zip(out_names, in_names, all_slices))
new_blockdims = [new_blockdim(d, db, i)
for d, i, db in zip(shape, index, blockdims)
if not isinstance(i, (int, long))]
return dsk_out, new_blockdims
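# Illustrative sketch (hand-worked from the code above, not doctested):
# slicing 10:35 out of a 60-element dimension chunked as (20, 20, 20) touches
# only the first two blocks and shrinks them accordingly.
# >>> dsk, bd = slice_slices_and_integers('y', 'x', [(20, 20, 20)],
# ...                                     (slice(10, 35, 1),))
# >>> bd
# [[10, 15]]
# >>> sorted(dsk)
# [('y', 0), ('y', 1)]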
def _slice_1d(dim_shape, lengths, index):
"""Returns a dict of {blocknum: slice}
This function figures out where each slice should start in each
block for a single dimension. If the slice won't return any elements
in the block, that block will not be in the output.
Parameters
----------
dim_shape - the number of elements in this dimension.
This should be a positive, non-zero integer
    lengths - the length of each block along this dimension
        This should be a sequence of positive, non-zero integers
    index - a description of the elements in this dimension that we want
        This might be an integer or a slice()
Returns
-------
dictionary where the keys are the integer index of the blocks that
should be sliced and the values are the slices
Examples
--------
100 length array cut into length 20 pieces, slice 0:35
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
{0: slice(None, None, None), 1: slice(0, 15, 1)}
Support irregular blocks and various slices
>>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
{0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}
Support step sizes
>>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
{0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40)) # step > blocksize
{0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}
Also support indexing single elements
>>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
{1: 5}
And negative slicing
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
{0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
{0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
{4: slice(-1, -12, -3)}
"""
if isinstance(index, (int, long)):
i = 0
ind = index
lens = list(lengths)
while ind >= lens[0]:
i += 1
ind -= lens.pop(0)
return {i: ind}
assert isinstance(index, slice)
step = index.step or 1
if step > 0:
start = index.start or 0
stop = index.stop if index.stop is not None else dim_shape
else:
start = index.start or dim_shape - 1
start = dim_shape - 1 if start >= dim_shape else start
stop = -(dim_shape + 1) if index.stop is None else index.stop
# posify start and stop
if start < 0:
start += dim_shape
if stop < 0:
stop += dim_shape
d = dict()
if step > 0:
for i, length in enumerate(lengths):
if start < length and stop > 0:
d[i] = slice(start, min(stop, length), step)
start = (start - length) % step
else:
start = start - length
stop -= length
else:
rstart = start # running start
chunk_boundaries = list(accumulate(add, lengths))
for i, chunk_stop in reversed(list(enumerate(chunk_boundaries))):
# create a chunk start and stop
if i == 0:
chunk_start = 0
else:
chunk_start = chunk_boundaries[i - 1]
# if our slice is in this chunk
if (chunk_start <= rstart < chunk_stop) and (rstart > stop):
d[i] = slice(rstart - chunk_stop,
max(chunk_start - chunk_stop - 1,
stop - chunk_stop),
step)
# compute the next running start point,
offset = (rstart - (chunk_start - 1)) % step
rstart = chunk_start + offset - 1
# replace 0:20:1 with : if appropriate
for k, v in d.items():
if v == slice(0, lengths[k], 1):
d[k] = slice(None, None, None)
if not d: # special case x[:0]
d[0] = slice(0, 0, 1)
return d
def partition_by_size(sizes, seq):
"""
>>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])
[[1, 5, 9], [2, 19], [5]]
"""
seq = list(seq)
pretotal = 0
total = 0
i = 0
result = list()
for s in sizes:
total += s
L = list()
while i < len(seq) and seq[i] < total:
L.append(seq[i] - pretotal)
i += 1
result.append(L)
pretotal += s
return result
def issorted(seq):
""" Is sequence sorted?
>>> issorted([1, 2, 3])
True
>>> issorted([3, 1, 2])
False
"""
if not seq:
return True
x = seq[0]
for elem in seq[1:]:
if elem < x:
return False
x = elem
return True
colon = slice(None, None, None)
def take_sorted(outname, inname, blockdims, index, axis=0):
""" Index array with sorted list index
Forms a dask for the following case
x[:, [1, 3, 5, 10], ...]
where the index, ``[1, 3, 5, 10]`` is sorted in non-decreasing order.
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
>>> blockdims
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 1): (getitem, ('x', 2), ([7],))}
See Also
--------
take - calls this function
"""
n = len(blockdims)
sizes = blockdims[axis] # the blocksizes on the axis that we care about
index_lists = partition_by_size(sizes, sorted(index))
where_index = [i for i, il in enumerate(index_lists) if il]
index_lists = [il for il in index_lists if il]
dims = [range(len(bd)) for bd in blockdims]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon]*len(bd) for bd in blockdims]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
blockdims2 = list(blockdims)
blockdims2[axis] = tuple(map(len, index_lists))
return tuple(blockdims2), dict(zip(keys, values))
def take(outname, inname, blockdims, index, axis=0):
""" Index array with an iterable of index
Handles a single index by a single list
Mimics ``np.take``
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
>>> blockdims
((4,),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))],
0),
(2, 0, 4, 1))}
When list is sorted we retain original block structure
>>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
>>> blockdims
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 1): (getitem, ('x', 2), ([7],))}
"""
if issorted(index):
return take_sorted(outname, inname, blockdims, index, axis)
n = len(blockdims)
sizes = blockdims[axis] # the blocksizes on the axis that we care about
index_lists = partition_by_size(sizes, sorted(index))
dims = [[0] if axis == i else list(range(len(bd)))
for i, bd in enumerate(blockdims)]
keys = list(product([outname], *dims))
rev_index = list(map(sorted(index).index, index))
vals = [(getitem, (np.concatenate,
(list, [(getitem, ((inname,) + d[:axis] + (i,) + d[axis+1:]),
((colon,)*axis + (IL,) + (colon,)*(n-axis-1)))
for i, IL in enumerate(index_lists)
if IL]),
axis),
((colon,)*axis + (rev_index,) + (colon,)*(n-axis-1)))
for d in product(*dims)]
blockdims2 = list(blockdims)
blockdims2[axis] = (len(index),)
return tuple(blockdims2), dict(zip(keys, vals))
def posify_index(shape, ind):
""" Flip negative indices around to positive ones
>>> posify_index(10, 3)
3
>>> posify_index(10, -3)
7
>>> posify_index(10, [3, -3])
[3, 7]
>>> posify_index((10, 20), (3, -3))
(3, 17)
>>> posify_index((10, 20), (3, [3, 4, -3]))
(3, [3, 4, 17])
"""
if isinstance(ind, tuple):
return tuple(map(posify_index, shape, ind))
if isinstance(ind, (int, long)):
if ind < 0:
return ind + shape
else:
return ind
if isinstance(ind, list):
return [posify_index(shape, i) for i in ind]
return ind
def insert_many(seq, where, val):
""" Insert value at many locations in sequence
>>> insert_many(['a', 'b', 'c'], [0, 2], 'z')
('z', 'a', 'z', 'b', 'c')
"""
seq = list(seq)
result = []
for i in range(len(where) + len(seq)):
if i in where:
result.append(val)
else:
result.append(seq.pop(0))
return tuple(result)
def new_blockdim(dim_shape, lengths, index):
"""
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))
[10, 5, 10, 5, 15]
>>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])
[4]
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))
[16, 5, 10, 5, 4]
"""
if isinstance(index, list):
return [len(index)]
assert not isinstance(index, (int, long))
pairs = sorted(_slice_1d(dim_shape, lengths, index).items(), key=first)
slices = [slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc
for i, slc in pairs]
if isinstance(index, slice) and index.step and index.step < 0:
slices = slices[::-1]
return [int(ceil((1. * slc.stop - slc.start) / slc.step)) for slc in slices]
def replace_ellipsis(n, index):
""" Replace ... with slices, :, : ,:
>>> replace_ellipsis(4, (3, Ellipsis, 2))
(3, slice(None, None, None), slice(None, None, None), 2)
"""
# Careful about using in or index because index may contain arrays
isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]
if not isellipsis:
return index
else:
loc = isellipsis[0]
return (index[:loc]
+ (slice(None, None, None),) * (n - len(index) + 1)
+ index[loc+1:])
| {
"repo_name": "vikhyat/dask",
"path": "dask/array/slicing.py",
"copies": "1",
"size": "19485",
"license": "bsd-3-clause",
"hash": -7097070787210551000,
"line_mean": 30.6314935065,
"line_max": 119,
"alpha_frac": 0.5490377213,
"autogenerated": false,
"ratio": 3.388695652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44377333734739133,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from operator import getitem
import numpy as np
from .core import (normalize_chunks, Array, slices_from_chunks,
broadcast_shapes, broadcast_to)
from ..base import tokenize
from ..utils import different_seeds, ignoring
def doc_wraps(func):
""" Copy docstring from one function to another """
def _(func2):
if func.__doc__ is not None:
func2.__doc__ = func.__doc__.replace('>>>', '>>').replace('...', '..')
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
This object contains state to deterministically generate pseudo-random
numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([ 9.95487579, 10.02999135, 10.08498441])
See Also:
np.random.RandomState
"""
def __init__(self, seed=None):
self._numpy_state = np.random.RandomState(seed)
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(self, func, *args, **kwargs):
""" Wrap numpy random function to produce dask.array random function
extra_chunks should be a chunks tuple to append to the end of chunks
"""
size = kwargs.pop('size', None)
chunks = kwargs.pop('chunks')
extra_chunks = kwargs.pop('extra_chunks', ())
if size is not None and not isinstance(size, (tuple, list)):
size = (size,)
args_shapes = {ar.shape for ar in args
if isinstance(ar, (Array, np.ndarray))}
        # also fold in the shapes of array-valued keyword arguments
        args_shapes.update({ar.shape for ar in kwargs.values()
                            if isinstance(ar, (Array, np.ndarray))})
shapes = list(args_shapes)
if size is not None:
shapes += [size]
# broadcast to the final size(shape)
size = broadcast_shapes(*shapes)
chunks = normalize_chunks(chunks, size)
slices = slices_from_chunks(chunks)
def _broadcast_any(ar, shape, chunks):
if isinstance(ar, Array):
return broadcast_to(ar, shape).rechunk(chunks)
if isinstance(ar, np.ndarray):
return np.ascontiguousarray(np.broadcast_to(ar, shape))
# Broadcast all arguments, get tiny versions as well
# Start adding the relevant bits to the graph
dsk = {}
lookup = {}
small_args = []
for i, ar in enumerate(args):
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dsk.update(res.dask)
lookup[i] = res.name
elif isinstance(res, np.ndarray):
name = 'array-{}'.format(tokenize(res))
lookup[i] = name
dsk[name] = res
small_args.append(ar[tuple(0 for _ in ar.shape)])
else:
small_args.append(ar)
small_kwargs = {}
for key, ar in kwargs.items():
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dsk.update(res.dask)
lookup[key] = res.name
elif isinstance(res, np.ndarray):
name = 'array-{}'.format(tokenize(res))
lookup[key] = name
dsk[name] = res
small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]
else:
small_kwargs[key] = ar
# Get dtype
small_kwargs['size'] = (0,)
dtype = func(np.random.RandomState(), *small_args,
**small_kwargs).dtype
sizes = list(product(*chunks))
seeds = different_seeds(len(sizes), self._numpy_state)
token = tokenize(seeds, size, chunks, args, kwargs)
name = 'da.random.{0}-{1}'.format(func.__name__, token)
keys = product([name], *([range(len(bd)) for bd in chunks]
+ [[0]] * len(extra_chunks)))
blocks = product(*[range(len(bd)) for bd in chunks])
vals = []
for seed, size, slc, block in zip(seeds, sizes, slices, blocks):
arg = []
for i, ar in enumerate(args):
if i not in lookup:
arg.append(ar)
else:
if isinstance(ar, Array):
arg.append((lookup[i], ) + block)
else: # np.ndarray
arg.append((getitem, lookup[i], slc))
kwrg = {}
for k, ar in kwargs.items():
if k not in lookup:
kwrg[k] = ar
else:
if isinstance(ar, Array):
kwrg[k] = (lookup[k], ) + block
else: # np.ndarray
kwrg[k] = (getitem, lookup[k], slc)
vals.append((_apply_random, func.__name__, seed, size, arg, kwrg))
dsk.update(dict(zip(keys, vals)))
return Array(dsk, name, chunks + extra_chunks, dtype=dtype)
@doc_wraps(np.random.RandomState.beta)
def beta(self, a, b, size=None, chunks=None):
return self._wrap(np.random.RandomState.beta, a, b,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.binomial)
def binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.chisquare)
def chisquare(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.chisquare, df,
size=size, chunks=chunks)
with ignoring(AttributeError):
@doc_wraps(np.random.RandomState.choice)
def choice(self, a, size=None, replace=True, p=None, chunks=None):
return self._wrap(np.random.RandomState.choice, a,
                              size=size, replace=replace, p=p, chunks=chunks)
# @doc_wraps(np.random.RandomState.dirichlet)
# def dirichlet(self, alpha, size=None, chunks=None):
@doc_wraps(np.random.RandomState.exponential)
def exponential(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.exponential, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.f)
def f(self, dfnum, dfden, size=None, chunks=None):
return self._wrap(np.random.RandomState.f, dfnum, dfden,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gamma)
    def gamma(self, shape, scale=1.0, size=None, chunks=None):
        return self._wrap(np.random.RandomState.gamma, shape, scale,
                          size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.geometric)
def geometric(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.geometric, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gumbel)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.gumbel, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.hypergeometric)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=None):
return self._wrap(np.random.RandomState.hypergeometric,
ngood, nbad, nsample,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.laplace)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.laplace, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logistic)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.logistic, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.lognormal)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.lognormal, mean, sigma,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logseries)
def logseries(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.logseries, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.multinomial)
def multinomial(self, n, pvals, size=None, chunks=None):
return self._wrap(np.random.RandomState.multinomial, n, pvals,
size=size, chunks=chunks,
extra_chunks=((len(pvals),),))
@doc_wraps(np.random.RandomState.negative_binomial)
def negative_binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.negative_binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_chisquare)
def noncentral_chisquare(self, df, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_chisquare, df, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_f)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_f,
dfnum, dfden, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.normal)
def normal(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.normal, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.pareto)
def pareto(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.pareto, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.poisson)
def poisson(self, lam=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.poisson, lam,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.power)
def power(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.power, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.randint)
def randint(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.randint, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_integers)
def random_integers(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_integers, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_sample)
def random_sample(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_sample,
size=size, chunks=chunks)
random = random_sample
@doc_wraps(np.random.RandomState.rayleigh)
def rayleigh(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.rayleigh, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_cauchy)
def standard_cauchy(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_cauchy,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_exponential)
def standard_exponential(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_exponential,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_gamma)
def standard_gamma(self, shape, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_gamma, shape,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_normal)
def standard_normal(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_normal,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_t)
def standard_t(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_t, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.tomaxint)
def tomaxint(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.tomaxint,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.triangular)
def triangular(self, left, mode, right, size=None, chunks=None):
return self._wrap(np.random.RandomState.triangular, left, mode, right,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.uniform)
def uniform(self, low=0.0, high=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.uniform, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.vonmises)
def vonmises(self, mu, kappa, size=None, chunks=None):
return self._wrap(np.random.RandomState.vonmises, mu, kappa,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.wald)
def wald(self, mean, scale, size=None, chunks=None):
return self._wrap(np.random.RandomState.wald, mean, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.weibull)
def weibull(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.weibull, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.zipf)
def zipf(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.zipf, a,
size=size, chunks=chunks)
def _apply_random(func, seed, size, args, kwargs):
""" Apply RandomState method with seed
>>> _apply_random('normal', 123, 3, (10, 1.0), {})
array([ 8.9143694 , 10.99734545, 10.2829785 ])
"""
state = np.random.RandomState(seed)
func = getattr(state, func)
return func(*args, size=size, **kwargs)
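# Illustrative sketch (assumes a working dask install; not doctested): two
# RandomState objects built from the same seed draw identical chunked arrays,
# because the per-block seeds are derived deterministically from that seed.
# >>> a = RandomState(42).normal(0, 1, size=(6,), chunks=(3,))
# >>> b = RandomState(42).normal(0, 1, size=(6,), chunks=(3,))
# >>> bool((a.compute() == b.compute()).all())
# True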
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
multinomial = _state.multinomial
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
randint = _state.randint
random_integers = _state.random_integers
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
| {
"repo_name": "cowlicks/dask",
"path": "dask/array/random.py",
"copies": "2",
"size": "15992",
"license": "bsd-3-clause",
"hash": -3918635735124103700,
"line_mean": 38.1960784314,
"line_max": 82,
"alpha_frac": 0.6016133067,
"autogenerated": false,
"ratio": 3.7487107360525083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5350324042752508,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from operator import mul
import numpy as np
from .core import Array
from ..base import tokenize
from ..core import flatten
from ..compatibility import reduce
from .. import sharedict
def reshape_rechunk(inshape, outshape, inchunks):
assert all(isinstance(c, tuple) for c in inchunks)
ii = len(inshape) - 1
oi = len(outshape) - 1
result_inchunks = [None for i in range(len(inshape))]
result_outchunks = [None for i in range(len(outshape))]
while ii >= 0 or oi >= 0:
if inshape[ii] == outshape[oi]:
result_inchunks[ii] = inchunks[ii]
result_outchunks[oi] = inchunks[ii]
ii -= 1
oi -= 1
continue
din = inshape[ii]
dout = outshape[oi]
if din == 1:
result_inchunks[ii] = (1,)
ii -= 1
elif dout == 1:
result_outchunks[oi] = (1,)
oi -= 1
elif din < dout: # (4, 4, 4) -> (64,)
ileft = ii - 1
while ileft >= 0 and reduce(mul, inshape[ileft:ii + 1]) < dout: # 4 < 64, 4*4 < 64, 4*4*4 == 64
ileft -= 1
if reduce(mul, inshape[ileft:ii + 1]) != dout:
raise ValueError("Shapes not compatible")
for i in range(ileft + 1, ii + 1): # need single-shape dimensions
result_inchunks[i] = (inshape[i],) # chunks[i] = (4,)
chunk_reduction = reduce(mul, map(len, inchunks[ileft + 1:ii + 1]))
result_inchunks[ileft] = expand_tuple(inchunks[ileft], chunk_reduction)
prod = reduce(mul, inshape[ileft + 1: ii + 1]) # 16
result_outchunks[oi] = tuple(prod * c for c in result_inchunks[ileft]) # (1, 1, 1, 1) .* 16
oi -= 1
ii = ileft - 1
elif din > dout: # (64,) -> (4, 4, 4)
oleft = oi - 1
while oleft >= 0 and reduce(mul, outshape[oleft:oi + 1]) < din:
oleft -= 1
if reduce(mul, outshape[oleft:oi + 1]) != din:
raise ValueError("Shapes not compatible")
# TODO: don't coalesce shapes unnecessarily
cs = reduce(mul, outshape[oleft + 1: oi + 1])
result_inchunks[ii] = contract_tuple(inchunks[ii], cs) # (16, 16, 16, 16)
for i in range(oleft + 1, oi + 1):
result_outchunks[i] = (outshape[i],)
result_outchunks[oleft] = tuple(c // cs for c in result_inchunks[ii])
oi = oleft - 1
ii -= 1
return tuple(result_inchunks), tuple(result_outchunks)
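# Illustrative sketch (hand-worked from the code above, not doctested):
# collapsing a (4, 4) array chunked as ((2, 2), (2, 2)) into shape (16,)
# splits the leading dimension into single rows and merges the trailing
# dimension into one chunk, so each output chunk is contiguous in C order.
# >>> reshape_rechunk((4, 4), (16,), ((2, 2), (2, 2)))
# (((1, 1, 1, 1), (4,)), ((4, 4, 4, 4),))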
def expand_tuple(chunks, factor):
"""
>>> expand_tuple((2, 4), 2)
(1, 1, 2, 2)
>>> expand_tuple((2, 4), 3)
(1, 1, 1, 1, 2)
>>> expand_tuple((3, 4), 2)
(1, 2, 2, 2)
>>> expand_tuple((7, 4), 3)
(2, 2, 3, 1, 1, 2)
"""
if factor == 1:
return chunks
out = []
for c in chunks:
x = c
part = max(x / factor, 1)
while x >= 2 * part:
out.append(int(part))
x -= int(part)
if x:
out.append(x)
assert sum(chunks) == sum(out)
return tuple(out)
def contract_tuple(chunks, factor):
""" Return simple chunks tuple such that factor divides all elements
Examples
--------
>>> contract_tuple((2, 2, 8, 4), 4)
(4, 8, 4)
"""
assert sum(chunks) % factor == 0
out = []
residual = 0
for chunk in chunks:
chunk += residual
div = chunk // factor
residual = chunk % factor
good = factor * div
if good:
out.append(good)
return tuple(out)
def reshape(x, shape):
""" Reshape array to new shape
This is a parallelized version of the ``np.reshape`` function with the
following limitations:
1. It assumes that the array is stored in C-order
2. It only allows for reshapings that collapse or merge dimensions like
``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``
When communication is necessary this algorithm depends on the logic within
rechunk. It endeavors to keep chunk sizes roughly the same when possible.
See Also
--------
dask.array.rechunk
numpy.reshape
"""
# Sanitize inputs, look for -1 in shape
from .slicing import sanitize_index
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
if len(known_sizes) < len(shape):
        if len(shape) - len(known_sizes) > 1:
raise ValueError('can only specify one unknown dimension')
missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if np.isnan(sum(x.shape)):
raise ValueError("Array chunk size or shape is unknown. shape: %s", x.shape)
if reduce(mul, shape, 1) != x.size:
raise ValueError('total size of new array must be unchanged')
if x.shape == shape:
return x
name = 'reshape-' + tokenize(x, shape)
if x.npartitions == 1:
key = next(flatten(x._keys()))
dsk = {(name,) + (0,) * len(shape): (np.reshape, key, shape)}
chunks = tuple((d,) for d in shape)
return Array(sharedict.merge((name, dsk), x.dask), name, chunks,
dtype=x.dtype)
# Logic for how to rechunk
inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
x2 = x.rechunk(inchunks)
# Construct graph
in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
shapes = list(product(*outchunks))
dsk = {a: (np.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}
return Array(sharedict.merge((name, dsk), x2.dask), name, outchunks,
dtype=x.dtype)
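# Illustrative usage sketch (assumes a working dask install; the chunk values
# shown are what the rechunk logic above should produce for this input):
# >>> import dask.array as da
# >>> x = da.ones((4, 6), chunks=(2, 3))
# >>> y = reshape(x, (2, 2, 6))
# >>> y.shape
# (2, 2, 6)
# >>> y.chunks
# ((1, 1), (2,), (3, 3))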
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/reshape.py",
"copies": "1",
"size": "5917",
"license": "bsd-3-clause",
"hash": -7006342420301881000,
"line_mean": 30.3068783069,
"line_max": 107,
"alpha_frac": 0.5519689032,
"autogenerated": false,
"ratio": 3.404487917146145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4456456820346145,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import product
import numpy as np
from .core import normalize_chunks, Array
from ..base import tokenize
from ..utils import different_seeds, ignoring
def doc_wraps(func):
""" Copy docstring from one function to another """
def _(func2):
        if func.__doc__ is not None:
            func2.__doc__ = func.__doc__.replace('>>>', '>>').replace('...', '..')
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
This object contains state to deterministically generate pseudo-random
numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([ 9.95487579, 10.02999135, 10.08498441])
See Also:
np.random.RandomState
"""
def __init__(self, seed=None):
self._numpy_state = np.random.RandomState(seed)
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(self, func, *args, **kwargs):
""" Wrap numpy random function to produce dask.array random function
extra_chunks should be a chunks tuple to append to the end of chunks
"""
size = kwargs.pop('size')
chunks = kwargs.pop('chunks')
extra_chunks = kwargs.pop('extra_chunks', ())
if not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size)
# Get dtype
kw = kwargs.copy()
kw['size'] = (0,)
dtype = func(np.random.RandomState(), *args, **kw).dtype
# Build graph
sizes = list(product(*chunks))
seeds = different_seeds(len(sizes), self._numpy_state)
token = tokenize(seeds, size, chunks, args, kwargs)
name = 'da.random.{0}-{1}'.format(func.__name__, token)
keys = product([name], *([range(len(bd)) for bd in chunks]
+ [[0]] * len(extra_chunks)))
vals = ((_apply_random, func.__name__, seed, size, args, kwargs)
for seed, size in zip(seeds, sizes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks + extra_chunks, dtype=dtype)
@doc_wraps(np.random.RandomState.beta)
def beta(self, a, b, size=None, chunks=None):
return self._wrap(np.random.RandomState.beta, a, b,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.binomial)
def binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.chisquare)
def chisquare(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.chisquare, df,
size=size, chunks=chunks)
with ignoring(AttributeError):
@doc_wraps(np.random.RandomState.choice)
def choice(self, a, size=None, replace=True, p=None, chunks=None):
return self._wrap(np.random.RandomState.choice, a,
                              size=size, replace=replace, p=p, chunks=chunks)
# @doc_wraps(np.random.RandomState.dirichlet)
# def dirichlet(self, alpha, size=None, chunks=None):
@doc_wraps(np.random.RandomState.exponential)
def exponential(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.exponential, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.f)
def f(self, dfnum, dfden, size=None, chunks=None):
return self._wrap(np.random.RandomState.f, dfnum, dfden,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gamma)
    def gamma(self, shape, scale=1.0, size=None, chunks=None):
        return self._wrap(np.random.RandomState.gamma, shape, scale,
                          size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.geometric)
def geometric(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.geometric, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gumbel)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.gumbel, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.hypergeometric)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=None):
return self._wrap(np.random.RandomState.hypergeometric,
ngood, nbad, nsample,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.laplace)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.laplace, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logistic)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.logistic, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.lognormal)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.lognormal, mean, sigma,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logseries)
def logseries(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.logseries, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.multinomial)
def multinomial(self, n, pvals, size=None, chunks=None):
return self._wrap(np.random.RandomState.multinomial, n, pvals,
size=size, chunks=chunks,
extra_chunks=((len(pvals),),))
@doc_wraps(np.random.RandomState.negative_binomial)
def negative_binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.negative_binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_chisquare)
def noncentral_chisquare(self, df, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_chisquare, df, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_f)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_f,
dfnum, dfden, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.normal)
def normal(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.normal, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.pareto)
def pareto(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.pareto, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.poisson)
def poisson(self, lam=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.poisson, lam,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.power)
def power(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.power, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.randint)
def randint(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.randint, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_integers)
def random_integers(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_integers, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_sample)
def random_sample(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_sample,
size=size, chunks=chunks)
random = random_sample
@doc_wraps(np.random.RandomState.rayleigh)
def rayleigh(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.rayleigh, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_cauchy)
def standard_cauchy(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_cauchy,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_exponential)
def standard_exponential(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_exponential,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_gamma)
def standard_gamma(self, shape, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_gamma, shape,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_normal)
def standard_normal(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_normal,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_t)
def standard_t(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_t, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.tomaxint)
def tomaxint(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.tomaxint,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.triangular)
def triangular(self, left, mode, right, size=None, chunks=None):
return self._wrap(np.random.RandomState.triangular, left, mode, right,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.uniform)
def uniform(self, low=0.0, high=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.uniform, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.vonmises)
def vonmises(self, mu, kappa, size=None, chunks=None):
return self._wrap(np.random.RandomState.vonmises, mu, kappa,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.wald)
def wald(self, mean, scale, size=None, chunks=None):
return self._wrap(np.random.RandomState.wald, mean, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.weibull)
def weibull(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.weibull, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.zipf)
def zipf(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.zipf, a,
size=size, chunks=chunks)
def _apply_random(func, seed, size, args, kwargs):
""" Apply RandomState method with seed
>>> _apply_random('normal', 123, 3, (10, 1.0), {})
array([ 8.9143694 , 10.99734545, 10.2829785 ])
"""
state = np.random.RandomState(seed)
func = getattr(state, func)
return func(*args, size=size, **kwargs)
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
multinomial = _state.multinomial
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
randint = _state.randint
random_integers = _state.random_integers
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
| {
"repo_name": "mikegraham/dask",
"path": "dask/array/random.py",
"copies": "1",
"size": "12905",
"license": "bsd-3-clause",
"hash": -9215933013564944000,
"line_mean": 37.4077380952,
"line_max": 79,
"alpha_frac": 0.6333204184,
"autogenerated": false,
"ratio": 3.6290776152980877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47623980336980876,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from keyword import iskeyword
import re
import datashape
from datashape import dshape, DataShape, Record, Var, Mono, Fixed
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
import numpy as np
from odo.utils import copydoc
import toolz
from toolz import concat, memoize, partial, first
from toolz.curried import map, filter
from ..compatibility import _strtypes, builtins, boundmethod, PY2
from .core import Node, subs, common_subexpression, path
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
'ReLabel', 'relabel', 'Apply', 'apply', 'Slice', 'shape', 'ndim',
'label', 'symbol', 'Coerce', 'coerce']
_attr_cache = dict()
def isvalid_identifier(s):
"""Check whether a string is a valid Python identifier
Examples
--------
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
>>> isvalid_identifier('1a')
False
>>> isvalid_identifier('a1')
True
>>> isvalid_identifier('for')
False
>>> isvalid_identifier(None)
False
"""
# the re module compiles and caches regexs so no need to compile it
return (s is not None and not iskeyword(s) and
re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None)
def valid_identifier(s):
"""Rewrite a string to be a valid identifier if it contains
>>> valid_identifier('hello')
'hello'
>>> valid_identifier('hello world')
'hello_world'
>>> valid_identifier('hello.world')
'hello_world'
>>> valid_identifier('hello-world')
'hello_world'
>>> valid_identifier(None)
>>> valid_identifier('1a')
"""
if isinstance(s, _strtypes):
if s[0].isdigit():
return
return s.replace(' ', '_').replace('.', '_').replace('-', '_')
return s
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, Record):
if fieldname == self._name:
return self
raise ValueError(
"Can not get field '%s' of non-record expression %s" %
(fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return selection(self, key)
elif (isinstance(key, list)
and builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple) and
all(isinstance(k, (int, slice, type(None), list, np.ndarray))
for k in key)):
return sliceit(self, key)
elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
return sliceit(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
def _project(self, key):
return projection(self, key)
@property
def schema(self):
return datashape.dshape(self.dshape.measure)
@property
def fields(self):
if isinstance(self.dshape.measure, Record):
return self.dshape.measure.names
name = getattr(self, '_name', None)
if name is not None:
return [self._name]
return []
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __iter__(self):
raise NotImplementedError(
'Iteration over expressions is not supported.\n'
'Iterate over computed result instead, e.g. \n'
"\titer(expr) # don't do this\n"
"\titer(compute(expr)) # do this instead")
def __dir__(self):
result = dir(type(self))
if isrecord(self.dshape.measure) and self.fields:
result.extend(list(map(valid_identifier, self.fields)))
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
result.extend(list(d))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
if key == '_hash':
raise AttributeError()
try:
return _attr_cache[(self, key)]
except:
pass
try:
result = object.__getattribute__(self, key)
except AttributeError:
fields = dict(zip(map(valid_identifier, self.fields),
self.fields))
# prefer the method if there's a field with the same name
methods = toolz.merge(
schema_methods(self.dshape.measure),
dshape_methods(self.dshape)
)
if key in methods:
func = methods[key]
if func in method_properties:
result = func(self)
else:
result = boundmethod(func, self)
elif self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
raise
_attr_cache[(self, key)] = result
return result
@property
def _name(self):
if (isscalar(self.dshape.measure) and
len(self._inputs) == 1 and
isscalar(self._child.dshape.measure)):
return self._child._name
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except AttributeError:
pass
return True
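# Illustrative sketch (assumes the full blaze expression system is importable;
# ``symbol`` is defined further down in this module):
# >>> t = symbol('t', 'var * {name: string, amount: int}')
# >>> t['amount']._name            # field access via __getitem__
# 'amount'
# >>> t[t.amount > 100].dshape     # boolean predicate -> Selection
# dshape("var * {name: string, amount: int32}")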
_symbol_cache = dict()
def _symbol_key(args, kwargs):
if len(args) == 1:
name, = args
ds = None
token = None
if len(args) == 2:
name, ds = args
token = None
elif len(args) == 3:
name, ds, token = args
ds = kwargs.get('dshape', ds)
token = kwargs.get('token', token)
ds = dshape(ds)
return (name, ds, token)
@memoize(cache=_symbol_cache, key=_symbol_key)
def symbol(name, dshape, token=None):
return Symbol(name, dshape, token=token)
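# Illustrative sketch (hand-worked, not doctested): because ``symbol`` is
# memoized on (name, dshape, token), repeated calls with the same arguments
# return the very same object, which keeps downstream expression caches
# consistent.
# >>> symbol('x', '5 * int32') is symbol('x', '5 * int32')
# True
# >>> symbol('x', '5 * int32', token=1) is symbol('x', '5 * int32')
# False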
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int, y: int}')
>>> points
points
>>> points.dshape
dshape("5 * 3 * {x: int32, y: int32}")
"""
__slots__ = '_hash', '_name', 'dshape', '_token'
__inputs__ = ()
def __init__(self, name, dshape, token=None):
self._name = name
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
dshape = DataShape(dshape)
self.dshape = dshape
self._token = token
def __str__(self):
return self._name or ''
def _resources(self):
return dict()
@dispatch(Symbol, dict)
def _subs(o, d):
""" Subs symbols using symbol function
Supports caching"""
newargs = [subs(arg, d) for arg in o._args]
return symbol(*newargs)
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
@property
def dshape(self):
return datashape.DataShape(*(self._child.dshape.shape
+ tuple(self.schema)))
class Field(ElemWise):
"""
A single field from an expression.
Get a single field from an expression with record-type schema.
We store the name of the field in the ``_name`` attribute.
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
For fields that aren't valid Python identifiers, use ``[]`` syntax:
>>> points = symbol('points', '5 * 3 * {"space station": float64}')
>>> points['space station'].dshape
dshape("5 * 3 * float64")
"""
__slots__ = '_hash', '_child', '_name'
def __str__(self):
fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]'
return fmt % (self._child, self._name)
@property
def _expr(self):
return symbol(self._name, datashape.DataShape(self.dshape.measure))
@property
def dshape(self):
shape = self._child.dshape.shape
schema = self._child.dshape.measure.dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
"""Select a subset of fields from data.
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{name: string, amount: int32}")
>>> accounts[['name', 'amount']]
accounts[['name', 'amount']]
See Also
--------
blaze.expr.expressions.Field
"""
__slots__ = '_hash', '_child', '_fields'
@property
def fields(self):
return list(self._fields)
@property
def schema(self):
d = self._child.schema[0].dict
return DataShape(Record([(name, d[name]) for name in self.fields]))
def __str__(self):
return '%s[%s]' % (self._child, self.fields)
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
@copydoc(Projection)
def projection(expr, names):
if not names:
raise ValueError("Projection with no names")
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" %
(names, expr.fields))
return Projection(expr, tuple(names))
def sanitize_index_lists(ind):
""" Handle lists/arrays of integers/bools as indexes
>>> sanitize_index_lists([2, 3, 5])
[2, 3, 5]
>>> sanitize_index_lists([True, False, True, False])
[0, 2]
>>> sanitize_index_lists(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index_lists(np.array([False, True, True]))
[1, 2]
"""
if not isinstance(ind, (list, np.ndarray)):
return ind
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
def sliceit(child, index):
index2 = tuple(map(sanitize_index_lists, index))
index3 = hashable_index(index2)
s = Slice(child, index3)
hash(s)
return s
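# Illustrative sketch (hand-worked, not doctested): ``sliceit`` is what
# ``Expr.__getitem__`` above calls for positional indexing; it hashes the
# Slice eagerly so the expression can be cached safely.
# >>> pts = symbol('pts', 'var * {x: int32, y: int32}')
# >>> sliceit(pts, (slice(0, 5),)).dshape
# dshape("5 * {x: int32, y: int32}")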
class Slice(Expr):
"""Elements `start` until `stop`. On many backends, a `step` parameter
is also allowed.
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts[2:7].dshape
dshape("5 * {name: string, amount: int32}")
>>> accounts[2:7:2].dshape
dshape("3 * {name: string, amount: int32}")
"""
__slots__ = '_hash', '_child', '_index'
@property
def dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if isinstance(self.index, tuple):
index = ', '.join(map(str, self._index))
else:
index = str(self._index)
return '%s[%s]' % (self._child, index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
__slots__ = '_hash', '_child', 'predicate'
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
@property
def dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
@copydoc(Selection)
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(isinstance(node, (ElemWise, Symbol))
or node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
class Label(ElemWise):
"""An expression with a name.
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> expr = accounts.amount * 100
>>> expr._name
'amount'
>>> expr.label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
__slots__ = '_hash', '_child', 'label'
@property
def schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
raise ValueError("Column Mismatch: %s" % key)
def __str__(self):
return 'label(%s, %r)' % (self._child, self.label)
@copydoc(Label)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
class ReLabel(ElemWise):
"""
Table with same content but with new labels
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{name: string, amount: int32}")
>>> accounts.relabel(amount='balance').schema
dshape("{name: string, balance: int32}")
>>> accounts.relabel(not_a_column='definitely_not_a_column')
Traceback (most recent call last):
...
ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> s.relabel(0='foo') # doctest: +SKIP
Traceback (most recent call last):
...
SyntaxError: keyword can't be an expression
Notes
-----
When names are not valid Python names, such as integers or string with
spaces, you must pass a dictionary to ``relabel``. For example
.. code-block:: python
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> t = symbol('t', 'var * {"whoo hoo": ?float32}')
>>> t.relabel({"whoo hoo": 'foo'})
t.relabel({'whoo hoo': 'foo'})
See Also
--------
blaze.expr.expressions.Label
"""
__slots__ = '_hash', '_child', 'labels'
@property
def schema(self):
subs = dict(self.labels)
param = self._child.dshape.measure.parameters[0]
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in param]))
def __str__(self):
labels = self.labels
if all(map(isvalid_identifier, map(first, labels))):
rest = ', '.join('%s=%r' % l for l in labels)
else:
rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
return '%s.relabel(%s)' % (self._child, rest)
@copydoc(ReLabel)
def relabel(child, labels=None, **kwargs):
labels = labels or dict()
labels = toolz.merge(labels, kwargs)
labels = dict((k, v) for k, v in labels.items() if k != v)
label_keys = set(labels)
fields = child.fields
if not label_keys.issubset(fields):
non_existent_fields = label_keys.difference(fields)
raise ValueError("Cannot relabel non-existent child fields: {%s}" %
', '.join(map(repr, non_existent_fields)))
if not labels:
return child
if isinstance(labels, dict): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
    blaze.expr.expressions.Apply
"""
__slots__ = '_hash', '_child', 'func', '_schema', '_name0'
@property
def schema(self):
if self._schema:
return dshape(self._schema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in "
".map method.\nExample: "
"t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
if PY2:
copydoc(Map, Expr.map.im_func)
else:
copydoc(Map, Expr.map)
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset
You must provide the datashape of the result with the ``dshape=`` keyword.
For datashape examples see
http://datashape.pydata.org/grammar.html#some-simple-examples
If using a chunking backend and your operation may be safely split and
concatenated then add the ``splittable=True`` keyword argument
>>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
See Also
--------
blaze.expr.expressions.Map
"""
__slots__ = '_hash', '_child', 'func', '_dshape', '_splittable'
@property
def schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
@property
def dshape(self):
return dshape(self._dshape)
@copydoc(Apply)
def apply(expr, func, dshape, splittable=False):
return Apply(expr, func, datashape.dshape(dshape), splittable)
class Coerce(Expr):
"""Coerce an expression to a different type.
Examples
--------
>>> t = symbol('t', '100 * float64')
>>> t.coerce(to='int64')
t.coerce(to='int64')
>>> t.coerce('float32')
t.coerce(to='float32')
>>> t.coerce('int8').dshape
dshape("100 * int8")
"""
__slots__ = '_hash', '_child', 'to'
@property
def schema(self):
return self.to
@property
def dshape(self):
return DataShape(*(self._child.shape + (self.schema,)))
def __str__(self):
return '%s.coerce(to=%r)' % (self._child, str(self.schema))
@copydoc(Coerce)
def coerce(expr, to):
return Coerce(expr, dshape(to) if isinstance(to, _strtypes) else to)
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
s = ds.shape
s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
return s
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
dshape_method_list.extend([
(lambda ds: True, set([apply])),
(iscollection, set([shape, ndim])),
(lambda ds: iscollection(ds) and isscalar(ds.measure), set([coerce]))
])
schema_method_list.extend([
(isscalar, set([label, relabel, coerce])),
(isrecord, set([relabel])),
])
method_properties.update([shape, ndim])
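# Note (descriptive, based on the registrations above): each registry entry
# pairs a predicate over an expression's dshape (or schema) with the set of
# functions exposed as methods/properties when that predicate holds; the
# memoized ``select_functions`` partials perform the selection.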
@dispatch(Expr)
def discover(expr):
return expr.dshape
| {
"repo_name": "xlhtc007/blaze",
"path": "blaze/expr/expressions.py",
"copies": "6",
"size": "23031",
"license": "bsd-3-clause",
"hash": -9032310935931768000,
"line_mean": 27.4333333333,
"line_max": 80,
"alpha_frac": 0.5645000217,
"autogenerated": false,
"ratio": 3.820670205706702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7385170227406702,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..layout import Rectangle, snap_to_grid
class TestSnap(object):
@staticmethod
def check(input, expected, **kwargs):
result = snap_to_grid(input, **kwargs)
for i, e in zip(input, expected):
assert result[i] == e
def test_2x2(self):
rs = [Rectangle(-.2, -.1, .45, .52),
Rectangle(.52, -.23, .49, .49),
Rectangle(0, .45, .51, .53),
Rectangle(.50, .45, .51, .53)]
ex = [Rectangle(0, 0, .5, .5),
Rectangle(.5, 0, .5, .5),
Rectangle(0, .5, .5, .5),
Rectangle(.5, .5, .5, .5)]
self.check(rs, ex)
def test_1x2(self):
rs = [Rectangle(-.2, -.2, .95, .48),
Rectangle(0, .45, .51, .53),
Rectangle(.50, .45, .51, .53)]
ex = [Rectangle(0, 0, 1, .5),
Rectangle(0, .5, .5, .5),
Rectangle(.5, .5, .5, .5)]
self.check(rs, ex)
def test_1x3(self):
rs = [Rectangle(-.02, -.2, .95, .48),
Rectangle(0.1, .51, 0.32, .53),
Rectangle(0.32, .49, .30, .53),
Rectangle(0.7, .52, .40, .53)]
ex = [Rectangle(0, 0, 1, .5),
Rectangle(0, .5, 1 / 3., .5),
Rectangle(1 / 3., .5, 1 / 3., .5),
Rectangle(2 / 3., .5, 1 / 3., .5)]
self.check(rs, ex)
def test_padding_1x2(self):
rs = [Rectangle(0, 0, 1, .5),
Rectangle(0, .5, .5, .5),
Rectangle(.5, .5, .5, .5)]
ex = [Rectangle(.1, .1, .8, .3),
Rectangle(.1, .6, .3, .3),
Rectangle(.6, .6, .3, .3)]
self.check(rs, ex, padding=0.1)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/tests/test_layout.py",
"copies": "5",
"size": "1757",
"license": "bsd-3-clause",
"hash": 3604348905946779600,
"line_mean": 26.8888888889,
"line_max": 64,
"alpha_frac": 0.4302788845,
"autogenerated": false,
"ratio": 2.8615635179153096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 63
} |
from __future__ import absolute_import, division, print_function
from .llvm_array import (array_type, const_intp, auto_const_intp,
intp_type, int_type,
store_at, load_at, get_shape_ptr, get_data_ptr,
get_strides_ptr, sizeof, isinteger, isiterable,
F_CONTIGUOUS, C_CONTIGUOUS, STRIDED)
from llvm.core import Constant, Type
import llvm.core as lc
import itertools
def _check_N(N):
if N is None:
raise ValueError("negative integers not supported")
def adjust_slice(key, N=None):
start = key.start
if start is None:
start = 0
if start < 0:
_check_N(N)
while start < 0:
start += N
stop = key.stop
if stop is None:
_check_N(N)
stop = N
if stop < 0:
_check_N(N)
while stop < 0:
stop += N
step = key.step
if step is None:
step = 1
return start, stop, step
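# Illustrative example (a sketch, not from the original source): adjust_slice
# normalizes a Python ``slice`` into explicit (start, stop, step), resolving
# negative bounds against N.
#
#     >>> adjust_slice(slice(None, -2, None), N=10)   # doctest: +SKIP
#     (0, 8, 1)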
# STRIDED
def Sarr_from_S(arr, key):
raise NotImplementedError
def Sarr_from_S_slice(arr, start, stop, step):
raise NotImplementedError
def from_S_int(arr, index):
return from_S_ints(arr, (index,))
def from_S_ints(arr, key):
raise NotImplementedError
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_S_slice(arr, start, end):
raise NotImplementedError
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end - start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# FORTRAN CONTIGUOUS
def Sarr_from_F(arr, key):
raise NotImplementedError
def Sarr_from_F_slice(arr, start, stop, step):
raise NotImplementedError
def from_F_int(arr, index):
return from_F_ints(arr, (index,))
# key will be *just* the final integers to extract
# so that resulting array stays F_CONTIGUOUS
def from_F_ints(arr, key):
raise NotImplementedError
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_F_slice(arr, start, end):
raise NotImplementedError
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end - start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# C-CONTIGUOUS
def Sarr_from_C(arr, key):
raise NotImplementedError
def Sarr_from_C_slice(arr, start, stop, step):
builder = arr.builder
new = arr.getview(kind=STRIDED)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    newstrides = get_strides_ptr(builder, new.array_ptr)
if all(hasattr(x, '__index__') for x in [start, stop, step]):
step = auto_const_intp(step)
newdim = auto_const_intp((stop - start) // step)
else:
start, stop, step = [auto_const_intp(x) for x in [start, stop, step]]
tmp = builder.sub(stop, start)
newdim = builder.udiv(tmp, step)
store_at(builder, newshape, 0, newdim)
# Copy other dimensions over
for i in range(1, arr.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
raise NotImplementedError
# Fill-in strides
# Update data-ptr
def from_C_int(arr, index):
return from_C_ints(arr, (index,))
def from_C_ints(arr, key):
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_C_slice(arr, start, end):
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end - start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# get just the integers
def _convert(x):
if hasattr(x, '__index__'):
return x.__index__()
else:
return x
_keymsg = "Unsupported getitem value %s"
# val is either Ellipsis or slice object.
# check to see if start, stop, and/or step is given for slice
def _needstride(val):
if not isinstance(val, slice):
return False
if val.start is not None and val.start != 0:
return True
if val.stop is not None:
return True
if (val.step is not None) and (val.step != 1):
return True
return False
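# Illustrative examples (sketch): only a full-range ``:`` (or a bare Ellipsis)
# avoids the strided fallback.
#
#     >>> _needstride(slice(None))      # doctest: +SKIP
#     False
#     >>> _needstride(slice(2, None))   # doctest: +SKIP
#     True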
def _getitem_C(arr, key):
lastint = None
needstrided = False
    # determine if 1) the elements of the getitem iterable are
# integers (LLVM or Python indexable), Ellipsis,
# or slice objects
# 2) the integer elements are all at the front
# so that the resulting slice is continuous
for i, val in enumerate(key):
if isinteger(val):
if lastint is not None:
needstrided = True
        elif val is Ellipsis or isinstance(val, slice):
if lastint is None:
lastint = i
needstrided = _needstride(val)
else:
raise ValueError(_keymsg % val)
if not needstrided:
key = [_convert(x) for x in itertools.islice(key, lastint)]
return needstrided, key
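# Illustrative example (assumes ``isinteger`` accepts plain Python ints):
# leading integers keep the result C-contiguous, so no strided view is needed.
#
#     >>> _getitem_C(arr, (0, 1, slice(None)))   # doctest: +SKIP
#     (False, [0, 1])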
def _getitem_F(arr, key):
# This looks for integers at the end of the key iterable
# arr[:,...,i,j] would not need strided
# arr[:,i,:,j] would need strided as would a[:,i,5:20,j]
# and a[:,...,5:10,j]
# elements can be integers or LLVM ints
# with indexing being done either at compile time (Python int)
# or run time (LLVM int)
last_elsl = None
needstrided = False
for i, val in enumerate(key):
if isinteger(val):
if last_elsl is None:
last_elsl = i
        elif val is Ellipsis or isinstance(val, slice):
if last_elsl is not None:
needstrided = True
needstrided = needstrided or _needstride(val)
else:
raise ValueError(_keymsg % val)
    # Return just the integer fields if needstrided is not set
    if not needstrided:
        key = [_convert(x) for x in itertools.islice(key, last_elsl, None)]
return needstrided, key
def _getitem_S(arr, key):
return True, key
def from_Array(arr, key, char):
if isinteger(key):
return eval('from_%s_int' % char)(arr, key)
elif isinstance(key, slice):
if key == slice(None):
return arr
else:
            start, stop, step = adjust_slice(key)
if step == 1:
return eval('from_%s_slice' % char)(arr, start, stop)
else:
return eval('Sarr_from_%s_slice' % char)(arr, start, stop, step)
elif isiterable(key):
# will be less than arr._nd or have '...' or ':'
# at the end
needstrided, key = eval("_getitem_%s" % char)(arr, key)
if needstrided:
return eval('Sarr_from_%s' % char)(arr, key)
if len(key) > arr.nd:
            raise ValueError('Too many indices')
return eval('from_%s_ints' % char)(arr, key)
else:
raise ValueError(_keymsg % key)
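# Note (descriptive, based on the dispatch above): from_Array selects a helper
# by the contiguity character -- e.g. ``from_Array(arr, 3, 'C')`` resolves to
# ``from_C_int`` -- while iterable keys go through ``_getitem_C``/``_getitem_F``/
# ``_getitem_S`` to decide whether a strided (``Sarr_from_*``) view is required.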
| {
"repo_name": "cezary12/blaze",
"path": "blaze/compute/llgetitem.py",
"copies": "7",
"size": "11586",
"license": "bsd-3-clause",
"hash": 4172442806679004000,
"line_mean": 29.5699208443,
"line_max": 80,
"alpha_frac": 0.6002934576,
"autogenerated": false,
"ratio": 3.3524305555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7452724013155555,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from logging import getLogger
from os.path import isfile, join
from .core.link import UnlinkLinkTransaction
from .core.package_cache_data import ProgressiveFetchExtract
from .exceptions import CondaFileIOError
from .gateways.disk.link import islink
log = getLogger(__name__)
# op codes
CHECK_FETCH = 'CHECK_FETCH'
FETCH = 'FETCH'
CHECK_EXTRACT = 'CHECK_EXTRACT'
EXTRACT = 'EXTRACT'
RM_EXTRACTED = 'RM_EXTRACTED'
RM_FETCHED = 'RM_FETCHED'
PREFIX = 'PREFIX'
PRINT = 'PRINT'
PROGRESS = 'PROGRESS'
SYMLINK_CONDA = 'SYMLINK_CONDA'
UNLINK = 'UNLINK'
LINK = 'LINK'
UNLINKLINKTRANSACTION = 'UNLINKLINKTRANSACTION'
PROGRESSIVEFETCHEXTRACT = 'PROGRESSIVEFETCHEXTRACT'
PROGRESS_COMMANDS = set([EXTRACT, RM_EXTRACTED])
ACTION_CODES = (
CHECK_FETCH,
FETCH,
CHECK_EXTRACT,
EXTRACT,
UNLINK,
LINK,
SYMLINK_CONDA,
RM_EXTRACTED,
RM_FETCHED,
)
def PREFIX_CMD(state, prefix):
state['prefix'] = prefix
def PRINT_CMD(state, arg): # pragma: no cover
if arg.startswith(('Unlinking packages', 'Linking packages')):
return
getLogger('conda.stdout.verbose').info(arg)
def FETCH_CMD(state, package_cache_entry):
raise NotImplementedError()
def EXTRACT_CMD(state, arg):
raise NotImplementedError()
def PROGRESSIVEFETCHEXTRACT_CMD(state, progressive_fetch_extract): # pragma: no cover
assert isinstance(progressive_fetch_extract, ProgressiveFetchExtract)
progressive_fetch_extract.execute()
def UNLINKLINKTRANSACTION_CMD(state, arg): # pragma: no cover
unlink_link_transaction = arg
assert isinstance(unlink_link_transaction, UnlinkLinkTransaction)
unlink_link_transaction.execute()
def check_files_in_package(source_dir, files):
for f in files:
source_file = join(source_dir, f)
if isfile(source_file) or islink(source_file):
return True
else:
raise CondaFileIOError(source_file, "File %s does not exist in tarball" % f)
# Map instruction to command (a python function)
commands = {
PREFIX: PREFIX_CMD,
PRINT: PRINT_CMD,
FETCH: FETCH_CMD,
PROGRESS: lambda x, y: None,
EXTRACT: EXTRACT_CMD,
RM_EXTRACTED: lambda x, y: None,
RM_FETCHED: lambda x, y: None,
UNLINK: None,
LINK: None,
SYMLINK_CONDA: lambda x, y: None,
UNLINKLINKTRANSACTION: UNLINKLINKTRANSACTION_CMD,
PROGRESSIVEFETCHEXTRACT: PROGRESSIVEFETCHEXTRACT_CMD,
}
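# Usage sketch (not part of conda itself; the prefix path is illustrative):
# each op code maps to a callable taking (state, arg).
#
#     state = {}
#     commands[PREFIX](state, '/opt/envs/test')       # sets state['prefix']
#     commands[PRINT](state, 'Transaction starting')  # logged via conda.stdout.verbose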
OP_ORDER = (RM_FETCHED,
FETCH,
RM_EXTRACTED,
EXTRACT,
UNLINK,
LINK,
)
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/instructions.py",
"copies": "1",
"size": "2609",
"license": "apache-2.0",
"hash": 8638009812320842000,
"line_mean": 23.8476190476,
"line_max": 88,
"alpha_frac": 0.690302798,
"autogenerated": false,
"ratio": 3.383916990920882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45742197889208824,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from math import ceil
from operator import getitem
import os
from threading import Lock
from warnings import warn
import pandas as pd
import numpy as np
from toolz import merge
from ...base import tokenize
from ...compatibility import unicode, PY3
from ... import array as da
from ...delayed import delayed
from ..core import DataFrame, Series, new_dd_object
from ..shuffle import set_partition
from ..utils import insert_meta_param_description
from ...utils import M, ensure_dict
lock = Lock()
def _meta_from_array(x, columns=None):
""" Create empty pd.DataFrame or pd.Series which has correct dtype """
if x.ndim > 2:
        raise ValueError('from_array does not accept more than 2D arrays, '
                         'got array with shape %r' % (x.shape,))
if getattr(x.dtype, 'names', None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError("dtype {0} doesn't have fields "
"{1}".format(x.dtype, extra))
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else 'f8' for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return pd.Series([], name=columns, dtype=x.dtype)
elif len(columns) == 1:
return pd.DataFrame(np.array([], dtype=x.dtype), columns=columns)
raise ValueError("For a 1d array, columns must be a scalar or single "
"element list")
else:
if np.isnan(x.shape[1]):
raise ValueError("Shape along axis 1 must be known")
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError("Number of column names must match width of the "
"array. Got {0} names for {1} "
"columns".format(len(columns), x.shape[1]))
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return pd.DataFrame(data, columns=columns)
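# Illustrative example (sketch): _meta_from_array only carries dtype/column
# metadata, so the returned frame is empty.
#
#     >>> _meta_from_array(np.ones((3, 2)), columns=['a', 'b'])   # doctest: +SKIP
#     Empty DataFrame
#     Columns: [a, b]
#     Index: []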
def from_array(x, chunksize=50000, columns=None):
    """ Read any sliceable array into a Dask DataFrame
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
meta = _meta_from_array(x, columns)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, data, None, meta.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, data, None, meta.columns)
return new_dd_object(dsk, name, meta, divisions)
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""
Construct a Dask DataFrame from a Pandas DataFrame
This splits an in-memory Pandas dataframe into several parts and constructs
a dask.dataframe from those parts on which Dask.dataframe can operate in
parallel.
Note that, despite parallelism, Dask.dataframe may not always be faster
than Pandas. We recommend that you stay with Pandas for as long as
possible before switching to Dask.dataframe.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a Dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create. Note that depending on
the size and index of the dataframe, the output may have fewer
partitions than requested.
chunksize : int, optional
The size of the partitions of the index.
sort: bool
Sort input first to obtain cleanly divided partitions or don't sort and
don't get cleanly divided partitions
name: string, optional
An optional keyname for the dataframe. Defaults to hashing the input
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, 'index', None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("Input must be a pandas DataFrame or Series")
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
else:
npartitions = int(ceil(nrows / chunksize))
name = name or ('from_pandas-' + tokenize(data, chunksize))
if not nrows:
return new_dd_object({(name, 0): data}, name, data, [None, None])
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(data.index,
chunksize=chunksize)
else:
locations = list(range(0, nrows, chunksize)) + [len(data)]
divisions = [None] * len(locations)
dsk = dict(((name, i), data.iloc[start: stop])
for i, (start, stop) in enumerate(zip(locations[:-1],
locations[1:])))
return new_dd_object(dsk, name, data, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock,
**kwargs):
""" Read BColz CTable into a Dask Dataframe
BColz is a fast on-disk compressed column store with careful attention
given to compression. https://bcolz.readthedocs.io/en/latest/
Parameters
----------
x : bcolz.ctable
chunksize : int, optional
The size of blocks to pull out from ctable.
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories, lock))
for i in range(0, int(ceil(len(x) / chunksize))))
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = tuple(da.percentile(a, q).compute())
return set_partition(result, index, divisions, **kwargs)
else:
return result
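# Usage sketch (the on-disk path is illustrative): a bcolz rootdir string is
# accepted directly and opened via bcolz.ctable(rootdir=...).
#
#     >>> df = from_bcolz('/data/table.bcolz', chunksize=1000000, index='id')  # doctest: +SKIP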
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
1 2 20
2 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
1 20
2 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
1 20
2 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if type(slc) is slice:
start = slc.start
stop = slc.stop if slc.stop < len(x) else len(x)
else:
start = slc[0].start
stop = slc[0].stop if slc[0].stop < len(x) else len(x)
idx = pd.Index(range(start, stop))
if lock:
lock.acquire()
try:
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(
np.searchsorted(categories[name], chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
result = pd.DataFrame(dict(zip(columns, chunks)), columns=columns,
index=idx)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
result = pd.Series(chunk, name=columns, index=idx)
finally:
if lock:
lock.release()
return result
def from_dask_array(x, columns=None):
    """ Create a Dask DataFrame from a Dask Array
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
See Also
--------
dask.bag.to_dataframe: from dask.bag
dask.dataframe._Frame.values: Reverse conversion
dask.dataframe._Frame.to_records: Reverse conversion
"""
meta = _meta_from_array(x, columns)
if x.ndim == 2 and len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
name = 'from-dask-array' + tokenize(x, columns)
if np.isnan(sum(x.shape)):
divisions = [None] * (len(x.chunks[0]) + 1)
index = [None] * len(x.chunks[0])
else:
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(np.arange, a, b, 1, 'i8') for a, b in
zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
dsk = {}
for i, (chunk, ind) in enumerate(zip(x._keys(), index)):
if x.ndim == 2:
chunk = chunk[0]
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, chunk, ind, x.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, chunk, ind, meta.columns)
return new_dd_object(merge(ensure_dict(x.dask), dsk), name, meta, divisions)
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
def _df_to_bag(df, index=False):
if isinstance(df, pd.DataFrame):
return list(map(tuple, df.itertuples(index)))
elif isinstance(df, pd.Series):
return list(df.iteritems()) if index else list(df)
def to_bag(df, index=False):
"""Create Dask Bag from a Dask DataFrame
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
Examples
--------
>>> bag = df.to_bag() # doctest: +SKIP
"""
from ...bag.core import Bag
if not isinstance(df, (DataFrame, Series)):
raise TypeError("df must be either DataFrame or Series")
name = 'to_bag-' + tokenize(df, index)
dsk = dict(((name, i), (_df_to_bag, block, index))
for (i, block) in enumerate(df._keys()))
dsk.update(df._optimize(df.dask, df._keys()))
return Bag(dsk, name, df.npartitions)
def to_records(df):
""" Create Dask Array from a Dask Dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
Examples
--------
>>> df.to_records() # doctest: +SKIP
dask.array<shape=(nan,), dtype=(numpy.record, [('ind', '<f8'), ('x', 'O'), ('y', '<i8')]), chunksize=(nan,)>
See Also
--------
dask.dataframe._Frame.values
dask.dataframe.from_dask_array
"""
from ...array.core import Array
if not isinstance(df, (DataFrame, Series)):
raise TypeError("df must be either DataFrame or Series")
name = 'to-records-' + tokenize(df)
dsk = {(name, i): (M.to_records, key)
for (i, key) in enumerate(df._keys())}
x = df._meta.to_records()
chunks = ((np.nan,) * df.npartitions,)
return Array(merge(df.dask, dsk), name, chunks, x.dtype)
@insert_meta_param_description
def from_delayed(dfs, meta=None, divisions=None, prefix='from-delayed',
metadata=None):
""" Create Dask DataFrame from many Dask Delayed objects
Parameters
----------
dfs : list of Delayed
An iterable of ``dask.delayed.Delayed`` objects, such as come from
``dask.delayed`` These comprise the individual partitions of the
resulting dataframe.
$META
divisions : tuple, str, optional
Partition boundaries along the index.
For tuple, see http://dask.pydata.org/en/latest/dataframe-design.html#partitions
For string 'sorted' will compute the delayed values to find index
values. Assumes that the indexes are mutually sorted.
If None, then won't use index information
prefix : str, optional
Prefix to prepend to the keys.
"""
if metadata is not None and meta is None:
warn("Deprecation warning: Use meta keyword, not metadata")
meta = metadata
from dask.delayed import Delayed
if isinstance(dfs, Delayed):
dfs = [dfs]
dfs = [delayed(df)
if not isinstance(df, Delayed) and hasattr(df, 'key')
else df
for df in dfs]
for df in dfs:
if not isinstance(df, Delayed):
raise TypeError("Expected Delayed object, got %s" %
type(df).__name__)
dsk = merge(df.dask for df in dfs)
name = prefix + '-' + tokenize(*dfs)
names = [(name, i) for i in range(len(dfs))]
values = [df.key for df in dfs]
dsk2 = dict(zip(names, values))
dsk3 = merge(dsk, dsk2)
if meta is None:
meta = dfs[0].compute()
if isinstance(meta, (str, pd.Series)):
Frame = Series
else:
Frame = DataFrame
if divisions is None or divisions == 'sorted':
divs = [None] * (len(dfs) + 1)
else:
divs = tuple(divisions)
if len(divs) != len(dfs) + 1:
raise ValueError("divisions should be a tuple of len(dfs) + 1")
df = Frame(dsk3, name, meta, divs)
if divisions == 'sorted':
from ..shuffle import compute_divisions
divisions = compute_divisions(df)
df.divisions = divisions
return df
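# Usage sketch (``filenames`` is illustrative): each delayed object becomes one
# partition of the resulting dask DataFrame.
#
#     >>> parts = [delayed(pd.read_csv)(fn) for fn in filenames]         # doctest: +SKIP
#     >>> ddf = from_delayed(parts, meta=parts[0].compute().iloc[:0])    # doctest: +SKIP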
def sorted_division_locations(seq, npartitions=None, chunksize=None):
""" Find division locations and values in sorted list
Examples
--------
>>> L = ['A', 'B', 'C', 'D', 'E', 'F']
>>> sorted_division_locations(L, chunksize=2)
(['A', 'C', 'E', 'F'], [0, 2, 4, 6])
>>> sorted_division_locations(L, chunksize=3)
(['A', 'D', 'F'], [0, 3, 6])
>>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']
>>> sorted_division_locations(L, chunksize=3)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(L, chunksize=2)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(['A'], chunksize=2)
(['A', 'A'], [0, 1])
"""
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
if npartitions:
chunksize = ceil(len(seq) / npartitions)
positions = [0]
values = [seq[0]]
for pos in list(range(0, len(seq), chunksize)):
if pos <= positions[-1]:
continue
while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:
pos += 1
values.append(seq[pos])
if pos == len(seq) - 1:
pos += 1
positions.append(pos)
if positions[-1] != len(seq):
positions.append(len(seq))
values.append(seq[-1])
return values, positions
if PY3:
DataFrame.to_records.__doc__ = to_records.__doc__
DataFrame.to_bag.__doc__ = to_bag.__doc__
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/io/io.py",
"copies": "1",
"size": "19641",
"license": "bsd-3-clause",
"hash": 2977808181552649000,
"line_mean": 32.3463497453,
"line_max": 112,
"alpha_frac": 0.5871391477,
"autogenerated": false,
"ratio": 3.6691574817859145,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755724867293477,
"avg_score": 0.00011435243848750694,
"num_lines": 589
} |
from __future__ import absolute_import, division, print_function
from math import ceil
from operator import getitem
import os
from threading import Lock
import uuid
from warnings import warn
import pandas as pd
import numpy as np
from toolz import merge
from ...base import tokenize
from ...compatibility import unicode
from ... import array as da
from ...async import get_sync
from ...delayed import Delayed, delayed
from ..core import DataFrame, Series, new_dd_object
from ..shuffle import set_partition
from ..utils import insert_meta_param_description
from ...utils import M
lock = Lock()
def _meta_from_array(x, columns=None):
""" Create empty pd.DataFrame or pd.Series which has correct dtype """
if x.ndim > 2:
        raise ValueError('from_array does not accept more than 2D arrays, '
                         'got array with shape %r' % (x.shape,))
if getattr(x.dtype, 'names', None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError("dtype {0} doesn't have fields "
"{1}".format(x.dtype, extra))
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else 'f8' for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return pd.Series([], name=columns, dtype=x.dtype)
elif len(columns) == 1:
return pd.DataFrame(np.array([], dtype=x.dtype), columns=columns)
raise ValueError("For a 1d array, columns must be a scalar or single "
"element list")
else:
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError("Number of column names must match width of the "
"array. Got {0} names for {1} "
"columns".format(len(columns), x.shape[1]))
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return pd.DataFrame(data, columns=columns)
def from_array(x, chunksize=50000, columns=None):
    """ Read a dask DataFrame from any sliceable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
meta = _meta_from_array(x, columns)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, data, None, meta.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, data, None, meta.columns)
return new_dd_object(dsk, name, meta, divisions)
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create.
chunksize : int, optional
The size of the partitions of the index.
sort: bool
Sort input first to obtain cleanly divided partitions or don't sort and
don't get cleanly divided partitions
name: string, optional
An optional keyname for the dataframe. Defaults to hashing the input
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, 'index', None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("Input must be a pandas DataFrame or Series")
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
else:
npartitions = int(ceil(nrows / chunksize))
name = name or ('from_pandas-' + tokenize(data, chunksize))
if not nrows:
return new_dd_object({(name, 0): data}, name, data, [None, None])
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(data.index,
chunksize=chunksize)
else:
locations = list(range(0, nrows, chunksize)) + [len(data)]
divisions = [None] * len(locations)
dsk = dict(((name, i), data.iloc[start: stop])
for i, (start, stop) in enumerate(zip(locations[:-1],
locations[1:])))
return new_dd_object(dsk, name, data, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock,
**kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int, optional
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories, lock))
for i in range(0, int(ceil(len(x) / chunksize))))
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = tuple(da.percentile(a, q).compute())
return set_partition(result, index, divisions, **kwargs)
else:
return result
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
1 2 20
2 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
1 20
2 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
1 20
2 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if type(slc) is slice:
start = slc.start
stop = slc.stop if slc.stop < len(x) else len(x)
else:
start = slc[0].start
stop = slc[0].stop if slc[0].stop < len(x) else len(x)
idx = pd.Index(range(start, stop))
if lock:
lock.acquire()
try:
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(
np.searchsorted(categories[name], chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
result = pd.DataFrame(dict(zip(columns, chunks)), columns=columns,
index=idx)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
result = pd.Series(chunk, name=columns, index=idx)
finally:
if lock:
lock.release()
return result
def from_dask_array(x, columns=None):
""" Convert dask Array to dask DataFrame
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
"""
meta = _meta_from_array(x, columns)
name = 'from-dask-array' + tokenize(x, columns)
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(np.arange, a, b, 1, 'i8') for a, b in
zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
if x.ndim == 2:
if len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
dsk = {}
for i, (chunk, ind) in enumerate(zip(x._keys(), index)):
if x.ndim == 2:
chunk = chunk[0]
if isinstance(meta, pd.Series):
dsk[name, i] = (pd.Series, chunk, ind, x.dtype, meta.name)
else:
dsk[name, i] = (pd.DataFrame, chunk, ind, meta.columns)
return new_dd_object(merge(x.dask, dsk), name, meta, divisions)
def from_castra(x, columns=None):
"""Load a dask DataFrame from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
return x.to_dask(columns)
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
def to_castra(df, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from castra import Castra
name = 'to-castra-' + uuid.uuid1().hex
if sorted_index_column:
func = lambda part: (M.set_index, part, sorted_index_column)
else:
func = lambda part: part
dsk = dict()
dsk[(name, -1)] = (Castra, fn, func((df._name, 0)), categories)
for i in range(0, df.npartitions):
dsk[(name, i)] = (_link, (name, i - 1),
(Castra.extend, (name, -1), func((df._name, i))))
dsk = merge(dsk, df.dask)
keys = [(name, -1), (name, df.npartitions - 1)]
if compute:
return DataFrame._get(dsk, keys, get=get)[0]
else:
return delayed([Delayed(key, [dsk]) for key in keys])[0]
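# Note (descriptive, based on the graph built above): the (name, i) tasks are
# chained through ``_link`` so each Castra.extend call depends on the previous
# partition's task, forcing sequential appends even on a parallel scheduler.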
def _df_to_bag(df, index=False):
if isinstance(df, pd.DataFrame):
return list(map(tuple, df.itertuples(index)))
elif isinstance(df, pd.Series):
return list(df.iteritems()) if index else list(df)
def to_bag(df, index=False):
from ...bag.core import Bag
if not isinstance(df, (DataFrame, Series)):
raise TypeError("df must be either DataFrame or Series")
name = 'to_bag-' + tokenize(df, index)
dsk = dict(((name, i), (_df_to_bag, block, index))
for (i, block) in enumerate(df._keys()))
dsk.update(df._optimize(df.dask, df._keys()))
return Bag(dsk, name, df.npartitions)
@insert_meta_param_description
def from_delayed(dfs, meta=None, divisions=None, prefix='from-delayed',
metadata=None):
""" Create DataFrame from many dask.delayed objects
Parameters
----------
dfs : list of Delayed
An iterable of ``dask.delayed.Delayed`` objects, such as come from
``dask.delayed`` These comprise the individual partitions of the
resulting dataframe.
$META
divisions : tuple, str, optional
Partition boundaries along the index.
For tuple, see http://dask.pydata.io/en/latest/dataframe-partitions.html
For string 'sorted' will compute the delayed values to find index
values. Assumes that the indexes are mutually sorted.
If None, then won't use index information
prefix : str, optional
Prefix to prepend to the keys.
"""
if metadata is not None and meta is None:
warn("Deprecation warning: Use meta keyword, not metadata")
meta = metadata
from dask.delayed import Delayed
if isinstance(dfs, Delayed):
dfs = [dfs]
dsk = merge(df.dask for df in dfs)
name = prefix + '-' + tokenize(*dfs)
names = [(name, i) for i in range(len(dfs))]
values = [df.key for df in dfs]
dsk2 = dict(zip(names, values))
dsk3 = merge(dsk, dsk2)
if meta is None:
meta = dfs[0].compute()
if isinstance(meta, (str, pd.Series)):
Frame = Series
else:
Frame = DataFrame
if divisions == 'sorted':
from ..core import compute_divisions
divisions = [None] * (len(dfs) + 1)
df = Frame(dsk3, name, meta, divisions)
return compute_divisions(df)
elif divisions is None:
divisions = [None] * (len(dfs) + 1)
return Frame(dsk3, name, meta, divisions)
def sorted_division_locations(seq, npartitions=None, chunksize=None):
""" Find division locations and values in sorted list
Examples
--------
>>> L = ['A', 'B', 'C', 'D', 'E', 'F']
>>> sorted_division_locations(L, chunksize=2)
(['A', 'C', 'E', 'F'], [0, 2, 4, 6])
>>> sorted_division_locations(L, chunksize=3)
(['A', 'D', 'F'], [0, 3, 6])
>>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']
>>> sorted_division_locations(L, chunksize=3)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(L, chunksize=2)
(['A', 'B', 'C'], [0, 4, 8])
>>> sorted_division_locations(['A'], chunksize=2)
(['A', 'A'], [0, 1])
"""
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
if npartitions:
chunksize = ceil(len(seq) / npartitions)
positions = [0]
values = [seq[0]]
for pos in list(range(0, len(seq), chunksize)):
if pos <= positions[-1]:
continue
while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:
pos += 1
values.append(seq[pos])
if pos == len(seq) - 1:
pos += 1
positions.append(pos)
if positions[-1] != len(seq):
positions.append(len(seq))
values.append(seq[-1])
return values, positions
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/io/io.py",
"copies": "1",
"size": "18447",
"license": "mit",
"hash": -8264843338888125000,
"line_mean": 31.5918727915,
"line_max": 87,
"alpha_frac": 0.5843768634,
"autogenerated": false,
"ratio": 3.6198979591836733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47042748225836734,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from matplotlib.colors import ColorConverter
from glue.config import settings
from glue.external.echo import callback_property
from glue.external import six
# Define acceptable line styles
VALID_LINESTYLES = ['solid', 'dashed', 'dash-dot', 'dotted', 'none']
__all__ = ['VisualAttributes']
class VisualAttributes(object):
'''
This class is used to define visual attributes for any kind of objects
The essential attributes of a VisualAttributes instance are:
:param color: A matplotlib color string
:param alpha: Opacity (0-1)
:param linewidth: The linewidth (float or int)
:param linestyle: The linestyle (``'solid' | 'dashed' | 'dash-dot' | 'dotted' | 'none'``)
:param marker: The matplotlib marker shape (``'o' | 's' | '^' | etc``)
:param markersize: The size of the marker (int)
'''
def __init__(self, parent=None, washout=False, color=None, alpha=None):
# We have to set the defaults here, otherwise the settings are fixed
# once the class is defined.
color = color or settings.DATA_COLOR
alpha = alpha or settings.DATA_ALPHA
self.parent = parent
self._atts = ['color', 'alpha', 'linewidth', 'linestyle', 'marker',
'markersize']
self.color = color
self.alpha = alpha
self.linewidth = 1
self.linestyle = 'solid'
self.marker = 'o'
self.markersize = 3
def __eq__(self, other):
if not isinstance(other, VisualAttributes):
return False
elif self is other:
return True
else:
return all(getattr(self, a) == getattr(other, a) for a in self._atts)
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if six.PY3:
__hash__ = object.__hash__
def set(self, other):
"""
Update this instance's properties based on another VisualAttributes instance.
"""
for att in self._atts:
setattr(self, att, getattr(other, att))
def copy(self, new_parent=None):
"""
Create a new instance with the same visual properties
"""
result = VisualAttributes()
result.set(self)
if new_parent is not None:
result.parent = new_parent
return result
@callback_property
def color(self):
"""
Color specified using Matplotlib notation
Specifically, it can be:
* A string with a common color (e.g. 'black', 'red', 'orange')
* A string containing a float in the rng [0:1] for a shade of
gray ('0.0' = black,'1.0' = white)
* A tuple of three floats in the rng [0:1] for (R, G, B)
* An HTML hexadecimal string (e.g. '#eeefff')
"""
return self._color
@color.setter
def color(self, value):
if isinstance(value, six.string_types):
self._color = value.lower()
else:
self._color = value
@callback_property
def alpha(self):
"""
Transparency, given as a floating point value between 0 and 1.
"""
return self._alpha
@alpha.setter
def alpha(self, value):
self._alpha = value
@property
def rgba(self):
r, g, b = ColorConverter().to_rgb(self.color)
return (r, g, b, self.alpha)
@callback_property
def linestyle(self):
"""
The line style, which can be one of 'solid', 'dashed', 'dash-dot',
'dotted', or 'none'.
"""
return self._linestyle
@linestyle.setter
def linestyle(self, value):
if value not in VALID_LINESTYLES:
raise Exception("Line style should be one of %s" %
'/'.join(VALID_LINESTYLES))
self._linestyle = value
@callback_property
def linewidth(self):
"""
The line width, in points.
"""
return self._linewidth
@linewidth.setter
def linewidth(self, value):
if type(value) not in [float, int]:
raise Exception("Line width should be a float or an int")
if value < 0:
raise Exception("Line width should be positive")
self._linewidth = value
@callback_property
def marker(self):
"""
The marker symbol.
"""
return self._marker
@marker.setter
def marker(self, value):
self._marker = value
@callback_property
def markersize(self):
return self._markersize
@markersize.setter
def markersize(self, value):
self._markersize = int(value)
def __setattr__(self, attribute, value):
# Check that the attribute exists (don't allow new attributes)
allowed = set(['color', 'linewidth', 'linestyle',
'alpha', 'parent', 'marker', 'markersize'])
if attribute not in allowed and not attribute.startswith('_'):
raise Exception("Attribute %s does not exist" % attribute)
changed = getattr(self, attribute, None) != value
super(VisualAttributes, self).__setattr__(attribute, value)
# if parent has a broadcast method, broadcast the change
if (changed and hasattr(self, 'parent') and
hasattr(self.parent, 'broadcast') and
attribute != 'parent' and not attribute.startswith('_')):
self.parent.broadcast('style')
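# Usage sketch (not part of the module):
#
#     >>> style = VisualAttributes(color='red', alpha=0.5)   # doctest: +SKIP
#     >>> style.rgba                                         # doctest: +SKIP
#     (1.0, 0.0, 0.0, 0.5)
#     >>> style.copy().color                                 # doctest: +SKIP
#     'red'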
| {
"repo_name": "saimn/glue",
"path": "glue/core/visual.py",
"copies": "4",
"size": "5472",
"license": "bsd-3-clause",
"hash": -1179724166966962000,
"line_mean": 29.5698324022,
"line_max": 93,
"alpha_frac": 0.5858918129,
"autogenerated": false,
"ratio": 4.218966846569005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019252137877264874,
"num_lines": 179
} |
from __future__ import absolute_import, division, print_function
from mentor.executor import OneOffExecutor, Running
from mentor.messages import PythonTask, PythonTaskStatus
from mentor.utils import RemoteException
class FakeThread(object):
def __init__(self, target):
self.target = target
def start(self):
return self.target()
def test_finished_status_updates(mocker):
mocker.patch('threading.Thread', side_effect=FakeThread)
driver = mocker.Mock()
task = PythonTask(fn=sum, args=[range(5)])
executor = OneOffExecutor()
executor.on_launch(driver, task)
calls = driver.update.call_args_list
args, kwargs = calls[0]
status = args[0]
assert isinstance(status, PythonTaskStatus)
assert status.state == 'TASK_RUNNING'
assert status.data is None
args, kwargs = calls[1]
status = args[0]
assert isinstance(status, PythonTaskStatus)
assert status.state == 'TASK_FINISHED'
assert status.data == 10
def test_failed_status_updates(mocker):
mocker.patch('threading.Thread', FakeThread)
def failing_function(*args):
raise Exception("Booom!")
driver = mocker.Mock()
task = PythonTask(fn=failing_function, args=['arbitrary', 'args'])
executor = OneOffExecutor()
executor.on_launch(driver, task)
calls = driver.update.call_args_list
args, kwargs = calls[0]
status = args[0]
assert isinstance(status, PythonTaskStatus)
assert status.state == 'TASK_RUNNING'
assert status.data is None
args, kwargs = calls[1]
status = args[0]
assert isinstance(status, PythonTaskStatus)
assert status.state == 'TASK_FAILED'
assert isinstance(status.data, tuple)
assert isinstance(status.exception, RemoteException)
assert status.message == 'Booom!'
# def test_runner_context_manager():
# executor = OneOffExecutor()
# with Running(executor):
# pass
# assert executor
| {
"repo_name": "lensacom/satyr",
"path": "mentor/tests/test_executor.py",
"copies": "1",
"size": "1949",
"license": "apache-2.0",
"hash": 5379852632266473000,
"line_mean": 24.6447368421,
"line_max": 70,
"alpha_frac": 0.6859928168,
"autogenerated": false,
"ratio": 3.8290766208251474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015069437625147,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from mock import MagicMock
import pytest
import numpy as np
from ... import core
from .. import roi
from .. import command as c
from ..data_factories import tabular_data
from .util import simple_session, simple_catalog
from ...external.six.moves import range as xrange
class TestCommandStack(object):
def setup_method(self, method):
self.session = simple_session()
self.stack = self.session.command_stack
def make_command(self):
return MagicMock(c.Command)
def make_data(self):
with simple_catalog() as path:
cmd = c.LoadData(path=path, factory=tabular_data)
data = self.stack.do(cmd)
return data
def test_do(self):
c1 = self.make_command()
self.stack.do(c1)
c1.do.assert_called_once_with(self.session)
def test_undo(self):
c1, c2 = self.make_command(), self.make_command()
self.stack.do(c1)
self.stack.do(c2)
self.stack.undo()
c2.undo.assert_called_once_with(self.session)
self.stack.undo()
c1.undo.assert_called_once_with(self.session)
def test_redo(self):
c1, c2 = self.make_command(), self.make_command()
self.stack.do(c1)
self.stack.do(c2)
self.stack.undo()
self.stack.redo()
c2.undo.assert_called_once_with(self.session)
assert c2.do.call_count == 2
assert c2.undo.call_count == 1
assert c1.do.call_count == 1
assert c1.undo.call_count == 0
def test_max_undo(self):
cmds = [self.make_command() for _ in xrange(c.MAX_UNDO + 1)]
for cmd in cmds:
self.stack.do(cmd)
for cmd in cmds[:-1]:
self.stack.undo()
with pytest.raises(IndexError):
self.stack.undo()
def test_invalid_redo(self):
with pytest.raises(IndexError) as exc:
self.stack.redo()
assert exc.value.args[0] == 'No commands to redo'
def test_load_data(self):
data = self.make_data()
np.testing.assert_array_equal(data['a'], [1, 3])
def test_add_data(self):
data = self.make_data()
cmd = c.AddData(data=data)
self.stack.do(cmd)
assert len(self.session.data_collection) == 1
self.stack.undo()
assert len(self.session.data_collection) == 0
def test_remove_data(self):
data = self.make_data()
add = c.AddData(data=data)
remove = c.RemoveData(data=data)
self.stack.do(add)
assert len(self.session.data_collection) == 1
self.stack.do(remove)
assert len(self.session.data_collection) == 0
self.stack.undo()
assert len(self.session.data_collection) == 1
def test_new_data_viewer(self):
cmd = c.NewDataViewer(viewer=None, data=None)
v = self.stack.do(cmd)
self.session.application.new_data_viewer.assert_called_once_with(
None, None)
self.stack.undo()
v.close.assert_called_once_with(warn=False)
def test_apply_roi(self):
x = core.Data(x=[1, 2, 3])
s = x.new_subset()
dc = self.session.data_collection
dc.append(x)
r = MagicMock(roi.Roi)
client = MagicMock(core.client.Client)
client.data = dc
cmd = c.ApplyROI(client=client, roi=r)
self.stack.do(cmd)
client.apply_roi.assert_called_once_with(r)
old_state = s.subset_state
s.subset_state = MagicMock(spec_set=core.subset.SubsetState)
self.stack.undo()
assert s.subset_state is old_state
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tests/test_command.py",
"copies": "1",
"size": "3669",
"license": "bsd-3-clause",
"hash": 4369406948513540000,
"line_mean": 25.2071428571,
"line_max": 73,
"alpha_frac": 0.5979831017,
"autogenerated": false,
"ratio": 3.4418386491557222,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9539821750855723,
"avg_score": 0,
"num_lines": 140
} |
from __future__ import absolute_import, division, print_function
from mock import MagicMock
from ..application_base import Application
from .. import Data
from ...external.six.moves import cPickle as pickle
class MockApplication(Application):
def __init__(self, data=None, hub=None):
super(MockApplication, self).__init__(data, hub)
self.tab = MagicMock()
self.errors = MagicMock()
def report_error(self, message, detail):
self.errors.report(message, detail)
def new_tab(self):
self.tab.tab()
def add_widget(self, widget, label=None, tab=None):
self.tab.add(widget, label)
def close_tab(self):
self.tab.close()
def _load_settings(self):
pass
class TestApplicationBase(object):
def setup_method(self, method):
self.app = MockApplication()
def test_suggest_mergers(self):
x = Data(x=[1, 2, 3])
y = Data(y=[1, 2, 3, 4])
z = Data(z=[1, 2, 3])
Application._choose_merge = MagicMock()
Application._choose_merge.return_value = [x]
self.app.data_collection.merge = MagicMock()
self.app.data_collection.append(x)
self.app.data_collection.append(y)
self.app.add_datasets(self.app.data_collection, z)
args = self.app._choose_merge.call_args[0]
assert args[0] == z
assert args[1] == [x]
assert self.app.data_collection.merge.call_count == 1
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tests/test_application_base.py",
"copies": "1",
"size": "1456",
"license": "bsd-3-clause",
"hash": 4910993275697213000,
"line_mean": 25,
"line_max": 64,
"alpha_frac": 0.6222527473,
"autogenerated": false,
"ratio": 3.6129032258064515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9735155973106451,
"avg_score": 0,
"num_lines": 56
} |
from __future__ import absolute_import, division, print_function
from mock import patch
from matplotlib import cm
from glue.core import Data, DataCollection
from ..subset_facet import SubsetFacet
patched_facet = patch('glue.dialogs.subset_facet.qt.subset_facet.facet_subsets')
class TestSubsetFacet(object):
def setup_method(self, method):
d = Data(x=[1, 2, 3])
dc = DataCollection([d])
self.collect = dc
self.s = dc.new_subset_group()
def test_limits(self):
s = SubsetFacet(self.collect)
s.data = self.collect[0]
s.component = self.collect[0].id['x']
assert s.vmin == 1
assert s.vmax == 3
def test_get_set_cmap(self):
s = SubsetFacet(self.collect)
assert s.cmap is cm.cool
def test_apply(self):
with patched_facet as p:
s = SubsetFacet(self.collect)
s.data = self.collect[0]
s.component = self.collect[0].id['x']
s._apply()
p.assert_called_once_with(self.collect, s.component,
lo=1, hi=3,
steps=5, log=False)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/subset_facet/qt/tests/test_subset_facet.py",
"copies": "4",
"size": "1169",
"license": "bsd-3-clause",
"hash": -1800748357862443300,
"line_mean": 27.512195122,
"line_max": 80,
"alpha_frac": 0.5739948674,
"autogenerated": false,
"ratio": 3.5749235474006116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030111412225233364,
"num_lines": 41
} |
from __future__ import absolute_import, division, print_function
from mock import patch
from qtpy import QtWidgets
from glue import _plugin_helpers as ph
from glue.main import load_plugins
from ..plugin_manager import QtPluginManager
def setup_function(func):
from glue import config
func.CFG_DIR_ORIG = config.CFG_DIR
def teardown_function(func):
from glue import config
config.CFG_DIR = func.CFG_DIR_ORIG
def test_basic_empty(tmpdir):
# Test that things work when the plugin cfg file is empty
from glue import config
config.CFG_DIR = tmpdir.join('.glue').strpath
w = QtPluginManager()
w.clear()
w.update_list()
w.finalize()
def test_basic(tmpdir):
# Test that things work when the plugin cfg file is populated
from glue import config
config.CFG_DIR = tmpdir.join('.glue').strpath
load_plugins()
config = ph.PluginConfig.load()
config.plugins['spectrum_tool'] = False
config.plugins['pv_slicer'] = False
config.save()
w = QtPluginManager()
w.clear()
w.update_list()
w.finalize()
config2 = ph.PluginConfig.load()
assert config.plugins == config2.plugins
def test_permission_fail(tmpdir):
from glue import config
config.CFG_DIR = tmpdir.join('.glue').strpath
# Make a *file* at that location so that reading the plugin file will fail
with open(config.CFG_DIR, 'w') as f:
f.write("test")
config2 = ph.PluginConfig.load()
with patch.object(QtWidgets.QMessageBox, 'exec_', return_value=None) as qmb:
w = QtPluginManager()
w.finalize()
assert qmb.call_count == 1
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/app/qt/tests/test_plugin_manager.py",
"copies": "3",
"size": "1640",
"license": "bsd-3-clause",
"hash": -8504459362969016000,
"line_mean": 20.5789473684,
"line_max": 80,
"alpha_frac": 0.6762195122,
"autogenerated": false,
"ratio": 3.6363636363636362,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016244314489928524,
"num_lines": 76
} |
from __future__ import absolute_import, division, print_function
from multipledispatch import dispatch
from six import string_types
from pennies.trading.assets import Asset
from pennies.market.market import Market
BASE_ASSET = (Asset, object)
BASE_MARKET = (Market, object)
BASE_STRING = string_types + (object,)
@dispatch(BASE_ASSET, BASE_MARKET, BASE_STRING)
def present_value(contract, market, reporting_ccy):
"""Base present value calculation.
    Given an asset (or sequence of assets), calculate its present
value as of today. The supplied market provides prices, curves, and so on.
Parameters
----------
contract: Asset
Asset to calculate present value
market: Market
Market to retrieve raw and computed market values
such as prices, curves, and surfaces.
reporting_ccy: str
Specifies which currency to report value in
Returns
-------
float
Present Value in the reporting currency
"""
raise NotImplementedError("Not available for base types")
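# Hedged sketch: concrete asset types register more specific signatures with
# multipledispatch, which then routes calls at runtime. For instance,
# pennies.calculators.payments (included further below) does roughly
#
#     @dispatch(BulletPayment, RatesTermStructure, str)
#     def present_value(contract, market, reporting_ccy):
#         df = market.discount_factor(contract.dt_payment, contract.currency)
#         return contract.notional * df  # plus an fx conversion if needed
#
# so present_value(bullet, rates_market, 'USD') resolves to that overload
# instead of this NotImplementedError fallback.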
@dispatch(BASE_ASSET, BASE_MARKET, BASE_STRING, object, BASE_STRING)
def sens_to_zero_rates(contract, market, curve_ccy, curve_key, reporting_ccy):
"""Sensitivity of each cashflow to the curve specified by currency and key
Given an asset contract (or sequence of assets), calculate the change in
Present Value to a unit shift in the zero rate of the curve specified
by curve_ccy and curve_key.
Parameters
----------
contract: Asset
Calculate Sensitivity to this Asset.
market: Market
Market to retrieve raw and computed market values
such as prices, curves, and surfaces.
curve_ccy: str
Specifies currency of the curve
curve_key: str
Along with curve_ccy, specifies which curve to compute sensitivity to.
reporting_ccy: str
Specifies which currency to report value in
Returns
-------
DataFrame
Table containing maturities, sensitivities, curve currency and key,
columns=['ttm', 'sens', 'ccy', 'curve'].
"""
raise NotImplementedError("Not available for base types")
@dispatch(BASE_ASSET, BASE_MARKET, str)
def sens_to_market_rates(contract, market, reporting_ccy):
"""Compute sensitivity of contract to each node in the market's curves.
Market Curve Calibration consists of finding the discount rates for a
desired set of curves that correctly prices a target set of contracts.
To do this, we form a Jacobian matrix consisting of the sensitivities
of each contract's Present Value, V_i,
to each node of each curve in the market, r_j: d(V_i)/dr_j.
This function produces a single row of the Jacobian. Note the indices:
i = index over market contracts. (rows)
j = index over model curve nodes, for all curves. (columns)
Thus, if one had a discount curve with N nodes,
and a single LIBOR curve with M nodes. j = 0..N+M-1
V_i = Present Value of the i'th contract.
r_j = Discount rate of node j.
t_k = Maturity of some bond required to price V_i.
r_{c,k} = discount rate of curve, c, at time t_k.
z_{c,k} = discount bond price of curve, c, at time t_k.
    dV_i/dr_j = ( dV_i/dz_{c,k} ) * ( dz_{c,k} / dr_{c,k} ) * ( dr_{c,k} / dr_j )
Note that dr_{c,k} / dr_j == 0 if node j does not belong to curve c.
Parameters
----------
contract: Asset
Asset to calculate present value
market: Market
Market to retrieve raw and computed market values
such as prices, curves, and surfaces.
Returns
-------
Array of CurrencyAmount's
Derivative of Present Value with respect to each node's rate
"""
raise NotImplementedError("Not available for base types")
class AssetCalculator(object):
def __init__(self, contract, market):
"""
Parameters
----------
contract: Asset
Asset (or Trade) representing the payment of a fixed amount
market: Market
Market values required to price the Asset.
"""
self.contract = contract
self.market = market
def all_calculators():
return AssetCalculator.__subclasses__()
def default_calculators():
from pennies.trading import assets
from pennies.calculators import payments
return {
str(assets.BulletPayment): payments.BulletPaymentCalculator,
str(assets.DiscountBond): payments.BulletPaymentCalculator,
str(assets.SettlementPayment): payments.BulletPaymentCalculator,
str(assets.Zero): payments.BulletPaymentCalculator,
str(assets.ZeroCouponBond): payments.BulletPaymentCalculator,
str(assets.CompoundAsset): None,
str(assets.Annuity): None,
str(assets.FixedLeg): None,
str(assets.IborLeg): None,
str(assets.Swap): None,
str(assets.VanillaSwap): None,
str(assets.CurrencySwap): None,
str(assets.TenorSwap): None,
str(assets.Deposit): None,
str(assets.StirFuture): None,
str(assets.FRA): None,
str(assets.IborFixing): None}
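# Hedged usage sketch (assumed, not part of the original module): a caller
# could resolve and use a calculator for a given contract roughly like
#
#     calc_cls = default_calculators().get(str(type(contract)))
#     if calc_cls is not None:
#         pv = calc_cls(contract, market).present_value()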
| {
"repo_name": "caseyclements/pennies",
"path": "pennies/calculators/assets.py",
"copies": "1",
"size": "5119",
"license": "apache-2.0",
"hash": 4056597498530552300,
"line_mean": 32.4575163399,
"line_max": 79,
"alpha_frac": 0.6665364329,
"autogenerated": false,
"ratio": 3.9407236335642803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510726006646428,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from multipledispatch import dispatch
from pennies.trading.assets import BulletPayment
from pennies.calculators.assets import AssetCalculator
from pennies import CurrencyAmount
from pennies.market.market import Market, RatesTermStructure
@dispatch(BulletPayment, RatesTermStructure, str)
def present_value(contract, market, reporting_ccy):
"""Present Value as sum of discount cash flows.
This assumes that one has already computed the rates.
For fixed rate annuities, this will be done during construction.
For floating rate annuities, this will have to be pre-computed,
    typically via pseudo-discount factors of other curves."""
df = market.discount_factor(contract.dt_payment, contract.currency)
pv = contract.notional * df
if reporting_ccy != contract.currency:
pv *= market.fx(reporting_ccy, contract.currency)
return pv
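# Hedged worked example (all numbers hypothetical): for a notional of 100 USD,
# a discount factor of 0.97 on the payment date, and an fx factor of 0.90
# returned by market.fx('EUR', 'USD'), the function above gives
#
#     pv = 100 * 0.97     # 97.0 in USD
#     pv = 97.0 * 0.90    # 87.3 reported in EUR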
class BulletPaymentCalculator(AssetCalculator):
"""(Deprecated) Calculator for BulletPayments and its aliases."""
measures = { # TODO Complete this. Consider visitor pattern for ccrs
"present_value",
#"pv01",
#"position",
#"cashflow_dates" # Should this be here?
}
def __init__(self, contract, market):
"""
Parameters
----------
contract: BulletPayment
Asset (or Trade) representing the payment of a fixed amount
market: Market
Market values required to price the Asset. Here, a DiscountCurve
"""
super(BulletPaymentCalculator, self).__init__(contract, market)
# TODO As it stands, init isn't needed as it just calls super
# TODO It is here as a reminder to refactor if market gets specific
def present_value(self):
"""Present, or Fair, Value of a known BulletPayment."""
df = self.market.discount_factor(self.contract.dt_payment,
self.contract.currency)
return CurrencyAmount(self.contract.notional * df, self.contract.currency)
| {
"repo_name": "caseyclements/pennies",
"path": "pennies/calculators/payments.py",
"copies": "1",
"size": "2089",
"license": "apache-2.0",
"hash": 4311841912290494500,
"line_mean": 37.6851851852,
"line_max": 82,
"alpha_frac": 0.6840593585,
"autogenerated": false,
"ratio": 4.2719836400818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5456042998581799,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from multipledispatch import MDNotImplementedError
from odo import Chunks, chunks, convert, discover, into
from collections import Iterator, Iterable
from toolz import curry, concat, map
from datashape.dispatch import dispatch
import pandas as pd
import numpy as np
from ..expr import Head, ElemWise, Distinct, Symbol, Expr, path
from ..expr.split import split
from .core import compute
from .pmap import get_default_pmap
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, Chunks)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return convert(Iterator, data)
else:
raise MDNotImplementedError()
def compute_chunk(chunk, chunk_expr, part):
return compute(chunk_expr, {chunk: part})
@dispatch(Expr, Chunks)
def compute_down(expr, data, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)
parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], (Iterable, Iterator)):
intermediate = list(concat(parts))
return compute(agg_expr, {agg: intermediate})
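# Hedged illustration (hypothetical expression, not from this module): for an
# expression such as t.amount.sum(), split() yields a chunk expression that
# reduces each chunk and an aggregate expression that combines the per-chunk
# results, so the body above amounts to
#
#     parts        = [compute(chunk_expr, {chunk: part}) for part in data]
#     intermediate = <np/pd/list concatenation of parts, chosen above>
#     result       = compute(agg_expr, {agg: intermediate})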
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, Chunks)
def compute_down(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return compute(expr, {leaf: into(Iterator, data)}, **kwargs)
else:
raise MDNotImplementedError()
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/chunks.py",
"copies": "1",
"size": "1792",
"license": "bsd-3-clause",
"hash": 3515949893189694500,
"line_mean": 28.3770491803,
"line_max": 68,
"alpha_frac": 0.69140625,
"autogenerated": false,
"ratio": 3.534516765285996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4725923015285996,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from multipledispatch import MDNotImplementedError
from odo import Chunks, convert, into
from collections import Iterator, Iterable
from toolz import curry, concat
from datashape.dispatch import dispatch
import pandas as pd
import numpy as np
from ..expr import Head, ElemWise, Distinct, Symbol, Expr, path
from ..expr.split import split
from .core import compute
from .pmap import get_default_pmap
__all__ = ['Cheap', 'compute_chunk', 'compute_down']
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, Chunks)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return convert(Iterator, data)
else:
raise MDNotImplementedError()
def compute_chunk(chunk, chunk_expr, part):
return compute(chunk_expr, {chunk: part})
@dispatch(Expr, Chunks)
def compute_down(expr, data, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)
parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], (Iterable, Iterator)):
intermediate = list(concat(parts))
return compute(agg_expr, {agg: intermediate})
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, Chunks)
def compute_down(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return compute(expr, {leaf: into(Iterator, data)}, **kwargs)
else:
raise MDNotImplementedError()
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/chunks.py",
"copies": "16",
"size": "1826",
"license": "bsd-3-clause",
"hash": -1086373702137248600,
"line_mean": 26.6666666667,
"line_max": 68,
"alpha_frac": 0.6861993428,
"autogenerated": false,
"ratio": 3.504798464491363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006313131313131313,
"num_lines": 66
} |
from __future__ import absolute_import, division, print_function
from nfldb.db import _upsert
class Entity (object):
"""
This is an abstract base class that handles most of the SQL
plumbing for entities in `nfldb`. Its interface is meant to be
declarative: specify the schema and let the methods defined here
do the SQL generation work. However, it is possible to override
methods (like `nfldb.Entity._sql_field`) when more customization
is desired.
Note that many of the methods defined here take an `aliases`
argument. This should be a dictionary mapping table name (defined
in `nfldb.Entity._sql_tables`) to some custom prefix. If it
isn't provided, then the table name itself is used.
"""
# This class doesn't introduce any instance variables, but we need
# to declare as such, otherwise all subclasses will wind up with a
# `__dict__`. (Thereby negating the benefit of using __slots__.)
__slots__ = []
_sql_tables = {}
"""
A dictionary with four keys: `primary`, `tables`, `managed` and
`derived`.
The `primary` key should map to a list of primary key
fields that correspond to a shared minimal subset of primary keys
in all tables that represent this entity. (i.e., It should be the
foreign key that joins all tables in the representation together.)
The `tables` key should map to an association list of table names
that map to lists of fields for that table. The lists of fields for
every table should be *disjoint*: no two tables may share a field
name in common (outside of the primary key).
The `managed` key should be a list of tables that are managed
directly by `nfldb`. `INSERT`, `UPDATE` and `DELETE` queries
will be generated appropriately. (Tables not in this list are
assumed to be maintained by the database itself, e.g., they are
actually views or materialized views maintained by triggers.)
The `derived` key should map to a list of *computed* fields. These
are fields that aren't directly stored in the table, but can be
computed from combining columns in the table (like `offense_tds` or
`points`). This API will expose such fields as regular SQL columns
in the API, and will handle writing them for you in `WHERE` and
`ORDER BY` statements. The actual implementation of each computed
field should be in an entity's `_sql_field` method (overriding the
one defined on `nfldb.Entity`). The derived fields must be listed
here so that the SQL generation code is aware of them.
"""
@classmethod
def _sql_columns(cls):
"""
Returns all columns defined for this entity. Every field
corresponds to a single column in a table.
The first `N` columns returned correspond to this entity's
primary key, where `N` is the number of columns in the
primary key.
"""
cols = cls._sql_tables['primary'][:]
for table, table_cols in cls._sql_tables['tables']:
cols += table_cols
return cols
@classmethod
def sql_fields(cls):
"""
Returns a list of all SQL fields across all tables for this
entity, including derived fields. This method can be used
in conjunction with `nfldb.Entity.from_row_tuple` to quickly
        create new `nfldb` objects without ever constructing a dict.
"""
if not hasattr(cls, '_cached_sql_fields'):
cls._cached_sql_fields = cls._sql_columns()
cls._cached_sql_fields += cls._sql_tables['derived']
return cls._cached_sql_fields
@classmethod
def from_row_dict(cls, db, row):
"""
Introduces a new entity object from a full SQL row result from
the entity's tables. (i.e., `row` is a dictionary mapping
column to value.) Note that the column names must be of the
form '{entity_name}_{column_name}'. For example, in the `game`
table, the `gsis_id` column must be named `game_gsis_id` in
`row`.
"""
obj = cls(db)
seta = setattr
prefix = cls._sql_primary_table() + '_'
slice_from = len(prefix)
for k in row:
if k.startswith(prefix):
seta(obj, k[slice_from:], row[k])
return obj
@classmethod
def from_row_tuple(cls, db, t):
"""
Given a tuple `t` corresponding to a result from a SELECT query,
this will construct a new instance for this entity. Note that
the tuple `t` must be in *exact* correspondence with the columns
returned by `nfldb.Entity.sql_fields`.
"""
cols = cls.sql_fields()
seta = setattr
obj = cls(db)
for i, field in enumerate(cols):
seta(obj, field, t[i])
return obj
@classmethod
def _sql_from(cls, aliases=None):
"""
Return a valid SQL `FROM table AS alias [LEFT JOIN extra_table
...]` string for this entity.
"""
# This is a little hokey. Pick the first table as the 'FROM' table.
# Subsequent tables are joined.
from_table = cls._sql_primary_table()
as_from_table = cls._sql_table_alias(from_table, aliases)
extra_tables = ''
for table, _ in cls._sql_tables['tables'][1:]:
extra_tables += cls._sql_join_to(cls,
from_table=from_table,
to_table=table,
from_aliases=aliases,
to_aliases=aliases)
return '''
FROM {from_table} AS {as_from_table}
{extra_tables}
'''.format(from_table=from_table, as_from_table=as_from_table,
extra_tables=extra_tables)
@classmethod
def _sql_select_fields(cls, fields, wrap=None, aliases=None):
"""
Returns correctly qualified SELECT expressions for each
field in `fields` (namely, a field may be a derived field).
        If `wrap` is not `None`, then it is applied to the result
of calling `cls._sql_field` on each element in `fields`.
All resulting fields are aliased with `AS` to correspond to
the name given in `fields`. Namely, this makes table aliases
opaque to the resulting query, but this also disallows
selecting columns of the same name from multiple tables.
"""
if wrap is None:
wrap = lambda x: x
sql = lambda f: wrap(cls._sql_field(f, aliases=aliases))
entity_prefix = cls._sql_primary_table()
return ['%s AS %s_%s' % (sql(f), entity_prefix, f) for f in fields]
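    # Illustration (hedged, names follow the from_row_dict docstring above):
    # for the 'game' entity and the column 'gsis_id' this yields roughly
    # "game.gsis_id AS game_gsis_id", i.e. the '{entity}_{column}' aliases
    # that from_row_dict later strips back off.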
@classmethod
def _sql_relation_distance(cls_from, cls_to):
primf = set(cls_from._sql_tables['primary'])
primt = set(cls_to._sql_tables['primary'])
if len(primf.intersection(primt)) == 0:
return None
outsiders = primf.difference(primt).union(primt.difference(primf))
if len(primf) > len(primt):
return -len(outsiders)
else:
return len(outsiders)
@classmethod
def _sql_join_all(cls_from, cls_tos):
"""
Given a list of sub classes `cls_tos` of `nfldb.Entity`,
produce as many SQL `LEFT JOIN` clauses as is necessary so
that all fields in all entity types given are available for
filtering.
Unlike the other join functions, this one has no alias support
or support for controlling particular tables.
The key contribution of this function is that it knows how to
connect a group of tables correctly. e.g., If the group of
tables is `game`, `play` and `play_player`, then `game` and
`play` will be joined and `play` and `play_player` will be
joined. (Instead of `game` and `play_player` or some other
        erroneous combination.)
In essence, each table is joined with the least general table
in the group.
"""
assert cls_from not in cls_tos, \
'cannot join %s with itself with `sql_join_all`' % cls_from
def dist(f, t):
return f._sql_relation_distance(t)
def relation_dists(froms, tos):
return filter(lambda (f, t, d): d is not None,
((f, t, dist(f, t)) for f in froms for t in tos))
def more_general(froms, tos):
return filter(lambda (f, t, d): d < 0, relation_dists(froms, tos))
def more_specific(froms, tos):
return filter(lambda (f, t, d): d > 0, relation_dists(froms, tos))
joins = ''
froms, tos = set([cls_from]), set(cls_tos)
while len(tos) > 0:
general = more_general(froms, tos)
specific = more_specific(froms, tos)
assert len(general) > 0 or len(specific) > 0, \
'Cannot compute distances between sets. From: %s, To: %s' \
% (froms, tos)
def add_join(f, t):
tos.discard(t)
froms.add(t)
return f._sql_join_to_all(t)
if general:
f, t, _ = max(general, key=lambda (f, t, d): d)
joins += add_join(f, t)
if specific:
f, t, _ = min(specific, key=lambda (f, t, d): d)
joins += add_join(f, t)
return joins
@classmethod
def _sql_join_to_all(cls_from, cls_to, from_table=None,
from_aliases=None, to_aliases=None):
"""
Given a **sub class** `cls_to` of `nfldb.Entity`, produce
as many SQL `LEFT JOIN` clauses as is necessary so that all
fields in `cls_to.sql_fields()` are available for filtering.
See the documentation for `nfldb.Entity._sql_join_to` for
information on the parameters.
"""
to_primary = cls_to._sql_primary_table()
joins = cls_from._sql_join_to(cls_to,
from_table=from_table,
to_table=to_primary,
from_aliases=from_aliases,
to_aliases=to_aliases)
for table, _ in cls_to._sql_tables['tables'][1:]:
joins += cls_to._sql_join_to(cls_to,
from_table=to_primary,
to_table=table,
from_aliases=to_aliases,
to_aliases=to_aliases)
return joins
@classmethod
def _sql_join_to(cls_from, cls_to,
from_table=None, to_table=None,
from_aliases=None, to_aliases=None):
"""
Given a **sub class** `cls_to` of `nfldb.Entity`, produce
a SQL `LEFT JOIN` clause.
If the primary keys in `cls_from` and `cls_to` have an empty
intersection, then an assertion error is raised.
Note that the first table defined for each of `cls_from` and
`cls_to` is used to join them if `from_table` or `to_table`
are `None`.
`from_aliases` are only applied to the `from` tables and
`to_aliases` are only applied to the `to` tables. This allows
one to do self joins.
"""
if from_table is None:
from_table = cls_from._sql_primary_table()
if to_table is None:
to_table = cls_to._sql_primary_table()
from_table = cls_from._sql_table_alias(from_table,
aliases=from_aliases)
as_to_table = cls_to._sql_table_alias(to_table, aliases=to_aliases)
from_pkey = cls_from._sql_tables['primary']
to_pkey = cls_to._sql_tables['primary']
# Avoiding set.intersection so we can preserve order.
common = [k for k in from_pkey if k in to_pkey]
assert len(common) > 0, \
"Cannot join %s to %s with non-overlapping primary keys." \
% (cls_from.__name__, cls_to.__name__)
fkey = [qualified_field(from_table, f) for f in common]
tkey = [qualified_field(as_to_table, f) for f in common]
return '''
LEFT JOIN {to_table} AS {as_to_table}
ON ({fkey}) = ({tkey})
'''.format(to_table=to_table, as_to_table=as_to_table,
fkey=', '.join(fkey), tkey=', '.join(tkey))
@classmethod
def _sql_primary_key(cls, table, aliases=None):
t = cls._sql_table_alias(table, aliases)
return [qualified_field(t, f)
for f in cls._sql_tables['primary']]
@classmethod
def _sql_primary_table(cls):
return cls._sql_tables['tables'][0][0]
@classmethod
def _sql_column_to_table(cls, name):
"""
Returns the table in `cls._sql_tables` containing the
field `name`.
If `name` corresponds to a primary key column, then
the primary table (first table) is returned.
        If a table could not be found, an `exceptions.KeyError` is
raised.
"""
if name in cls._sql_tables['primary']:
return cls._sql_primary_table()
for table_name, fields in cls._sql_tables['tables']:
if name in fields:
return table_name
raise KeyError("Could not find table for %s" % name)
@classmethod
def _sql_table_alias(cls, table_name, aliases):
if aliases is None or table_name not in aliases:
return table_name
else:
return aliases[table_name]
@classmethod
def _sql_field(cls, name, aliases=None):
"""
Returns a SQL expression corresponding to the field `name`.
The default implementation returns `table_for_name`.`name`.
Entities can override this for special computed fields.
"""
prefix = cls._sql_table_alias(cls._sql_column_to_table(name), aliases)
return qualified_field(prefix, name)
def _save(self, cursor):
"""
Does an upsert for each managed table specified in
`nfldb.Entity._sql_tables`. The data is drawn from
`self`.
"""
for table, prim, vals in self._rows:
_upsert(cursor, table, vals, prim)
@property
def _rows(self):
prim = self._sql_tables['primary'][:]
for table, table_fields in self._sql_tables['tables']:
if table in self._sql_tables['managed']:
r = _as_row(prim + table_fields, self)
yield table, r[0:len(prim)], r
def _as_row(fields, obj):
"""
Given a list of fields in a SQL table and a Python object, return
an association list where the keys are from `fields` and the values
are the result of `getattr(obj, fields[i], None)` for some `i`.
Note that the `time_inserted` and `time_updated` fields are always
omitted.
"""
exclude = ('time_inserted', 'time_updated')
return [(f, getattr(obj, f, None)) for f in fields if f not in exclude]
def ands(*exprs):
anded = ' AND '.join('(%s)' % e for e in exprs if e)
return 'true' if len(anded) == 0 else anded
def qualified_field(alias, field):
"""
Qualifies the SQL `field` with `alias`. If `alias` is empty,
then no qualification is used. (Just `field` is returned.)
"""
if not alias:
return field
else:
return '%s.%s' % (alias, field)
| {
"repo_name": "verdimrc/nfldb",
"path": "nfldb/sql.py",
"copies": "5",
"size": "15501",
"license": "unlicense",
"hash": 2931798795539151400,
"line_mean": 38.4427480916,
"line_max": 78,
"alpha_frac": 0.5810592865,
"autogenerated": false,
"ratio": 4.008533747090768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7089593033590768,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from numbers import Number
from toolz import concat, first, curry, compose
from datashape import DataShape
from blaze import compute, ndim
from blaze.dispatch import dispatch
from blaze.compute.core import compute_up, optimize
from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,
Expr, Slice, Broadcast)
from blaze.expr.split import split
from dask.array.core import (_concatenate2, Array, atop, names, transpose,
tensordot)
def compute_it(expr, leaves, *data, **kwargs):
kwargs.pop('scope')
return compute(expr, dict(zip(leaves, data)), **kwargs)
def elemwise_array(expr, *data, **kwargs):
leaves = expr._inputs
expr_inds = tuple(range(ndim(expr)))[::-1]
return atop(curry(compute_it, expr, leaves, **kwargs),
expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
try:
from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,
Broadcastable)
def compute_broadcast(expr, *data, **kwargs):
expr_inds = tuple(range(ndim(expr)))[::-1]
func = get_numba_ufunc(expr)
return atop(func,
expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
def optimize_array(expr, *data):
return broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=Broadcastable)
for i in range(5):
compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)
optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)
except ImportError:
pass
for i in range(5):
compute_up.register(ElemWise, *([Array] * i))(elemwise_array)
@dispatch(Reduction, Array)
def compute_up(expr, data, **kwargs):
leaf = expr._leaves()[0]
chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +
(leaf.dshape.measure,))))
(chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,
chunk=chunk)
inds = tuple(range(ndim(leaf)))
tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,
inds)
return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
curry(_concatenate2, axes=expr.axis)),
tuple(i for i in inds if i not in expr.axis),
tmp, inds)
@dispatch(Transpose, Array)
def compute_up(expr, data, **kwargs):
return transpose(data, expr.axes)
@dispatch(TensorDot, Array, Array)
def compute_up(expr, lhs, rhs, **kwargs):
return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))
@dispatch(Slice, Array)
def compute_up(expr, data, **kwargs):
return data[expr.index]
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/compute/dask.py",
"copies": "3",
"size": "2914",
"license": "bsd-3-clause",
"hash": -5792800541194806000,
"line_mean": 32.1136363636,
"line_max": 83,
"alpha_frac": 0.6091283459,
"autogenerated": false,
"ratio": 3.5842558425584254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008554057499840632,
"num_lines": 88
} |
from __future__ import absolute_import, division, print_function
from numbers import Number
import numpy as np
from toolz import concat, first, curry, compose
from datashape import DataShape
from blaze.dispatch import dispatch
from blaze.compute.core import compute_up, optimize
from blaze import compute, ndim
from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,
Expr, Slice, Broadcast)
from .core import (getem, _concatenate2, top, Array, get, atop, names,
transpose, tensordot)
from .slicing import slice_array
def compute_it(expr, leaves, *data, **kwargs):
kwargs.pop('scope')
return compute(expr, dict(zip(leaves, data)), **kwargs)
def elemwise_array(expr, *data, **kwargs):
leaves = expr._inputs
expr_inds = tuple(range(ndim(expr)))[::-1]
return atop(curry(compute_it, expr, leaves, **kwargs),
next(names), expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
try:
from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,
Broadcastable)
def compute_broadcast(expr, *data, **kwargs):
leaves = expr._inputs
expr_inds = tuple(range(ndim(expr)))[::-1]
func = get_numba_ufunc(expr)
return atop(func,
next(names), expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
def optimize_array(expr, *data):
return broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=Broadcastable)
for i in range(5):
compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)
optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)
except ImportError:
pass
for i in range(5):
compute_up.register(ElemWise, *([Array] * i))(elemwise_array)
from blaze.expr.split import split
@dispatch(Reduction, Array)
def compute_up(expr, data, **kwargs):
leaf = expr._leaves()[0]
chunk = symbol('chunk', DataShape(*(tuple(map(first, data.blockdims)) +
(leaf.dshape.measure,))))
(chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr, chunk=chunk)
inds = tuple(range(ndim(leaf)))
tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),
next(names), inds,
data, inds)
return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
curry(_concatenate2, axes=expr.axis)),
next(names), tuple(i for i in inds if i not in expr.axis),
tmp, inds)
@dispatch(Transpose, Array)
def compute_up(expr, data, **kwargs):
return transpose(data, expr.axes)
@dispatch(TensorDot, Array, Array)
def compute_up(expr, lhs, rhs, **kwargs):
return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))
@dispatch(Slice, Array)
def compute_up(expr, data, **kwargs):
return data[expr.index]
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/blaze.py",
"copies": "1",
"size": "2956",
"license": "bsd-3-clause",
"hash": 2220561997763636000,
"line_mean": 31.1304347826,
"line_max": 83,
"alpha_frac": 0.6339648173,
"autogenerated": false,
"ratio": 3.4573099415204678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4591274758820468,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from numpy import inf
import toolz
import datashape
from datashape import Record, DataShape, dshape, TimeDelta
from datashape import coretypes as ct
from datashape.predicates import iscollection, isboolean, isnumeric, isdatelike
from .core import common_subexpression
from .expressions import Expr, ndim
from .strings import isstring
from .expressions import dshape_method_list, method_properties
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_hash', '_child', 'axis', 'keepdims'
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
@property
def schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
try:
return self._child._name + '_' + type(self).__name__
except (AttributeError, ValueError, TypeError):
return type(self).__name__
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(
set(self.__slots__[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
@property
def schema(self):
return DataShape(datashape.maxtype(super(sum, self).schema))
class max(Reduction):
pass
class min(Reduction):
pass
class mean(Reduction):
schema = dshape(ct.real)
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(var, self).__init__(child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(std, self).__init__(child, *args, **kwargs)
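# Hedged usage sketch (the symbol is hypothetical): both reductions take the
# same constructor arguments, with `unbiased` toggling the delta degrees of
# freedom, e.g.
#
#     t = symbol('t', 'var * {amount: float64}')
#     population_var = var(t.amount)                  # ddof = 0
#     sample_std     = std(t.amount, unbiased=True)   # ddof = 1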
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
---------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_hash', '_child', 'names', 'values', 'axis', 'keepdims'
def __init__(self, _child, names, values, axis=None, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
self.axis = axis
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
summary.__doc__ = Summary.__doc__
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
return sum(abs(expr) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
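# Hedged equivalences implied by the branches above (expr stands for any
# numeric blaze expression):
#
#     vnorm(expr, ord=inf)   -> max(abs(expr))
#     vnorm(expr, ord=1)     -> sum(abs(expr))
#     vnorm(expr, ord=None)  -> sum(expr ** 2) ** 0.5    # Euclidean norm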
dshape_method_list.extend([
(iscollection, set([count, nelements])),
(lambda ds: (iscollection(ds) and
(isstring(ds) or isnumeric(ds) or isboolean(ds) or
isdatelike(ds) or isinstance(ds, TimeDelta))),
set([min, max])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all])),
(lambda ds: iscollection(ds) and (isnumeric(ds) or isboolean(ds)),
set([mean, sum, std, var, vnorm])),
])
method_properties.update([nrows])
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/reductions.py",
"copies": "1",
"size": "8872",
"license": "bsd-3-clause",
"hash": 4537954242533676000,
"line_mean": 26.2147239264,
"line_max": 80,
"alpha_frac": 0.561767358,
"autogenerated": false,
"ratio": 3.756138865368332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9817716872463235,
"avg_score": 0.00003787018101946527,
"num_lines": 326
} |
from __future__ import absolute_import, division, print_function
from .object_hook import object_hook, register
import werkzeug.exceptions as wz_ex
from blaze.compatibility import builtins, reduce
import numpy as np
import pandas as pd
_converters_trusted = object_hook._converters.copy()
def object_hook_trusted(ob, _converters=_converters_trusted):
return object_hook(ob, _converters=_converters)
object_hook_trusted._converters = _converters_trusted
object_hook_trusted.register = register(converters=_converters_trusted)
del _converters_trusted
@object_hook_trusted.register('callable')
def numpy_pandas_function_from_str(f):
"""
reconstruct function from string representation
"""
if f.startswith(np.__name__):
mod = np
elif f.startswith(pd.__name__):
mod = pd
elif f.startswith(builtins.__name__):
mod = builtins
else:
msg = ("Function {} not recognized; only numpy, pandas, or builtin "
"functions are supported.")
raise wz_ex.NotImplemented(msg.format(f))
fcn = reduce(getattr, f.split('.')[1:], mod)
return fcn
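# Hedged illustration: with the 'callable' hook registered above, serialized
# function names round-trip back to live objects, e.g.
#
#     numpy_pandas_function_from_str('numpy.nanmean') is np.nanmean  # True
#     numpy_pandas_function_from_str('pandas.concat') is pd.concat   # True
#
# while names outside numpy, pandas, or builtins raise werkzeug's
# NotImplemented (HTTP 501).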
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/serialization/object_hook_trusted.py",
"copies": "3",
"size": "1121",
"license": "bsd-3-clause",
"hash": 4076010981351232000,
"line_mean": 31.0285714286,
"line_max": 76,
"alpha_frac": 0.6949152542,
"autogenerated": false,
"ratio": 3.9893238434163703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000529100529100529,
"num_lines": 35
} |
from __future__ import (absolute_import, division, print_function)
from odm2api.base import modelBase
from sqlalchemy import BigInteger, Boolean, Column, Date, DateTime, Float, ForeignKey, Integer, String, case
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy.orm import relationship
Base = modelBase.Base
BigIntegerType = BigInteger()
BigIntegerType = BigIntegerType.with_variant(sqlite.INTEGER(), 'sqlite')
BigIntegerType = BigIntegerType.with_variant(postgresql.BIGINT(), 'postgresql')
BigIntegerType = BigIntegerType.with_variant(mysql.BIGINT(), 'mysql')
DateTimeType = DateTime()
DateTimeType = DateTimeType.with_variant(sqlite.DATETIME(), 'sqlite')
def is_hex(s):
try:
int(s, base=16)
return True
except ValueError:
return False
################################################################################
# CV
################################################################################
class CV(object):
__table_args__ = {u'schema': 'odm2'}
Term = Column('term', String(255), nullable=False)
Name = Column('name', String(255), primary_key=True)
Definition = Column('definition', String(1000))
Category = Column('category', String(255))
SourceVocabularyURI = Column('sourcevocabularyuri', String(255))
class CVActionType(Base, CV):
__tablename__ = 'cv_actiontype'
class CVAggregationStatistic(Base, CV):
__tablename__ = 'cv_aggregationstatistic'
class CVAnnotationType(Base, CV):
__tablename__ = 'cv_annotationtype'
class CVCensorCode(Base, CV):
__tablename__ = 'cv_censorcode'
class CVDataQualityType(Base, CV):
__tablename__ = 'cv_dataqualitytype'
class CVDataSetType(Base, CV):
__tablename__ = 'cv_datasettypecv'
class CVDeploymentType(Base, CV):
__tablename__ = 'cv_deploymenttype'
class CVDirectiveType(Base, CV):
__tablename__ = 'cv_directivetype'
class CVElevationDatum(Base, CV):
__tablename__ = 'cv_elevationdatum'
class CVEquipmentType(Base, CV):
__tablename__ = 'cv_equipmenttype'
class CVMediumType(Base, CV):
__tablename__ = 'cv_medium'
class CVMethodType(Base, CV):
__tablename__ = 'cv_methodtype'
class CVOrganizationType(Base, CV):
__tablename__ = 'cv_organizationtype'
class CVPropertyDataType(Base, CV):
__tablename__ = 'cv_propertydatatype'
class CVQualityCode(Base, CV):
__tablename__ = 'cv_qualitycode'
class CVResultType(Base, CV):
__tablename__ = 'cv_resulttype'
class CVRelationshipType(Base, CV):
__tablename__ = 'cv_relationshiptype'
class CVSamplingFeatureGeoType(Base, CV):
__tablename__ = 'cv_samplingfeaturegeotype'
class CVSamplingFeatureType(Base, CV):
__tablename__ = 'cv_samplingfeaturetype'
class CVSpatialOffsetType(Base, CV):
__tablename__ = 'cv_spatialoffsettype'
class CVSpeciation(Base, CV):
__tablename__ = 'cv_speciation'
class CVSpecimenType(Base, CV):
__tablename__ = 'cv_specimentype'
class CVSiteType(Base, CV):
__tablename__ = 'cv_sitetype'
class CVStatus(Base, CV):
__tablename__ = 'cv_status'
class CVTaxonomicClassifierType(Base, CV):
__tablename__ = 'cv_taxonomicclassifiertype'
class CVUnitsType(Base, CV):
__tablename__ = 'cv_unitstype'
class CVVariableName(Base, CV):
__tablename__ = 'cv_variablename'
class CVVariableType(Base, CV):
__tablename__ = 'cv_variabletype'
class CVReferenceMaterialMedium(Base, CV):
__tablename__ = 'cv_referencematerialmedium'
# ################################################################################
# Core
# ################################################################################
class People(Base):
"""
Individuals that perform actions.
"""
PersonID = Column('personid', Integer, primary_key=True, nullable=False)
PersonFirstName = Column('personfirstname', String(255), nullable=False)
PersonMiddleName = Column('personmiddlename', String(255))
PersonLastName = Column('personlastname', String(255), nullable=False)
class Organizations(Base):
"""
A group of people.
"""
OrganizationID = Column('organizationid', Integer, primary_key=True, nullable=False)
OrganizationTypeCV = Column('organizationtypecv', ForeignKey(CVOrganizationType.Name), nullable=False,
index=True)
OrganizationCode = Column('organizationcode', String(50), nullable=False)
OrganizationName = Column('organizationname', String(255), nullable=False)
OrganizationDescription = Column('organizationdescription', String(500))
OrganizationLink = Column('organizationlink', String(255))
ParentOrganizationID = Column('parentorganizationid', ForeignKey('odm2.organizations.organizationid'))
OrganizationObj = relationship(u'Organizations', remote_side=[OrganizationID])
class Affiliations(Base):
AffiliationID = Column('affiliationid', Integer, primary_key=True, nullable=False)
PersonID = Column('personid', ForeignKey(People.PersonID), nullable=False)
OrganizationID = Column('organizationid', ForeignKey(Organizations.OrganizationID))
IsPrimaryOrganizationContact = Column('isprimaryorganizationcontact', Boolean)
AffiliationStartDate = Column('affiliationstartdate', Date, nullable=False)
AffiliationEndDate = Column('affiliationenddate', Date)
PrimaryPhone = Column('primaryphone', String(50))
PrimaryEmail = Column('primaryemail', String(255), nullable=False)
PrimaryAddress = Column('primaryaddress', String(255))
PersonLink = Column('personlink', String(255))
OrganizationObj = relationship(Organizations)
PersonObj = relationship(People)
class Methods(Base):
"""
The procedure used to perform an action.
"""
MethodID = Column('methodid', Integer, primary_key=True, nullable=False)
MethodTypeCV = Column('methodtypecv', ForeignKey(CVMethodType.Name), nullable=False, index=True)
MethodCode = Column('methodcode', String(50), nullable=False)
MethodName = Column('methodname', String(255), nullable=False)
MethodDescription = Column('methoddescription', String(500))
MethodLink = Column('methodlink', String(255))
OrganizationID = Column('organizationid', Integer, ForeignKey(Organizations.OrganizationID))
OrganizationObj = relationship(Organizations)
class Actions(Base):
"""
Actions are performed by people and may have a result.
"""
ActionID = Column('actionid', Integer, primary_key=True, nullable=False)
ActionTypeCV = Column('actiontypecv', ForeignKey(CVActionType.Name), nullable=False, index=True)
MethodID = Column('methodid', ForeignKey(Methods.MethodID), nullable=False)
BeginDateTime = Column('begindatetime', DateTime, nullable=False)
BeginDateTimeUTCOffset = Column('begindatetimeutcoffset', Integer, nullable=False)
EndDateTime = Column('enddatetime', DateTime)
EndDateTimeUTCOffset = Column('enddatetimeutcoffset', Integer)
ActionDescription = Column('actiondescription', String(500))
ActionFileLink = Column('actionfilelink', String(255))
MethodObj = relationship(Methods)
class ActionBy(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), nullable=False)
AffiliationID = Column('affiliationid', ForeignKey(Affiliations.AffiliationID), nullable=False)
IsActionLead = Column('isactionlead', Boolean, nullable=False)
RoleDescription = Column('roledescription', String(500))
ActionObj = relationship(Actions)
AffiliationObj = relationship(Affiliations)
class SamplingFeatures(Base):
"""
Where or on what an action was performed.
"""
SamplingFeatureID = Column('samplingfeatureid', Integer, primary_key=True, nullable=False)
"""int: Primary key identifier."""
SamplingFeatureUUID = Column('samplingfeatureuuid', String(36), nullable=False)
"""str: A universally unique identifier for the sampling feature."""
SamplingFeatureTypeCV = Column('samplingfeaturetypecv', ForeignKey(CVSamplingFeatureType.Name),
nullable=False, index=True)
"""str: CV term describing the type of sampling feature."""
SamplingFeatureCode = Column('samplingfeaturecode', String(50), nullable=False)
"""str: A short but meaningful text identifier for the sampling feature."""
SamplingFeatureName = Column('samplingfeaturename', String(255))
"""str: Sampling Feature name (free text)."""
SamplingFeatureDescription = Column('samplingfeaturedescription', String(500))
"""str: Text describing the sampling feature."""
SamplingFeatureGeotypeCV = Column('samplingfeaturegeotypecv', ForeignKey(CVSamplingFeatureGeoType.Name),
index=True)
"""str: Dimensionality of SamplingFeature; point2d, line2d, etc."""
Elevation_m = Column('elevation_m', Float(53))
"""float: The elevation of the sampling feature in meters, or in the case of Specimen,
the elevation from where the SamplingFeature.Specimen was collected""" # noqa
ElevationDatumCV = Column('elevationdatumcv', ForeignKey(CVElevationDatum.Name), index=True)
"""str: The code for the vertical geodetic datum that specifies the zero point for
the Sampling Feature Elevation""" # noqa
# FeatureGeometry = Column('featuregeometry', String(50))
"""object: The location geometry of the sampling feature on the Earth expressed using a
geometry data type. Can be a Point, Curve (profile, trajectory, etc),
Surface (flat polygons, etc) or Solid/Volume (although often limited to
2D geometries). """ # noqa
FeatureGeometryWKT = Column('featuregeometrywkt', String(50))
"""str: The location geometry of the sampling feature on the Earth expressed as
well known text (WKT). Can be a Point, Curve (profile, trajectory, etc.),
Surface (flat polygons, etc.), or Solid/Volume (although often limited to
2D geometries).""" # noqa
__mapper_args__ = {
'polymorphic_on': case(
[
(SamplingFeatureTypeCV == 'Specimen', 'Specimen'),
(SamplingFeatureTypeCV == 'Site', 'Site'),
],
else_='samplingfeatures'),
'polymorphic_identity': 'samplingfeatures',
}
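    # Note (hedged): the case() expression above routes rows to subclasses by
    # SamplingFeatureTypeCV, so the specimen and site models defined elsewhere
    # in this module are expected to subclass SamplingFeatures and declare a
    # matching __mapper_args__ = {'polymorphic_identity': 'Specimen'} (or
    # 'Site').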
class FeatureActions(Base):
"""
Provides flexible linkage between Actions and the SamplingFeatures
on which or at which they were performed.
"""
FeatureActionID = Column('featureactionid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
ActionObj = relationship(Actions)
SamplingFeatureObj = relationship(SamplingFeatures)
class DataSets(Base):
"""
Enables grouping of results into a larger dataset.
"""
DataSetID = Column('datasetid', Integer, primary_key=True, nullable=False)
# This has been changed to String to support multiple database uuid types
DataSetUUID = Column('datasetuuid', String(255), nullable=False)
DataSetTypeCV = Column('datasettypecv', ForeignKey(CVDataSetType.Name), nullable=False, index=True)
DataSetCode = Column('datasetcode', String(50), nullable=False)
DataSetTitle = Column('datasettitle', String(255), nullable=False)
DataSetAbstract = Column('datasetabstract', String(500), nullable=False)
class ProcessingLevels(Base):
"""
Levels to which data have been quality controlled.
"""
ProcessingLevelID = Column('processinglevelid', Integer, primary_key=True, nullable=False)
ProcessingLevelCode = Column('processinglevelcode', String(50), nullable=False)
Definition = Column('definition', String(500))
Explanation = Column('explanation', String(500))
class RelatedActions(Base):
"""
    Enables specifying relationships among Actions (e.g., workflows).
"""
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedActionID = Column('relatedactionid', ForeignKey(Actions.ActionID), nullable=False)
ActionObj = relationship(Actions, primaryjoin='RelatedActions.ActionID == Actions.ActionID')
RelatedActionObj = relationship(Actions, primaryjoin='RelatedActions.RelatedActionID == Actions.ActionID')
class TaxonomicClassifiers(Base):
"""
Terms for classifying results.
"""
TaxonomicClassifierID = Column('taxonomicclassifierid', Integer, primary_key=True, nullable=False)
TaxonomicClassifierTypeCV = Column(
'taxonomicclassifiertypecv',
ForeignKey(CVTaxonomicClassifierType.Name),
nullable=False,
index=True
)
TaxonomicClassifierName = Column('taxonomicclassifiername', String(255),
nullable=False)
TaxonomicClassifierCommonName = Column('taxonomicclassifiercommonname', String(255))
TaxonomicClassifierDescription = Column('taxonomicclassifierdescription', String(500))
ParentTaxonomicClassifierID = Column('parenttaxonomicclassifierid',
ForeignKey('odm2.taxonomicclassifiers.taxonomicclassifierid'))
parent = relationship(u'TaxonomicClassifiers', remote_side=[TaxonomicClassifierID])
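# Illustrative sketch (hypothetical data): ``parent`` above is a self-referential
# (adjacency-list) relationship, so a taxonomy can be walked upward by following
# ``parent`` until it is None.
def _example_taxonomy_lineage(classifier):
    lineage = []
    node = classifier
    while node is not None:
        lineage.append(node.TaxonomicClassifierName)
        node = node.parent
    # e.g. ['Oncorhynchus mykiss', 'Oncorhynchus', 'Salmonidae'] (made-up data)
    return lineage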
class Units(Base):
"""
Units of measure.
"""
UnitsID = Column('unitsid', Integer, primary_key=True, nullable=False)
UnitsTypeCV = Column('unitstypecv', ForeignKey(CVUnitsType.Name), nullable=False, index=True)
UnitsAbbreviation = Column('unitsabbreviation', String(255), nullable=False)
UnitsName = Column('unitsname', String, nullable=False)
UnitsLink = Column('unitslink', String(255))
class Variables(Base):
"""
What was observed.
"""
VariableID = Column('variableid', Integer, primary_key=True, nullable=False)
VariableTypeCV = Column('variabletypecv', ForeignKey(CVVariableType.Name), nullable=False, index=True)
VariableCode = Column('variablecode', String(50), nullable=False)
VariableNameCV = Column('variablenamecv', ForeignKey(CVVariableName.Name), nullable=False, index=True)
VariableDefinition = Column('variabledefinition', String(500))
SpeciationCV = Column('speciationcv', ForeignKey(CVSpeciation.Name), index=True)
NoDataValue = Column('nodatavalue', Float(asdecimal=True), nullable=False)
class Results(Base):
"""
The result of an action.
"""
ResultID = Column('resultid', BigIntegerType, primary_key=True)
# This has been changed to String to support multiple database uuid types
# ResultUUID = Column(UNIQUEIDENTIFIER, nullable=False)
ResultUUID = Column('resultuuid', String(36), nullable=False)
FeatureActionID = Column('featureactionid', ForeignKey(FeatureActions.FeatureActionID), nullable=False)
ResultTypeCV = Column('resulttypecv', ForeignKey(CVResultType.Name), nullable=False, index=True)
VariableID = Column('variableid', ForeignKey(Variables.VariableID), nullable=False)
UnitsID = Column('unitsid', ForeignKey(Units.UnitsID), nullable=False)
TaxonomicClassifierID = Column('taxonomicclassifierid',
ForeignKey(TaxonomicClassifiers.TaxonomicClassifierID))
ProcessingLevelID = Column('processinglevelid', ForeignKey(ProcessingLevels.ProcessingLevelID),
nullable=False)
ResultDateTime = Column('resultdatetime', DateTime)
ResultDateTimeUTCOffset = Column('resultdatetimeutcoffset', BigIntegerType)
ValidDateTime = Column('validdatetime', DateTime)
ValidDateTimeUTCOffset = Column('validdatetimeutcoffset', BigIntegerType)
StatusCV = Column('statuscv', ForeignKey(CVStatus.Name), index=True)
SampledMediumCV = Column('sampledmediumcv', ForeignKey(CVMediumType.Name), nullable=False, index=True)
ValueCount = Column('valuecount', Integer, nullable=False)
FeatureActionObj = relationship(FeatureActions)
ProcessingLevelObj = relationship(ProcessingLevels)
TaxonomicClassifierObj = relationship(TaxonomicClassifiers)
UnitsObj = relationship(Units)
VariableObj = relationship(Variables)
__mapper_args__ = {
'polymorphic_on': case([
(ResultTypeCV == 'Point coverage', 'Point coverage'),
(ResultTypeCV == 'Profile Coverage', 'Profile Coverage'),
(ResultTypeCV == 'Category coverage', 'Category coverage'),
(ResultTypeCV == 'Transect Coverage', 'Transect Coverage'),
(ResultTypeCV == 'Spectra coverage', 'Spectra coverage'),
(ResultTypeCV == 'Time series coverage', 'Time series coverage'),
(ResultTypeCV == 'Section coverage', 'Section coverage'),
(ResultTypeCV == 'Trajectory coverage', 'Trajectory coverage'),
(ResultTypeCV == 'Measurement', 'Measurement'),
], else_='results'),
'polymorphic_identity': 'results',
}
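# Illustrative sketch (not part of the schema): Results is polymorphic on
# ResultTypeCV, so querying the base class yields instances of the type-specific
# subclasses defined near the end of this module (e.g. MeasurementResults,
# TimeSeriesResults) whenever the CV term matches one of the case() branches
# above. ``session`` is a hypothetical SQLAlchemy Session.
def _example_count_results_by_type(session):
    counts = {}
    for result in session.query(Results):
        name = type(result).__name__
        counts[name] = counts.get(name, 0) + 1
    return counts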
# ################################################################################
# Equipment
# ################################################################################
class DataLoggerProgramFiles(Base):
ProgramID = Column('programid', Integer, primary_key=True, nullable=False)
AffiliationID = Column('affiliationid', Integer, ForeignKey(Affiliations.AffiliationID), nullable=False)
ProgramName = Column('programname', String(255), nullable=False)
ProgramDescription = Column('programdescription', String(500))
ProgramVersion = Column('programversion', String(50))
ProgramFileLink = Column('programfilelink', String(255))
AffiliationObj = relationship(Affiliations)
class DataLoggerFiles(Base):
DataLoggerFileID = Column('dataloggerfileid', Integer, primary_key=True, nullable=False)
ProgramID = Column('programid', Integer, ForeignKey(DataLoggerProgramFiles.ProgramID), nullable=False)
DataLoggerFileName = Column('dataloggerfilename', String(255), nullable=False)
DataLoggerOutputFileDescription = Column('dataloggerfiledescription', String(500))
DataLoggerOutputFileLink = Column('dataloggerfilelink', String(255))
ProgramObj = relationship(DataLoggerProgramFiles)
class EquipmentModels(Base):
ModelID = Column('equipmentmodelid', Integer, primary_key=True, nullable=False)
ModelManufacturerID = Column('modelmanufacturerid', Integer,
ForeignKey(Organizations.OrganizationID), nullable=False)
ModelPartNumber = Column('modelpartnumber', String(50))
ModelName = Column('modelname', String(255), nullable=False)
ModelDescription = Column('modeldescription', String(500))
ModelSpecificationsFileLink = Column('modelspecificationsfilelink', String(255))
ModelLink = Column('modellink', String(255))
IsInstrument = Column('isinstrument', Boolean, nullable=False)
OrganizationObj = relationship(Organizations)
class InstrumentOutputVariables(Base):
InstrumentOutputVariableID = Column(
'instrumentoutputvariableid',
Integer,
primary_key=True,
nullable=False
)
ModelID = Column('modelid', Integer, ForeignKey(EquipmentModels.ModelID), nullable=False)
VariableID = Column('variableid', Integer, ForeignKey(Variables.VariableID), nullable=False)
InstrumentMethodID = Column('instrumentmethodid', Integer, ForeignKey(Methods.MethodID), nullable=False)
InstrumentResolution = Column('instrumentresolution', String(255))
InstrumentAccuracy = Column('instrumentaccuracy', String(255))
InstrumentRawOutputUnitsID = Column('instrumentrawoutputunitsid', Integer, ForeignKey(Units.UnitsID),
nullable=False)
MethodObj = relationship(Methods)
OutputUnitObj = relationship(Units)
EquipmentModelObj = relationship(EquipmentModels)
VariableObj = relationship(Variables)
class DataLoggerFileColumns(Base):
DataLoggerFileColumnID = Column('dataloggerfilecolumnid', Integer, primary_key=True, nullable=False)
ResultID = Column('resultid', BigIntegerType, ForeignKey(Results.ResultID))
DataLoggerFileID = Column('dataloggerfileid', Integer,
ForeignKey(DataLoggerFiles.DataLoggerFileID), nullable=False)
    InstrumentOutputVariableID = Column('instrumentoutputvariableid', Integer,
                                        ForeignKey(InstrumentOutputVariables.InstrumentOutputVariableID),
                                        nullable=False)
ColumnLabel = Column('columnlabel', String(50), nullable=False)
ColumnDescription = Column('columndescription', String(500))
MeasurementEquation = Column('measurementequation', String(255))
ScanInterval = Column('scaninterval', Float(50))
ScanIntervalUnitsID = Column('scanintervalunitsid', Integer, ForeignKey(Units.UnitsID))
RecordingInterval = Column('recordinginterval', Float(50))
RecordingIntervalUnitsID = Column('recordingintervalunitsid', Integer, ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column(
'aggregationstatisticcv',
String(255),
ForeignKey(CVAggregationStatistic.Name),
index=True
)
ResultObj = relationship(Results)
DataLoggerFileObj = relationship(DataLoggerFiles)
InstrumentOutputVariableObj = relationship(InstrumentOutputVariables)
ScanIntervalUnitsObj = relationship(
Units,
primaryjoin='DataLoggerFileColumns.ScanIntervalUnitsID == Units.UnitsID'
)
RecordingIntervalUnitsObj = relationship(
Units,
primaryjoin='DataLoggerFileColumns.RecordingIntervalUnitsID == Units.UnitsID'
)
class Equipment(Base):
EquipmentID = Column('equipmentid', Integer, primary_key=True, nullable=False)
EquipmentCode = Column('equipmentcode', String(50), nullable=False)
EquipmentName = Column('equipmentname', String(255), nullable=False)
EquipmentTypeCV = Column('equipmenttypecv', ForeignKey(CVEquipmentType.Name), nullable=False, index=True)
ModelID = Column('equipmentmodelid', ForeignKey(EquipmentModels.ModelID), nullable=False)
EquipmentSerialNumber = Column('equipmentserialnumber', String(50), nullable=False)
EquipmentInventoryNumber = Column('equipmentinventorynumber', String(50))
EquipmentOwnerID = Column('equipmentownerid', ForeignKey(People.PersonID), nullable=False)
EquipmentVendorID = Column('equipmentvendorid', ForeignKey(Organizations.OrganizationID), nullable=False)
EquipmentPurchaseDate = Column('equipmentpurchasedate', DateTime, nullable=False)
EquipmentPurchaseOrderNumber = Column('equipmentpurchaseordernumber', String(50))
EquipmentDescription = Column('equipmentdescription', String(500))
EquipmentDocumentationLink = Column('equipmentdocumentationlink', String(255))
PersonObj = relationship(People)
OrganizationObj = relationship(Organizations)
EquipmentModelObj = relationship(EquipmentModels)
class CalibrationReferenceEquipment(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), nullable=False)
EquipmentID = Column('equipmentid', Integer, ForeignKey(Equipment.EquipmentID), nullable=False)
ActionObj = relationship(Actions)
EquipmentObj = relationship(Equipment)
class EquipmentActions(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
EquipmentID = Column('equipmentid', ForeignKey(Equipment.EquipmentID), nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
ActionObj = relationship(Actions)
EquipmentObj = relationship(Equipment)
class EquipmentUsed(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), nullable=False)
EquipmentID = Column('equipmentid', Integer, ForeignKey(Equipment.EquipmentID), nullable=False)
ActionObj = relationship(Actions)
EquipmentObj = relationship(Equipment)
class MaintenanceActions(Base):
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), primary_key=True, nullable=False)
IsFactoryService = Column('isfactoryservice', Boolean, nullable=False)
MaintenanceCode = Column('maintenancecode', String(50))
    MaintenanceReason = Column('maintenancereason', String(50))
ActionObj = relationship(Actions)
class RelatedEquipment(Base):
    RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
EquipmentID = Column('equipmentid', Integer, ForeignKey(Equipment.EquipmentID), nullable=True)
RelationshipTypeCV = Column('relationshiptypecv', String(255), nullable=True, index=True)
RelatedEquipmentID = Column(
'relatedequipmentid',
Integer,
ForeignKey(Equipment.EquipmentID),
nullable=True
)
RelationshipStartDateTime = Column('relationshipstartdatetime', DateTime, nullable=True)
RelationshipStartDateTimeUTCOffset = Column('relationshipstartdatetimeutcoffset', Integer, nullable=True)
RelationshipEndDateTime = Column('relationshipenddatetime', DateTime)
RelationshipEndDateTimeUTCOffset = Column('relationshipenddatetimeutcoffset', Integer)
EquipmentObj = relationship(
Equipment,
primaryjoin='RelatedEquipment.EquipmentID == Equipment.EquipmentID'
)
RelatedEquipmentObj = relationship(
Equipment,
primaryjoin='RelatedEquipment.RelatedEquipmentID == Equipment.EquipmentID'
)
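# Illustrative sketch (hypothetical objects and CV term): RelatedEquipment carries
# two foreign keys into Equipment, which is why each relationship above needs an
# explicit ``primaryjoin``. Linking a sensor to its host logger might look like
# this; ``session``, ``sensor``, ``logger`` and ``start_time`` are assumed to
# already exist.
def _example_attach_sensor_to_logger(session, sensor, logger, start_time):
    link = RelatedEquipment(
        EquipmentObj=sensor,                  # populates EquipmentID on flush
        RelatedEquipmentObj=logger,           # populates RelatedEquipmentID on flush
        RelationshipTypeCV='Is attached to',  # hypothetical CV term
        RelationshipStartDateTime=start_time,
        RelationshipStartDateTimeUTCOffset=0,
    )
    session.add(link)
    return link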
class CalibrationActions(Base):
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), primary_key=True, nullable=False)
CalibrationCheckValue = Column('calibrationcheckvalue', Float(53))
    InstrumentOutputVariableID = Column('instrumentoutputvariableid', Integer,
                                        ForeignKey(InstrumentOutputVariables.InstrumentOutputVariableID),
                                        nullable=False)
CalibrationEquation = Column('calibrationequation', String(255))
ActionObj = relationship(Actions)
InstrumentOutputVariableObj = relationship(InstrumentOutputVariables)
# ################################################################################
# Lab Analyses
# ################################################################################
class Directives(Base):
DirectiveID = Column('directiveid', Integer, primary_key=True, nullable=False)
DirectiveTypeCV = Column('directivetypecv', ForeignKey(CVDirectiveType.Name), nullable=False, index=True)
DirectiveDescription = Column('directivedescription', String(500), nullable=False)
class ActionDirectives(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
DirectiveID = Column('directiveid', ForeignKey(Directives.DirectiveID), nullable=False)
ActionObj = relationship(Actions)
DirectiveObj = relationship(Directives)
class SpecimenBatchPositions(Base):
    # TODO: fix the misspelled physical table name ('specimenbatchpostions')
__tablename__ = u'specimenbatchpostions'
FeatureActionID = Column(
'featureactionid',
Integer,
ForeignKey(FeatureActions.FeatureActionID),
primary_key=True,
nullable=False
)
BatchPositionsNumber = Column('batchpositionnumber', Integer, nullable=False)
BatchPositionLabel = Column('batchpositionlabel', String(255))
FeatureActionObj = relationship(FeatureActions)
# ################################################################################
# Sampling Features
# ################################################################################
class SpatialReferences(Base):
SpatialReferenceID = Column('spatialreferenceid', Integer, primary_key=True, nullable=False)
SRSCode = Column('srscode', String(50))
SRSName = Column('srsname', String(255), nullable=False)
SRSDescription = Column('srsdescription', String(500))
SRSLink = Column('srslink', String(255))
class Specimens(SamplingFeatures):
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
primary_key=True)
SpecimenTypeCV = Column('specimentypecv', ForeignKey(CVSpecimenType.Name), nullable=False, index=True)
SpecimenMediumCV = Column('specimenmediumcv', ForeignKey(CVMediumType.Name), nullable=False, index=True)
IsFieldSpecimen = Column('isfieldspecimen', Boolean, nullable=False)
__mapper_args__ = {
'polymorphic_identity': 'Specimen',
}
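# Illustrative sketch (hypothetical values): because Specimens inherits from
# SamplingFeatures with the polymorphic identity 'Specimen', adding one Specimens
# object writes a row to both the samplingfeatures and specimens tables on flush.
# ``session`` is a hypothetical SQLAlchemy Session.
def _example_add_specimen(session):
    import uuid
    specimen = Specimens(
        SamplingFeatureUUID=str(uuid.uuid4()),
        SamplingFeatureTypeCV='Specimen',
        SamplingFeatureCode='GRAB-001',   # hypothetical code
        SpecimenTypeCV='Grab',            # hypothetical CV terms
        SpecimenMediumCV='Liquid aqueous',
        IsFieldSpecimen=True,
    )
    session.add(specimen)
    session.commit()
    return specimen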
class SpatialOffsets(Base):
SpatialOffsetID = Column('spatialoffsetid', Integer, primary_key=True, nullable=False)
SpatialOffsetTypeCV = Column('spatialoffsettypecv', ForeignKey(CVSpatialOffsetType.Name), nullable=False,
index=True)
Offset1Value = Column('offset1value', Float(53), nullable=False)
Offset1UnitID = Column('offset1unitid', Integer, ForeignKey(Units.UnitsID), nullable=False)
Offset2Value = Column('offset2value', Float(53))
Offset2UnitID = Column('offset2unitid', Integer, ForeignKey(Units.UnitsID))
Offset3Value = Column('offset3value', Float(53))
Offset3UnitID = Column('offset3unitid', Integer, ForeignKey(Units.UnitsID))
Offset1UnitObj = relationship(Units, primaryjoin='SpatialOffsets.Offset1UnitID == Units.UnitsID')
Offset2UnitObj = relationship(Units, primaryjoin='SpatialOffsets.Offset2UnitID == Units.UnitsID')
Offset3UnitObj = relationship(Units, primaryjoin='SpatialOffsets.Offset3UnitID == Units.UnitsID')
class Sites(SamplingFeatures):
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
primary_key=True)
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID),
nullable=False)
SiteTypeCV = Column('sitetypecv', ForeignKey(CVSiteType.Name), nullable=False, index=True)
Latitude = Column('latitude', Float(53), nullable=False)
Longitude = Column('longitude', Float(53), nullable=False)
SpatialReferenceObj = relationship(SpatialReferences)
__mapper_args__ = {
'polymorphic_identity': 'Site',
}
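# Illustrative sketch (hypothetical values): a Sites row must reference a
# SpatialReferences row for its latitude/longitude datum; assigning the
# ``SpatialReferenceObj`` relationship lets SQLAlchemy fill in SpatialReferenceID
# on flush. ``session`` is a hypothetical SQLAlchemy Session.
def _example_add_site(session):
    import uuid
    srs = SpatialReferences(SRSCode='EPSG:4326', SRSName='WGS84')
    site = Sites(
        SamplingFeatureUUID=str(uuid.uuid4()),
        SamplingFeatureTypeCV='Site',
        SamplingFeatureCode='RIVER-01',   # hypothetical site code
        SiteTypeCV='Stream',              # hypothetical CV term
        Latitude=41.718,
        Longitude=-111.946,
        SpatialReferenceObj=srs,
    )
    session.add(site)
    session.commit()
    return site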
class RelatedFeatures(Base):
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedFeatureID = Column(
'relatedfeatureid',
ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False
)
SpatialOffsetID = Column('spatialoffsetid', ForeignKey(SpatialOffsets.SpatialOffsetID))
SamplingFeatureObj = relationship(
SamplingFeatures,
primaryjoin='RelatedFeatures.SamplingFeatureID == SamplingFeatures.SamplingFeatureID'
)
RelatedFeatureObj = relationship(
SamplingFeatures,
primaryjoin='RelatedFeatures.RelatedFeatureID == SamplingFeatures.SamplingFeatureID'
)
SpatialOffsetObj = relationship(SpatialOffsets)
class SpecimenTaxonomicClassifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(Specimens.SamplingFeatureID), nullable=False)
TaxonomicClassifierID = Column('taxonomicclassifierid',
ForeignKey(TaxonomicClassifiers.TaxonomicClassifierID), nullable=False)
CitationID = Column('citationid', Integer)
SpecimenObj = relationship(Specimens)
TaxonomicClassifierObj = relationship(TaxonomicClassifiers)
# ################################################################################
# Simulation
# ################################################################################
class Models(Base):
ModelID = Column('modelid', Integer, primary_key=True, nullable=False)
ModelCode = Column('modelcode', String(255), nullable=False)
ModelName = Column('modelname', String(255), nullable=False)
ModelDescription = Column('modeldescription', String(500))
class RelatedModels(Base):
RelatedID = Column('relatedid', Integer, primary_key=True, nullable=False)
ModelID = Column('modelid', ForeignKey(Models.ModelID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedModelID = Column('relatedmodelid', ForeignKey(Models.ModelID), nullable=False)
ModelObj = relationship(Models, primaryjoin='RelatedModels.ModelID == Models.ModelID')
RelatedModelObj = relationship(Models, primaryjoin='RelatedModels.RelatedModelID == Models.ModelID')
class Simulations(Base):
SimulationID = Column('simulationid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
SimulationName = Column('simulationname', String(255), nullable=False)
SimulationDescription = Column('simulationdescription', String(500))
SimulationStartDateTime = Column('simulationstartdatetime', Date, nullable=False)
SimulationStartDateTimeUTCOffset = Column('simulationstartdatetimeutcoffset', Integer, nullable=False)
SimulationEndDateTime = Column('simulationenddatetime', Date, nullable=False)
SimulationEndDateTimeUTCOffset = Column('simulationenddatetimeutcoffset', Integer, nullable=False)
TimeStepValue = Column('timestepvalue', Float(53), nullable=False)
TimeStepUnitsID = Column('timestepunitsid', ForeignKey(Units.UnitsID), nullable=False)
InputDataSetID = Column('inputdatasetid', ForeignKey(DataSets.DataSetID))
    # OutputDataSetID = Column('outputdatasetid', Integer)  # TODO: purpose of this column is unclear
ModelID = Column('modelid', ForeignKey(Models.ModelID), nullable=False)
Action = relationship(Actions)
DataSet = relationship(DataSets)
Model = relationship(Models)
Unit = relationship(Units)
# Citations belongs with the Provenance tables defined below, but it is declared here because later classes depend on it.
class Citations(Base):
CitationID = Column('citationid', Integer, primary_key=True, nullable=False)
Title = Column('title', String(255), nullable=False)
Publisher = Column('publisher', String(255), nullable=False)
PublicationYear = Column('publicationyear', Integer, nullable=False)
CitationLink = Column('citationlink', String(255))
# ################################################################################
# Annotations
# ################################################################################
class Annotations(Base):
AnnotationID = Column('annotationid', Integer, primary_key=True, nullable=False)
AnnotationTypeCV = Column(
'annotationtypecv',
ForeignKey(CVAnnotationType.Name),
nullable=False,
index=True
)
AnnotationCode = Column('annotationcode', String(50))
AnnotationText = Column('annotationtext', String(500), nullable=False)
AnnotationDateTime = Column('annotationdatetime', DateTime)
AnnotationUTCOffset = Column('annotationutcoffset', Integer)
AnnotationLink = Column('annotationlink', String(255))
AnnotatorID = Column('annotatorid', ForeignKey(People.PersonID))
CitationID = Column('citationid', ForeignKey(Citations.CitationID))
# PersonObj = relationship(People)
AnnotatorObj = relationship(People)
CitationObj = relationship(Citations)
class ActionAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
ActionObj = relationship(Actions)
AnnotationObj = relationship(Annotations)
class EquipmentAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
EquipmentID = Column('equipmentid', BigIntegerType, ForeignKey(Equipment.EquipmentID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
EquipmentObj = relationship(Equipment)
class MethodAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
MethodID = Column('methodid', ForeignKey(Methods.MethodID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
MethodObj = relationship(Methods)
class ResultAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ResultID = Column('resultid', ForeignKey(Results.ResultID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
BeginDateTime = Column('begindatetime', DateTime, nullable=False)
EndDateTime = Column('enddatetime', DateTime, nullable=False)
AnnotationObj = relationship(Annotations)
ResultObj = relationship(Results)
class SamplingFeatureAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
SamplingFeatureObj = relationship(SamplingFeatures)
# ################################################################################
# Data Quality
# ################################################################################
class DataSetsResults(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
DataSetID = Column('datasetid', ForeignKey(DataSets.DataSetID), nullable=False)
ResultID = Column('resultid', ForeignKey(Results.ResultID), nullable=False)
DataSetObj = relationship(DataSets)
ResultObj = relationship(Results)
class DataQuality(Base):
DataQualityID = Column('dataqualityid', Integer, primary_key=True, nullable=False)
DataQualityTypeCV = Column('dataqualitytypecv', ForeignKey(CVDataQualityType.Name), nullable=False,
index=True)
DataQualityCode = Column('dataqualitycode', String(255), nullable=False)
DataQualityValue = Column('dataqualityvalue', Float(53))
DataQualityValueUnitsID = Column('dataqualityvalueunitsid', ForeignKey(Units.UnitsID))
DataQualityDescription = Column('dataqualitydescription', String(500))
DataQualityLink = Column('dataqualitylink', String(255))
UnitObj = relationship(Units)
class ReferenceMaterials(Base):
ReferenceMaterialID = Column('referencematerialid', Integer, primary_key=True, nullable=False)
ReferenceMaterialMediumCV = Column(
'referencematerialmediumcv',
ForeignKey(CVReferenceMaterialMedium.Name),
nullable=False,
index=True
)
ReferenceMaterialOrganizationID = Column('referencematerialorganizationid',
ForeignKey(Organizations.OrganizationID), nullable=False)
ReferenceMaterialCode = Column('referencematerialcode', String(50), nullable=False)
ReferenceMaterialLotCode = Column('referencemateriallotcode', String(255))
ReferenceMaterialPurchaseDate = Column('referencematerialpurchasedate', DateTime)
ReferenceMaterialExpirationDate = Column('referencematerialexpirationdate', DateTime)
ReferenceMaterialCertificateLink = Column('referencematerialcertificatelink', String(255))
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID))
OrganizationObj = relationship(Organizations)
SamplingFeatureObj = relationship(SamplingFeatures)
class CalibrationStandards(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', Integer, ForeignKey(Actions.ActionID), nullable=False)
ReferenceMaterialID = Column(
'referencematerialid',
Integer,
ForeignKey(ReferenceMaterials.ReferenceMaterialID),
nullable=False
)
ActionObj = relationship(Actions)
ReferenceMaterialObj = relationship(ReferenceMaterials)
class ReferenceMaterialValues(Base):
ReferenceMaterialValueID = Column('referencematerialvalueid', Integer, primary_key=True, nullable=False)
ReferenceMaterialID = Column('referencematerialid', ForeignKey(ReferenceMaterials.ReferenceMaterialID),
nullable=False)
ReferenceMaterialValue = Column('referencematerialvalue', Float(53), nullable=False)
ReferenceMaterialAccuracy = Column('referencematerialaccuracy', Float(53))
VariableID = Column('variableid', ForeignKey(Variables.VariableID), nullable=False)
UnitsID = Column('unitsid', ForeignKey(Units.UnitsID), nullable=False)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
CitationObj = relationship(Citations)
ReferenceMaterialObj = relationship(ReferenceMaterials)
UnitObj = relationship(Units)
VariableObj = relationship(Variables)
class ResultNormalizationValues(Base):
ResultID = Column(u'resultid', ForeignKey(Results.ResultID), primary_key=True)
ReferenceMaterialValueID = Column(u'normalizedbyreferencematerialvalueid',
ForeignKey(ReferenceMaterialValues.ReferenceMaterialValueID),
nullable=False)
ResultsObj = relationship(Results)
ReferenceMaterialValueObj = relationship(ReferenceMaterialValues)
class ResultsDataQuality(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ResultID = Column('resultid', ForeignKey(Results.ResultID), nullable=False)
DataQualityID = Column('dataqualityid', ForeignKey(DataQuality.DataQualityID), nullable=False)
DataQualityObj = relationship(DataQuality)
ResultObj = relationship(Results)
# ################################################################################
# Extension Properties
# ################################################################################
class ExtensionProperties(Base):
PropertyID = Column('propertyid', Integer, primary_key=True, nullable=False)
PropertyName = Column('propertyname', String(255), nullable=False)
PropertyDescription = Column('propertydescription', String(500))
PropertyDataTypeCV = Column('propertydatatypecv', ForeignKey(CVPropertyDataType.Name), nullable=False,
index=True)
PropertyUnitsID = Column('propertyunitsid', ForeignKey(Units.UnitsID))
UnitObj = relationship(Units)
class ActionExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ActionID = Column('actionid', ForeignKey(Actions.ActionID), nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
ActionObj = relationship(Actions)
ExtensionPropertyObj = relationship(ExtensionProperties)
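# Illustrative sketch (hypothetical objects): extension properties let a deployment
# attach arbitrary named values to core entities through bridge tables such as the
# one above. ``session``, ``action`` and ``prop`` are assumed to already exist.
def _example_tag_action(session, action, prop, value):
    bridge = ActionExtensionPropertyValues(
        ActionObj=action,
        ExtensionPropertyObj=prop,
        PropertyValue=str(value),
    )
    session.add(bridge)
    return bridge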
class CitationExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
CitationObj = relationship(Citations)
ExtensionPropertyObj = relationship(ExtensionProperties)
class MethodExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
MethodID = Column('methodid', ForeignKey(Methods.MethodID), nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
MethodObj = relationship(Methods)
ExtensionPropertyObj = relationship(ExtensionProperties)
class ResultExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ResultID = Column('resultid', ForeignKey(Results.ResultID), nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
ExtensionPropertyObj = relationship(ExtensionProperties)
ResultObj = relationship(Results)
class SamplingFeatureExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
ExtensionPropertyObj = relationship(ExtensionProperties)
SamplingFeatureObj = relationship(SamplingFeatures)
class VariableExtensionPropertyValues(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
VariableID = Column('variableid', ForeignKey(Variables.VariableID), nullable=False)
PropertyID = Column('propertyid', ForeignKey(ExtensionProperties.PropertyID), nullable=False)
PropertyValue = Column('propertyvalue', String(255), nullable=False)
ExtensionPropertyObj = relationship(ExtensionProperties)
VariableObj = relationship(Variables)
# ################################################################################
# Extension Identifiers
# ################################################################################
class ExternalIdentifierSystems(Base):
ExternalIdentifierSystemID = Column(
'externalidentifiersystemid',
Integer,
primary_key=True,
nullable=False
)
ExternalIdentifierSystemName = Column('externalidentifiersystemname', String(255), nullable=False)
IdentifierSystemOrganizationID = Column('identifiersystemorganizationid',
ForeignKey(Organizations.OrganizationID), nullable=False)
ExternalIdentifierSystemDescription = Column('externalidentifiersystemdescription', String(500))
ExternalIdentifierSystemURL = Column('externalidentifiersystemurl', String(255))
IdentifierSystemOrganizationObj = relationship(Organizations)
class CitationExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
CitationExternalIdentifier = Column('citationexternalidentifier', String(255), nullable=False)
CitationExternalIdentifierURI = Column('citationexternalidentifieruri', String(255))
CitationObj = relationship(Citations)
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
class MethodExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
MethodID = Column('methodid', ForeignKey(Methods.MethodID), nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
MethodExternalIdentifier = Column('methodexternalidentifier', String(255), nullable=False)
MethodExternalIdentifierURI = Column('methodexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
MethodObj = relationship(Methods)
class PersonExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
PersonID = Column('personid', ForeignKey(People.PersonID), nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
PersonExternalIdentifier = Column('personexternalidentifier', String(255), nullable=False)
PersonExternalIdentifierURI = Column('personexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
PersonObj = relationship(People)
class ReferenceMaterialExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
    ReferenceMaterialID = Column('referencematerialid', ForeignKey(ReferenceMaterials.ReferenceMaterialID),
                                 nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
ReferenceMaterialExternalIdentifier = Column(
'referencematerialexternalidentifier',
String(255),
nullable=False
)
ReferenceMaterialExternalIdentifierURI = Column('referencematerialexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
ReferenceMaterialObj = relationship(ReferenceMaterials)
class SamplingFeatureExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
SamplingFeatureID = Column('samplingfeatureid', ForeignKey(SamplingFeatures.SamplingFeatureID),
nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
SamplingFeatureExternalIdentifier = Column(
'samplingfeatureexternalidentifier',
String(255),
nullable=False
)
SamplingFeatureExternalIdentifierURI = Column('samplingfeatureexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
SamplingFeatureObj = relationship(SamplingFeatures)
class SpatialReferenceExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID),
nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
SpatialReferenceExternalIdentifier = Column(
'spatialreferenceexternalidentifier',
String(255),
nullable=False
)
SpatialReferenceExternalIdentifierURI = Column('spatialreferenceexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
SpatialReferenceObj = relationship(SpatialReferences)
class TaxonomicClassifierExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
TaxonomicClassifierID = Column('taxonomicclassifierid',
ForeignKey(TaxonomicClassifiers.TaxonomicClassifierID), nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
TaxonomicClassifierExternalIdentifier = Column(
'taxonomicclassifierexternalidentifier',
String(255),
nullable=False
)
TaxonomicClassifierExternalIdentifierURI = Column('taxonomicclassifierexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
TaxonomicClassifierObj = relationship(TaxonomicClassifiers)
class VariableExternalIdentifiers(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
VariableID = Column('variableid', ForeignKey(Variables.VariableID), nullable=False)
ExternalIdentifierSystemID = Column('externalidentifiersystemid',
ForeignKey(ExternalIdentifierSystems.ExternalIdentifierSystemID),
nullable=False)
VariableExternalIdentifier = Column('variableexternalidentifer', String(255), nullable=False)
VariableExternalIdentifierURI = Column('variableexternalidentifieruri', String(255))
ExternalIdentifierSystemObj = relationship(ExternalIdentifierSystems)
VariableObj = relationship(Variables)
# ################################################################################
# Provenance
# ################################################################################
class AuthorLists(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
PersonID = Column('personid', ForeignKey(People.PersonID), nullable=False)
AuthorOrder = Column('authororder', Integer, nullable=False)
CitationObj = relationship(Citations, primaryjoin='AuthorLists.CitationID == Citations.CitationID')
PersonObj = relationship(People, primaryjoin='AuthorLists.PersonID == People.PersonID')
class DataSetCitations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
DataSetID = Column('datasetid', ForeignKey(DataSets.DataSetID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
CitationObj = relationship(Citations)
DataSetObj = relationship(DataSets)
class DerivationEquations(Base):
DerivationEquationID = Column('derivationequationid', Integer, primary_key=True, nullable=False)
DerivationEquation = Column('derivationequation', String(255), nullable=False)
class ResultDerivationEquations(Base):
ResultID = Column(u'resultid', ForeignKey(Results.ResultID), primary_key=True)
DerivationEquationID = Column(
u'derivationequationid',
ForeignKey(DerivationEquations.DerivationEquationID),
nullable=False
)
ResultsObj = relationship(Results)
DerivationEquationsObj = relationship(DerivationEquations)
class MethodCitations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
MethodID = Column('methodid', ForeignKey(Methods.MethodID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
CitationObj = relationship(Citations)
MethodObj = relationship(Methods)
class RelatedAnnotations(Base):
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedAnnotationID = Column('relatedannotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(
Annotations,
primaryjoin='RelatedAnnotations.AnnotationID == Annotations.AnnotationID'
)
RelatedAnnotationObj = relationship(
Annotations,
primaryjoin='RelatedAnnotations.RelatedAnnotationID == Annotations.AnnotationID'
)
class RelatedCitations(Base):
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
CitationID = Column('citationid', ForeignKey(Citations.CitationID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedCitationID = Column('relatedcitationid', ForeignKey(Citations.CitationID), nullable=False)
CitationObj = relationship(Citations, primaryjoin='RelatedCitations.CitationID == Citations.CitationID')
RelatedCitationObj = relationship(
Citations,
primaryjoin='RelatedCitations.RelatedCitationID == Citations.CitationID'
)
class RelatedDataSets(Base):
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
DataSetID = Column('datasetid', ForeignKey(DataSets.DataSetID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedDataSetID = Column('relateddatasetid', ForeignKey(DataSets.DataSetID), nullable=False)
VersionCode = Column('versioncode', String(50))
DataSetObj = relationship(DataSets, primaryjoin='RelatedDataSets.DataSetID == DataSets.DataSetID')
RelatedDataSetObj = relationship(
DataSets,
primaryjoin='RelatedDataSets.RelatedDataSetID == DataSets.DataSetID'
)
class RelatedResults(Base):
RelationID = Column('relationid', Integer, primary_key=True, nullable=False)
ResultID = Column('resultid', ForeignKey(Results.ResultID), nullable=False)
RelationshipTypeCV = Column('relationshiptypecv', ForeignKey(CVRelationshipType.Name), nullable=False,
index=True)
RelatedResultID = Column('relatedresultid', ForeignKey(Results.ResultID), nullable=False)
VersionCode = Column('versioncode', String(50))
RelatedResultSequenceNumber = Column('relatedresultsequencenumber', Integer)
    ResultObj = relationship(Results, primaryjoin='RelatedResults.ResultID == Results.ResultID')
    RelatedResultObj = relationship(Results, primaryjoin='RelatedResults.RelatedResultID == Results.ResultID')
# ################################################################################
# Results
# ################################################################################
class PointCoverageResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedXSpacing = Column('intendedxspacing', Float(53))
IntendedXSpacingUnitsID = Column('intendedxspacingunitsid', ForeignKey(Units.UnitsID))
IntendedYSpacing = Column('intendedyspacing', Float(53))
IntendedYSpacingUnitsID = Column('intendedyspacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
    TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', Integer,
                                            ForeignKey(Units.UnitsID), nullable=False)
IntendedXSpacingUnitsObj = relationship(
Units,
primaryjoin='PointCoverageResults.IntendedXSpacingUnitsID == Units.UnitsID'
)
IntendedYSpacingUnitsObj = relationship(
Units,
primaryjoin='PointCoverageResults.IntendedYSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='PointCoverageResults.ZLocationUnitsID == Units.UnitsID'
)
__mapper_args__ = {'polymorphic_identity': 'Point coverage'}
class ProfileResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
XLocation = Column('xlocation', Float(53))
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID))
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedZSpacing = Column('intendedzspacing', Float(53))
IntendedZSpacingUnitsID = Column('intendedzspacingunitsid', ForeignKey(Units.UnitsID))
IntendedTimeSpacing = Column('intendedtimespacing', Float(53))
IntendedTimeSpacingUnitsID = Column('intendedtimespacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
IntendedTimeSpacingUnitsObj = relationship(
Units,
primaryjoin='ProfileResults.IntendedTimeSpacingUnitsID == Units.UnitsID'
)
IntendedZSpacingUnitsObj = relationship(
Units,
primaryjoin='ProfileResults.IntendedZSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
XLocationUnitsObj = relationship(Units, primaryjoin='ProfileResults.XLocationUnitsID == Units.UnitsID')
YLocationUnitsObj = relationship(Units, primaryjoin='ProfileResults.YLocationUnitsID == Units.UnitsID')
__mapper_args__ = {'polymorphic_identity': 'Profile Coverage'}
class CategoricalResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
XLocation = Column('xlocation', Float(53))
XLocationUnitsID = Column('xlocationunitsid', Integer, ForeignKey(Units.UnitsID))
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', Integer, ForeignKey(Units.UnitsID))
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', Integer, ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
SpatialReferenceObj = relationship(SpatialReferences)
XLocationUnitsObj = relationship(
Units,
primaryjoin='CategoricalResults.XLocationUnitsID == Units.UnitsID'
)
YLocationUnitsObj = relationship(
Units,
primaryjoin='CategoricalResults.YLocationUnitsID == Units.UnitsID'
)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='CategoricalResults.ZLocationUnitsID == Units.UnitsID'
)
    __mapper_args__ = {'polymorphic_identity': 'Category coverage'}
class TransectResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedTransectSpacing = Column('intendedtransectspacing', Float(53))
IntendedTransectSpacingUnitsID = Column('intendedtransectspacingunitsid', ForeignKey(Units.UnitsID))
IntendedTimeSpacing = Column('intendedtimespacing', Float(53))
IntendedTimeSpacingUnitsID = Column('intendedtimespacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
IntendedTimeSpacingUnitsObj = relationship(
Units,
primaryjoin='TransectResults.IntendedTimeSpacingUnitsID == Units.UnitsID'
)
IntendedTransectSpacingUnitsObj = relationship(
Units,
primaryjoin='TransectResults.IntendedTransectSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
ZLocationUnitsObj = relationship(Units, primaryjoin='TransectResults.ZLocationUnitsID == Units.UnitsID')
__mapper_args__ = {'polymorphic_identity': 'Transect Coverage'}
class SpectraResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
XLocation = Column('xlocation', Float(53))
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID))
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID))
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedWavelengthSpacing = Column('intendedwavelengthspacing', Float(53))
IntendedWavelengthSpacingUnitsID = Column('intendedwavelengthspacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
IntendedWavelengthSpacingUnitsObj = relationship(
Units,
primaryjoin='SpectraResults.IntendedWavelengthSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
XLocationUnitsObj = relationship(Units, primaryjoin='SpectraResults.XLocationUnitsID == Units.UnitsID')
YLocationUnitsObj = relationship(Units, primaryjoin='SpectraResults.YLocationUnitsID == Units.UnitsID')
ZLocationUnitsObj = relationship(Units, primaryjoin='SpectraResults.ZLocationUnitsID == Units.UnitsID')
__mapper_args__ = {'polymorphic_identity': 'Spectra coverage'}
class TimeSeriesResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
XLocation = Column('xlocation', Float(53))
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID))
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID))
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedTimeSpacing = Column('intendedtimespacing', Float(53))
IntendedTimeSpacingUnitsID = Column('intendedtimespacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
IntendedTimeSpacingUnitsObj = relationship(
Units,
primaryjoin='TimeSeriesResults.IntendedTimeSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
XLocationUnitsObj = relationship(Units, primaryjoin='TimeSeriesResults.XLocationUnitsID == Units.UnitsID')
YLocationUnitsObj = relationship(Units, primaryjoin='TimeSeriesResults.YLocationUnitsID == Units.UnitsID')
ZLocationUnitsObj = relationship(Units, primaryjoin='TimeSeriesResults.ZLocationUnitsID == Units.UnitsID')
__mapper_args__ = {'polymorphic_identity': 'Time series coverage'}
class SectionResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedXSpacing = Column('intendedxspacing', Float(53))
IntendedXSpacingUnitsID = Column('intendedxspacingunitsid', ForeignKey(Units.UnitsID))
IntendedZSpacing = Column('intendedzspacing', Float(53))
IntendedZSpacingUnitsID = Column('intendedzspacingunitsid', ForeignKey(Units.UnitsID))
IntendedTimeSpacing = Column('intendedtimespacing', Float(53))
IntendedTimeSpacingUnitsID = Column('intendedtimespacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column(
'aggregationstatisticcv',
ForeignKey(CVAggregationStatistic.Name),
nullable=False,
index=True
)
IntendedTimeSpacingUnitsObj = relationship(
Units,
primaryjoin='SectionResults.IntendedTimeSpacingUnitsID == Units.UnitsID'
)
IntendedXSpacingUnitsObj = relationship(
Units,
primaryjoin='SectionResults.IntendedXSpacingUnitsID == Units.UnitsID'
)
IntendedZSpacingUnitsObj = relationship(
Units,
primaryjoin='SectionResults.IntendedZSpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
YLocationUnitsObj = relationship(Units, primaryjoin='SectionResults.YLocationUnitsID == Units.UnitsID')
__mapper_args__ = {'polymorphic_identity': 'Section coverage'}
class TrajectoryResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
IntendedTrajectorySpacing = Column('intendedtrajectoryspacing', Float(53))
IntendedTrajectorySpacingUnitsID = Column('intendedtrajectoryspacingunitsid', ForeignKey(Units.UnitsID))
IntendedTimeSpacing = Column('intendedtimespacing', Float(53))
IntendedTimeSpacingUnitsID = Column('intendedtimespacingunitsid', ForeignKey(Units.UnitsID))
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
IntendedTimeSpacingUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResults.IntendedTimeSpacingUnitsID == Units.UnitsID'
)
IntendedTrajectorySpacingUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResults.IntendedTrajectorySpacingUnitsID == Units.UnitsID'
)
SpatialReferenceObj = relationship(SpatialReferences)
__mapper_args__ = {'polymorphic_identity': 'Trajectory coverage'}
class MeasurementResults(Results):
ResultID = Column('resultid', ForeignKey(Results.ResultID), primary_key=True)
XLocation = Column('xlocation', Float(53))
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID))
YLocation = Column('ylocation', Float(53))
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID))
ZLocation = Column('zlocation', Float(53))
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID))
SpatialReferenceID = Column('spatialreferenceid', ForeignKey(SpatialReferences.SpatialReferenceID))
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
SpatialReferenceObj = relationship(SpatialReferences)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='MeasurementResults.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
XLocationUnitsObj = relationship(
Units,
primaryjoin='MeasurementResults.XLocationUnitsID == Units.UnitsID'
)
YLocationUnitsObj = relationship(
Units,
primaryjoin='MeasurementResults.YLocationUnitsID == Units.UnitsID'
)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='MeasurementResults.ZLocationUnitsID == Units.UnitsID'
)
__mapper_args__ = {'polymorphic_identity': 'Measurement'}
class CategoricalResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(CategoricalResults.ResultID), nullable=False)
DataValue = Column('datavalue', String(255), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
ResultObj = relationship(CategoricalResults)
class MeasurementResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(MeasurementResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
ResultObj = relationship(MeasurementResults)
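# Illustrative sketch (hypothetical objects): a single observed number is stored as
# one MeasurementResults row (the metadata) plus one MeasurementResultValues row
# (the value itself). ``session``, ``measurement_result`` and ``when`` are assumed
# to already exist.
def _example_store_measurement_value(session, measurement_result, value, when):
    row = MeasurementResultValues(
        ResultObj=measurement_result,
        DataValue=value,
        ValueDateTime=when,
        ValueDateTimeUTCOffset=0,
    )
    session.add(row)
    session.commit()
    return row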
class PointCoverageResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(PointCoverageResults.ResultID), nullable=False)
DataValue = Column('datavalue', BigIntegerType, nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
XLocation = Column('xlocation', Float(53), nullable=False)
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
YLocation = Column('ylocation', Float(53), nullable=False)
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
ResultObj = relationship(PointCoverageResults)
XLocationUnitsObj = relationship(
Units,
primaryjoin='PointCoverageResultValues.XLocationUnitsID == Units.UnitsID'
)
    YLocationUnitsObj = relationship(
Units,
primaryjoin='PointCoverageResultValues.YLocationUnitsID == Units.UnitsID'
)
class ProfileResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(ProfileResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
ZLocation = Column('zlocation', Float(53), nullable=False)
ZAggregationInterval = Column('zaggregationinterval', Float(53), nullable=False)
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
ResultObj = relationship(ProfileResults)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='ProfileResultValues.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='ProfileResultValues.ZLocationUnitsID == Units.UnitsID'
)
class SectionResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(SectionResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
XLocation = Column('xlocation', Float(53), nullable=False)
XAggregationInterval = Column('xaggregationinterval', Float(53), nullable=False)
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
ZLocation = Column('zlocation', BigIntegerType, nullable=False)
ZAggregationInterval = Column('zaggregationinterval', Float(53), nullable=False)
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
ResultObj = relationship(SectionResults)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='SectionResultValues.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
XLocationUnitsObj = relationship(
Units,
primaryjoin='SectionResultValues.XLocationUnitsID == Units.UnitsID'
)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='SectionResultValues.ZLocationUnitsID == Units.UnitsID'
)
class SpectraResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(SpectraResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
ExcitationWavelength = Column('excitationwavelength', Float(53), nullable=False)
EmissionWavelength = Column('emissionwavelength', Float(53), nullable=False)
WavelengthUnitsID = Column('wavelengthunitsid', ForeignKey(Units.UnitsID), nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
ResultObj = relationship(SpectraResults)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='SpectraResultValues.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
WavelengthUnitsObj = relationship(
Units,
primaryjoin='SpectraResultValues.WavelengthUnitsID == Units.UnitsID'
)
class TimeSeriesResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(TimeSeriesResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
ResultObj = relationship(TimeSeriesResults)
TimeAggregationIntervalUnitsObj = relationship(Units)
def get_columns(self):
return ['ValueID', 'ResultID', 'DataValue', 'ValueDateTime', 'ValueDateTimeUTCOffset',
'CensorCodeCV', 'QualityCodeCV', 'TimeAggregationInterval', 'TimeAggregationIntervalUnitsID']
def list_repr(self):
return [self.ValueID, self.ResultID, self.DataValue, self.ValueDateTime, self.ValueDateTimeUTCOffset,
self.CensorCodeCV, self.QualityCodeCV, self.TimeAggregationInterval,
self.TimeAggregationIntervalUnitsID]
class TrajectoryResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(TrajectoryResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
XLocation = Column('xlocation', Float(53), nullable=False)
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
YLocation = Column('ylocation', Float(53), nullable=False)
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
ZLocation = Column('zlocation', Float(53), nullable=False)
ZLocationUnitsID = Column('zlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
TrajectoryDistance = Column('trajectorydistance', Float(53), nullable=False)
TrajectoryDistanceAggregationInterval = Column(
'trajectorydistanceaggregationinterval',
Float(53),
nullable=False
)
TrajectoryDistanceUnitsID = Column('trajectorydistanceunitsid', Integer, nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column('timeaggregationintervalunitsid', ForeignKey(Units.UnitsID),
nullable=False)
ResultObj = relationship(TrajectoryResults)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResultValues.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
XLocationUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResultValues.XLocationUnitsID == Units.UnitsID'
)
YLocationUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResultValues.YLocationUnitsID == Units.UnitsID'
)
ZLocationUnitsObj = relationship(
Units,
primaryjoin='TrajectoryResultValues.ZLocationUnitsID == Units.UnitsID'
)
class TransectResultValues(Base):
ValueID = Column('valueid', BigIntegerType, primary_key=True)
ResultID = Column('resultid', ForeignKey(TransectResults.ResultID), nullable=False)
DataValue = Column('datavalue', Float(53), nullable=False)
ValueDateTime = Column('valuedatetime', DateTimeType, nullable=False)
ValueDateTimeUTCOffset = Column('valuedatetimeutcoffset', Integer, nullable=False)
XLocation = Column('xlocation', Float(53), nullable=False)
XLocationUnitsID = Column('xlocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
YLocation = Column('ylocation', Float(53), nullable=False)
YLocationUnitsID = Column('ylocationunitsid', ForeignKey(Units.UnitsID), nullable=False)
TransectDistance = Column('transectdistance', Float(53), nullable=False)
TransectDistanceAggregationInterval = Column(
'transectdistanceaggregationinterval',
Float(53),
nullable=False
)
TransectDistanceUnitsID = Column('transectdistanceunitsid', ForeignKey(Units.UnitsID), nullable=False)
CensorCodeCV = Column('censorcodecv', ForeignKey(CVCensorCode.Name), nullable=False, index=True)
QualityCodeCV = Column('qualitycodecv', ForeignKey(CVQualityCode.Name), nullable=False, index=True)
AggregationStatisticCV = Column('aggregationstatisticcv', ForeignKey(CVAggregationStatistic.Name),
nullable=False, index=True)
TimeAggregationInterval = Column('timeaggregationinterval', Float(53), nullable=False)
TimeAggregationIntervalUnitsID = Column(
'timeaggregationintervalunitsid',
ForeignKey(Units.UnitsID),
nullable=False
)
ResultObj = relationship(TransectResults)
TimeAggregationIntervalUnitsObj = relationship(
Units,
primaryjoin='TransectResultValues.TimeAggregationIntervalUnitsID == Units.UnitsID'
)
XLocationUnitsObj = relationship(
Units,
primaryjoin='TransectResultValues.XLocationUnitsID == Units.UnitsID'
)
YLocationUnitsObj = relationship(
Units,
primaryjoin='TransectResultValues.YLocationUnitsID == Units.UnitsID'
)
TransectDistanceUnitsObj = relationship(
Units,
primaryjoin='TransectResultValues.TransectDistanceUnitsID == Units.UnitsID'
)
class CategoricalResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(CategoricalResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(CategoricalResultValues)
class MeasurementResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(MeasurementResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(MeasurementResultValues)
class PointCoverageResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(PointCoverageResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(PointCoverageResultValues)
class ProfileResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(ProfileResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(ProfileResultValues)
class SectionResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(SectionResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(SectionResultValues)
class SpectraResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(SpectraResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(SpectraResultValues)
class TimeSeriesResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(TimeSeriesResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(TimeSeriesResultValues)
class TrajectoryResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(TrajectoryResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(TrajectoryResultValues)
class TransectResultValueAnnotations(Base):
BridgeID = Column('bridgeid', Integer, primary_key=True, nullable=False)
ValueID = Column('valueid', BigIntegerType, ForeignKey(TransectResultValues.ValueID), nullable=False)
AnnotationID = Column('annotationid', ForeignKey(Annotations.AnnotationID), nullable=False)
AnnotationObj = relationship(Annotations)
ValueObj = relationship(TransectResultValues)
def _changeSchema(schema):
import inspect
import sys
# get a list of all of the classes in the module
clsmembers = inspect.getmembers(sys.modules[__name__],
lambda member: inspect.isclass(member) and member.__module__ == __name__)
for name, Tbl in clsmembers:
import sqlalchemy.ext.declarative.api as api
if isinstance(Tbl, api.DeclarativeMeta):
# check to see if the schema is already set correctly
if Tbl.__table__.schema == schema:
return
Tbl.__table__.schema = schema
Tbl.__table_args__['schema'] = schema
def _getSchema(engine):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(engine)
for name in insp.get_schema_names():
if 'odm2' == name.lower():
return name
return insp.default_schema_name
def setSchema(engine):
s = _getSchema(engine)
_changeSchema(s)
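# Hedged usage sketch (illustrative only): the intended wiring, inferred from
# setSchema() above and the accompanying test suite, is to detect the schema on
# a live engine, rebind the declarative tables to it, then query through an
# ordinary SQLAlchemy session. The connection URL and filter value below are
# assumptions, not values from this package.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('postgresql+psycopg2://user:password@localhost/odm2')
#     setSchema(engine)                      # rebind tables to the detected schema
#     session = sessionmaker(bind=engine)()
#     values = (session.query(TimeSeriesResultValues)
#                      .filter(TimeSeriesResultValues.ResultID == 1)
#                      .all())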
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "odm2api/models.py",
"copies": "1",
"size": "91033",
"license": "bsd-3-clause",
"hash": 1418426195318752800,
"line_mean": 46.167357513,
"line_max": 110,
"alpha_frac": 0.7172893346,
"autogenerated": false,
"ratio": 4.33098625053523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5548275585135231,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from odm2api.ODMconnection import dbconnection
from odm2api.models import CVElevationDatum
import pytest
from sqlalchemy.engine import reflection
__author__ = 'valentine'
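# Each entry below is positional - (label, dialect, host or sqlite file path,
# database, user, password) - mirroring the arguments handed to
# dbconnection.createConnection() in the Connection fixture further down.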
dbs_readonly = [
['mysql_odm2_odm', 'mysql', 'localhost', 'odm2', 'ODM', 'odm'],
['mysql_odm2_root', 'mysql', 'localhost', 'odm2', 'root', None],
['postgresql_marchantariats', 'postgresql', 'localhost', 'marchantariats', 'postgres', 'iforget'],
['sqlite_wof', 'sqlite', './tests/spatialite/wof2odm/ODM2.sqlite', None, None, None]
]
dbs_test = [
['sqlite_memory', 'sqlite', ':memory:', None, None, None]
]
class Connection:
def __init__(self, request):
db = request.param
        print('dbtype', db[0], db[1])
session_factory = dbconnection.createConnection(db[1], db[2], db[3], db[4], db[5], echo=True)
assert session_factory is not None, ('failed to create a session for ', db[0], db[1])
assert session_factory.engine is not None, ('failed: session has no engine ', db[0], db[1])
insp = reflection.Inspector.from_engine(session_factory.engine)
insp.get_table_names()
self.session = session_factory.getSession()
@pytest.fixture(scope='session', params=dbs_readonly)
def setup(request):
return Connection(request)
def test_connection(setup):
q = setup.session.query(CVElevationDatum)
results = q.all()
assert len(results) > 0
def test_create_all_schema():
pass
def test_create_all_no_schema():
pass
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "tests/test_connection.py",
"copies": "1",
"size": "1559",
"license": "bsd-3-clause",
"hash": 1969935578429236500,
"line_mean": 27.8703703704,
"line_max": 102,
"alpha_frac": 0.6619627967,
"autogenerated": false,
"ratio": 3.241164241164241,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4403127037864241,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from odm2api.ODMconnection import SessionFactory
from odm2api.models import CVElevationDatum, setSchema
import pytest
__author__ = 'valentine'
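# Each entry below is positional - (label, dialect, SQLAlchemy connection URL,
# then optional database/user/password) - only the URL in position 2 is
# consumed by the aSessionFactory fixture further down.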
dbs_readonly = [
['mysql:ODM@Localhost/', 'mysql', 'mysql+pymysql://ODM:odm@localhost/'],
['mysql"root@Localhost/', 'mysql', 'mysql+pymysql://root@localhost/'],
['mysql:ODM@Localhost/odm2', 'mysql', 'mysql+pymysql://ODM:odm@localhost/odm2'],
['mysql"root@Localhost/odm2', 'mysql', 'mysql+pymysql://root@localhost/odm2'],
['postgresql_marchantariats_none', 'postgresql',
'postgresql+psycopg2://postgres:None@localhost/marchantariats',
'marchantariats', 'postgres', None],
['postgresql_marchantariats_empty', 'postgresql',
'postgresql+psycopg2://postgres@localhost/marchantariats',
'marchantariats', 'postgres', None],
['sqlite_wof', 'sqlite', 'sqlite:///./tests/spatialite/wof2odm/ODM2.sqlite', None, None, None]
]
dbs_test = [
    ['sqlite_test', 'sqlite', './tests/spatialite/odm2_test.sqlite', None, None, None]
]
class aSessionFactory:
def __init__(self, request):
db = request.param
        print('dbtype', db[0], db[1])
session_factory = SessionFactory(db[2])
setSchema(session_factory.engine)
assert session_factory is not None, ('failed to create a session for ', db[0], db[1])
self.session = session_factory.getSession()
@pytest.fixture(scope='session', params=dbs_readonly)
def setup(request):
return aSessionFactory(request)
def test_aSessionFactory(setup):
q = setup.session.query(CVElevationDatum)
results = q.all()
assert len(results) > 0
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "tests/test_SessionFactory.py",
"copies": "1",
"size": "1682",
"license": "bsd-3-clause",
"hash": 7110009812174243000,
"line_mean": 34.0416666667,
"line_max": 98,
"alpha_frac": 0.6831153389,
"autogenerated": false,
"ratio": 3.2284069097888675,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9402999213350893,
"avg_score": 0.0017046070675949508,
"num_lines": 48
} |
from __future__ import absolute_import, division, print_function
from odo.core import NetworkDispatcher, path
from datashape import discover
d = NetworkDispatcher('foo')
@d.register(float, int, cost=1.0)
def f(x, **kwargs):
return float(x)
@d.register(str, float, cost=1.0)
def g(x, **kwargs):
return str(x)
def test_basic():
assert [func for a, b, func in d.path(int, str)] == [f, g]
assert d.path(int, str) == d.path(1, '')
def test_convert_is_robust_to_failures():
foo = NetworkDispatcher('foo')
def badfunc(*args, **kwargs):
raise NotImplementedError()
class A(object): pass
class B(object): pass
class C(object): pass
discover.register((A, B, C))(lambda x: 'int')
foo.register(B, A, cost=1.0)(lambda x, **kwargs: 1)
foo.register(C, B, cost=1.0)(badfunc)
foo.register(C, A, cost=10.0)(lambda x, **kwargs: 2)
assert foo(C, A()) == 2
def test_ooc_behavior():
foo = NetworkDispatcher('foo')
class A(object): pass
class B(object): pass
class C(object): pass
discover.register((A, B, C))(lambda x: 'int')
foo.register(B, A, cost=1.0)(lambda x, **kwargs: 1)
foo.register(C, B, cost=1.0)(lambda x, **kwargs: x / 0) # note that this errs
foo.register(C, A, cost=10.0)(lambda x, **kwargs: 2)
assert [(a, b) for a, b, func in path(foo.graph, A, C)] == [(A, B), (B, C)]
ooc = set([A, C])
assert [(a, b) for a, b, func in path(foo.graph, A, C, ooc_types=ooc)] == \
[(A, C)]
| {
"repo_name": "Dannnno/odo",
"path": "odo/tests/test_core.py",
"copies": "3",
"size": "1515",
"license": "bsd-3-clause",
"hash": 6266833701682457000,
"line_mean": 26.5454545455,
"line_max": 81,
"alpha_frac": 0.596039604,
"autogenerated": false,
"ratio": 2.8747628083491463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9939503681950377,
"avg_score": 0.006259746079753905,
"num_lines": 55
} |
from __future__ import absolute_import, division, print_function
from oem_core.core.plugin import PluginManager
from oem_framework.plugin import Plugin
from oem_framework.storage import ProviderStorage
class Provider(Plugin):
def __init__(self, storage):
self._storage = storage
self._client = None
#
# Properties
#
@property
def client(self):
return self._client
@property
def formats(self):
return self._client.formats
@property
def plugins(self):
return self._client.plugins
@property
def storage(self):
return self._storage
#
# Public methods
#
def initialize(self, client):
self._client = client
self._storage = self._construct_storage(self._storage)
#
# Abstract methods
#
def fetch(self, source, target, key, metadata):
raise NotImplementedError
def open_database(self, source, target):
raise NotImplementedError
#
# Private methods
#
def _construct_storage(self, storage_or_key):
if isinstance(storage_or_key, ProviderStorage):
# Use provided source
storage = storage_or_key
elif PluginManager.has('storage', storage_or_key):
# Construct source by key
storage = PluginManager.get('storage', storage_or_key)()
else:
raise ValueError('Unknown storage interface: %r' % storage_or_key)
# Initialize source
storage.initialize(self._client)
return storage
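# Hedged sketch of a concrete provider (illustrative only; the class name and
# storage behaviour below are assumptions, not part of this package). A
# subclass only needs to supply the two abstract hooks declared above:
#
#     class StaticProvider(Provider):
#         def open_database(self, source, target):
#             # assumes the configured storage can hand back the database directly
#             return self.storage.open_database(source, target)
#
#         def fetch(self, source, target, key, metadata):
#             # nothing to download for a purely local storage
#             return True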
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/core/providers/base.py",
"copies": "1",
"size": "1564",
"license": "bsd-3-clause",
"hash": 7217312248077255000,
"line_mean": 21.6666666667,
"line_max": 78,
"alpha_frac": 0.6221227621,
"autogenerated": false,
"ratio": 4.533333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655456095433333,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from oem.core.exceptions import AbsoluteNumberRequiredError
from oem.media.movie.identifier import MovieIdentifier
from oem.media.show.identifier import EpisodeIdentifier
from oem.media.show.match import EpisodeMatch
from oem_framework.core.helpers import try_convert
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
class ShowMapper(object):
def __init__(self, service):
self._service = service
def match(self, show, identifier, resolve_mappings=True):
if identifier is None:
# Create identifier for S01E01
identifier = EpisodeIdentifier(1, 1)
elif isinstance(identifier, MovieIdentifier):
# Convert movie identifier to S01E01
identifier = EpisodeIdentifier(1, 1, progress=identifier.progress)
# Validate identifier
if identifier and not identifier.valid:
raise ValueError('Invalid value provided for "identifier" parameter')
# Show
best = self._match_show(show, identifier)
# Season
season, result = self._match_season(show, identifier)
if result:
best = result
if season:
# Episode
result = self._match_episode(
show, season, identifier,
resolve_mappings=resolve_mappings
)
if result:
best = result
# Return best result
return best
def _match_show(self, show, identifier):
# Retrieve "default_season" parameter
default_season = None
if 'default_season' in show.parameters:
default_season = show.parameters['default_season']
if default_season != 'a':
# Cast season number to an integer
default_season = try_convert(default_season, int)
if default_season is None:
log.warn(
'Invalid value provided for the "default_season" parameter: %r',
show.parameters['default_season']
)
return None
# Retrieve season number
season_num = identifier.season_num
if season_num is None or default_season is None or default_season == 'a':
season_num = default_season
elif season_num > 0:
season_num = default_season + (season_num - 1)
# Retrieve episode number
episode_num = identifier.episode_num
if 'episode_offset' in show.parameters:
episode_num += int(show.parameters['episode_offset'])
# Build episode match
if season_num != 'a':
match = EpisodeMatch(
self._get_identifiers(show),
season_num=season_num,
episode_num=episode_num,
progress=identifier.progress
)
else:
if identifier.absolute_num is None:
raise AbsoluteNumberRequiredError('Unable to match %r, an absolute number is required' % identifier)
match = EpisodeMatch(
self._get_identifiers(show),
absolute_num=identifier.absolute_num,
progress=identifier.progress
)
if not match.valid:
return None
return match
def _match_season(self, show, identifier):
# Try retrieve matching season
season = show.seasons.get(str(identifier.season_num)) or show.seasons.get('a')
if not season:
return None, None
if season.number == 'a':
if identifier.absolute_num is None:
raise AbsoluteNumberRequiredError('Unable to match %r, an absolute number is required' % identifier)
return season, EpisodeMatch(
self._get_identifiers(show, season),
absolute_num=identifier.absolute_num,
progress=identifier.progress
)
# Look for matching season mapping
for season_mapping in season.mappings:
if not (season_mapping.start <= identifier.episode_num <= season_mapping.end):
continue
return season, EpisodeMatch(
self._get_identifiers(show, season),
int(season_mapping.season),
identifier.episode_num + season_mapping.offset,
progress=identifier.progress
)
# Retrieve "default_season" parameter
default_season = None
if 'default_season' in season.parameters:
default_season = season.parameters['default_season']
if default_season != 'a':
# Cast season number to an integer
default_season = try_convert(default_season, int)
if default_season is None:
log.warn(
'Invalid value provided for the "default_season" parameter: %r',
season.parameters['default_season']
)
return season, None
# Retrieve season number
season_num = identifier.season_num
if season.identifiers:
season_num = 1
if default_season is not None:
season_num = default_season
# Retrieve episode number
episode_num = identifier.episode_num
# Apply episode offset
episode_offset = self._get_parameter('episode_offset', show, season)
if episode_offset is not None:
episode_num += int(episode_offset)
# Build season match
match = EpisodeMatch(
self._get_identifiers(show, season),
season_num=season_num,
episode_num=episode_num,
progress=identifier.progress
)
if not match.valid:
return season, None
return season, match
def _match_episode(self, show, season, identifier, resolve_mappings=True):
episode = season.episodes.get(str(identifier.episode_num))
if not episode:
return None
if not resolve_mappings:
match = EpisodeMatch(
self._get_identifiers(show, season, episode),
mappings=episode.mappings
)
if not match.valid:
return None
return match
if identifier.part is not None and identifier.part - 1 < len(episode.mappings):
# Parse episode mapping
valid, match = self._parse_episode_mapping(
show, season, episode, episode.mappings[identifier.part - 1],
part=identifier.part
)
if valid:
return match
for episode_mapping in episode.mappings:
# Parse timeline attributes
progress = identifier.progress
if episode_mapping.timeline:
if identifier.progress is None:
raise ValueError('Missing required parameter "progress"')
if 'source' in episode_mapping.timeline:
timeline_source = episode_mapping.timeline['source']
if not (timeline_source.start <= identifier.progress <= timeline_source.end):
continue
# Calculate progress
progress = (
float(identifier.progress - timeline_source.start) *
(100 / (timeline_source.end - timeline_source.start))
)
elif 'target' in episode_mapping.timeline:
timeline_target = episode_mapping.timeline['target']
# Calculate progress
progress = (
timeline_target.start + (
float(identifier.progress) /
(100 / (timeline_target.end - timeline_target.start))
)
)
# Parse episode mapping
valid, match = self._parse_episode_mapping(
show, season, episode, episode_mapping,
progress=progress
)
if valid:
return match
return None
def _parse_episode_mapping(self, show, season, episode, episode_mapping, progress=None, part=None):
# Parse mapping attributes
try:
season_num = int(episode_mapping.season)
except (TypeError, ValueError):
return False, None
try:
episode_num = int(episode_mapping.number)
except (TypeError, ValueError):
return False, None
# Return episode match
match = EpisodeMatch(
self._get_identifiers(show, season, episode),
season_num=season_num,
episode_num=episode_num,
progress=progress,
part=part
)
if not match.valid:
return True, None
return True, match
def _get_identifiers(self, show, season=None, episode=None):
# Retrieve identifiers from objects
if show and season and episode:
identifiers = episode.identifiers or season.identifiers or show.identifiers
elif show and season:
identifiers = season.identifiers or show.identifiers
else:
identifiers = show.identifiers
# Copy identifiers
if identifiers:
identifiers = deepcopy(identifiers)
else:
identifiers = {}
# Remove source identifier
if self._service.source_key in identifiers:
del identifiers[self._service.source_key]
return identifiers
def _get_parameter(self, key, show, season=None, episode=None):
for obj in [episode, season, show]:
if not obj:
continue
return obj.parameters.get(key)
return None
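# Hedged usage sketch (illustrative only): `service` and `show` are assumed to
# come from a loaded OEM collection and are not constructed here.
#
#     mapper = ShowMapper(service)
#     match = mapper.match(show, EpisodeIdentifier(season_num=2, episode_num=5))
#     if match:
#         print(match.identifiers, match.season_num, match.episode_num)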
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/show/mapper.py",
"copies": "1",
"size": "10001",
"license": "bsd-3-clause",
"hash": 5635340066726829000,
"line_mean": 30.9520766773,
"line_max": 116,
"alpha_frac": 0.5608439156,
"autogenerated": false,
"ratio": 5.076649746192893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6137493661792893,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from oem.core.providers.base import Provider
from oem.version import __version__
from oem_core.core.plugin import PluginManager
import inspect
import logging
import six
log = logging.getLogger(__name__)
class Client(object):
version = __version__
def __init__(self, services, provider, formats=None):
"""Client for OpenEntityMap.
:param services: List of services to load (e.g. "anidb")
:type services: list
:param provider: Provider to use for databases (e.g. "package", "release/incremental")
        :type provider: str or oem.core.providers.base.Provider
:param formats: List of formats to use, or `None` for any
:type formats: list or None
"""
self._formats = formats
# Discover available plugins
self._plugins = PluginManager
self._plugins.discover()
# Construct plugins
self._services = self._construct_services(services)
self._provider = self._construct_provider(provider)
# Build database + package tables
self._databases = {}
self._packages = {}
for _, cls in self._load_plugins('client', services, construct=False):
# Merge service databases into client
if cls.__databases__:
self._databases.update(cls.__databases__)
else:
log.warn('Service %r has no "__databases__" defined', cls.__key__)
# Merge service packages into client
if cls.__packages__:
self._packages.update(cls.__packages__)
else:
log.warn('Service %r has no "__packages__" defined', cls.__key__)
@property
def formats(self):
return self._formats
@property
def plugins(self):
return self._plugins
@property
def provider(self):
return self._provider
def load_all(self):
for service in six.itervalues(self._services):
service.load()
def database_name(self, source, target):
return self._databases.get((source, target))
def package_name(self, source, target):
return self._packages.get((source, target))
def __getitem__(self, source):
return ServiceInterface(self, source)
#
# Private methods
#
def _construct_services(self, services):
result = {}
for _, cls in self._load_plugins('client', services, construct=False):
# Add supported service conversions
for source, targets in cls.__services__.items():
for target in targets:
# Construct service
result[(source, target)] = cls(self, source, target)
return result
def _construct_provider(self, provider_or_key):
if isinstance(provider_or_key, Provider):
# Class
provider = provider_or_key
elif isinstance(provider_or_key, six.string_types):
# Identifier
provider = PluginManager.get('client-provider', provider_or_key)
if provider is None:
raise ValueError('Unable to find provider: %r' % provider_or_key)
else:
raise ValueError('Unknown provider: %r' % provider_or_key)
# Ensure provider has been constructed
if inspect.isclass(provider):
provider = provider()
# Initialize provider
provider.initialize(self)
return provider
@staticmethod
def _load_plugins(kind, keys, construct=True):
if not keys:
return
for name in keys:
cls = PluginManager.get(kind, name)
if cls is None:
log.warn('Unable to find plugin: %r', name)
continue
if not cls.available:
log.warn('Plugin %r is not available', name)
continue
if construct:
yield cls.__key__, cls()
else:
yield cls.__key__, cls
class ServiceInterface(object):
def __init__(self, client, source):
self.client = client
self.source = source
def to(self, target):
try:
return self.client._services[(self.source, target)]
except KeyError:
raise KeyError('Unknown service: %s -> %s' % (self.source, target))
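# Hedged usage sketch (illustrative only): the service and provider keys follow
# the examples given in Client.__init__'s docstring ("anidb", "package"); the
# target key 'tvdb' is an assumption and depends on which plugins are installed.
#
#     client = Client(services=['anidb'], provider='package')
#     client.load_all()
#     service = client['anidb'].to('tvdb')   # ServiceInterface lookup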
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/client.py",
"copies": "1",
"size": "4409",
"license": "bsd-3-clause",
"hash": -1233422667628177700,
"line_mean": 28.3933333333,
"line_max": 94,
"alpha_frac": 0.5788160581,
"autogenerated": false,
"ratio": 4.621593291404612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5700409349504613,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from oem_framework.core.elapsed import Elapsed
from oem_framework.plugin import Plugin
import logging
log = logging.getLogger(__name__)
class Service(Plugin):
__databases__ = {}
__packages__ = {}
__services__ = {}
def __init__(self, client, source, target, formats=None):
self._client = client
self._source = source
self._target = target
self._formats = formats
self._database = None
self._collection = None
self._loaded = False
@property
def database_name(self):
return 'oem_database_%s_%s' % (self._source, self._target)
@property
def loaded(self):
return self._loaded
@property
def package_name(self):
return 'oem-database-%s-%s' % (self._source, self._target)
@property
def provider(self):
return self._client._provider
@property
def source_key(self):
return self._source
@property
def target_key(self):
return self._target
@Elapsed.track
def load(self):
if self._loaded:
return True
# Load database
self._database = self.provider.open_database(
self.source_key,
self.target_key
)
if self._database is None:
log.warn('Unable to load database for: %s -> %s', self.source_key, self.target_key)
return False
# Load collection
self._collection = self._database.load_collection(
self._source,
self._target
)
if self._collection is None:
log.warn('Unable to load collection for: %s -> %s', self.source_key, self.target_key)
return False
# Successfully loaded service
log.info('Loaded service: %-5s -> %-5s (storage: %r)', self._source, self._target, self._database.storage)
self._loaded = True
return True
@Elapsed.track
def fetch(self, key, metadata):
# Ensure database is loaded
if not self.load():
return False
# Ensure item is loaded
return self.provider.fetch(
self.source_key,
self.target_key,
key, metadata
)
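# Hedged usage sketch (illustrative only): services are normally constructed by
# the client; once built, the expected flow is load() followed by fetch(). The
# key and metadata values below are assumptions.
#
#     if service.load():
#         service.fetch('1', {})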
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/core/services/base.py",
"copies": "1",
"size": "2285",
"license": "bsd-3-clause",
"hash": -7651818612636543000,
"line_mean": 23.8369565217,
"line_max": 114,
"alpha_frac": 0.572428884,
"autogenerated": false,
"ratio": 4.215867158671586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5288296042671586,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from oem.media.core.base.identifier import Identifier
class EpisodeIdentifier(Identifier):
    __slots__ = ['season_num', 'episode_num', 'absolute_num', 'progress', 'part']
def __init__(self, season_num=None, episode_num=None, absolute_num=None, progress=None, part=None):
# Season + Episode Identifier
self.season_num = season_num
self.episode_num = episode_num
# Absolute Identifier
self.absolute_num = absolute_num
# Extra
self.progress = progress
self.part = part
@property
def valid(self):
return (
self.season_num is not None and
self.episode_num is not None
) or (
self.absolute_num is not None
)
def to_dict(self):
result = {}
if self.absolute_num is not None:
result['absolute_num'] = self.absolute_num
if self.season_num is not None:
result['season_num'] = self.season_num
if self.episode_num is not None:
result['episode_num'] = self.episode_num
if self.progress is not None:
result['progress'] = self.progress
if self.part is not None:
result['part'] = self.part
return result
def __repr__(self):
attributes = [
('%s: %r' % (key, getattr(self, key)))
for key in ['progress', 'part'] if getattr(self, key)
]
fragments = []
if self.absolute_num:
fragments.append('[%03d]' % self.absolute_num)
if self.season_num is not None and self.episode_num is not None:
fragments.append('S%02dE%02d' % (self.season_num, self.episode_num))
elif self.season_num is not None:
fragments.append('S%02d' % self.season_num)
if attributes:
fragments.append('(%s)' % (', '.join(attributes)))
return '<%s%s>' % (
self.__class__.__name__,
(' %s' % (' '.join(fragments))) if fragments else ''
)
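# Hedged demo (illustrative values only; guarded so importing the module is
# unaffected): an identifier is valid with either a season/episode pair or an
# absolute number.
if __name__ == '__main__':
    ident = EpisodeIdentifier(season_num=2, episode_num=5, progress=42.5)
    print(ident)                                      # <EpisodeIdentifier S02E05 (progress: 42.5)>
    print(ident.valid, ident.to_dict())
    print(EpisodeIdentifier(absolute_num=17).valid)   # True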
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/show/identifier.py",
"copies": "1",
"size": "2086",
"license": "bsd-3-clause",
"hash": -8820015782052436000,
"line_mean": 27.9722222222,
"line_max": 103,
"alpha_frac": 0.5570469799,
"autogenerated": false,
"ratio": 3.9657794676806084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003050147726073652,
"num_lines": 72
} |
from __future__ import absolute_import, division, print_function
from oem.media.movie.identifier import MovieIdentifier
from oem.media.movie.match import MovieMatch
from oem.media.show import EpisodeIdentifier
from copy import deepcopy
class MovieMapper(object):
def __init__(self, service):
self._service = service
def match(self, movie, identifier, resolve_mappings=True):
if identifier is not None and not identifier.valid:
raise ValueError('Invalid value provided for "identifier" parameter')
if isinstance(identifier, EpisodeIdentifier) and identifier.season_num != 1:
return None
# Movie
best = self._match_movie(movie, identifier)
# Part
result = self._match_part(movie, identifier)
if result:
best = result
# Return best result
return best
def _match_movie(self, movie, identifier):
if isinstance(identifier, MovieIdentifier) and identifier.part is not None and identifier.part > 1:
return None
if isinstance(identifier, EpisodeIdentifier) and identifier.episode_num != 1:
return None
# Retrieve progress
progress = None
if identifier:
progress = identifier.progress
# Create movie match
return MovieMatch(
self._get_identifiers(movie),
progress=progress
)
def _match_part(self, movie, identifier):
if isinstance(identifier, MovieIdentifier):
part_num = identifier.part
elif isinstance(identifier, EpisodeIdentifier):
part_num = identifier.episode_num
else:
part_num = 1
# Retrieve part
part = movie.parts.get(str(part_num))
if not part:
return None
# Retrieve progress
progress = None
if identifier:
progress = identifier.progress
# Create movie match
return MovieMatch(
self._get_identifiers(part),
progress=progress
)
def _get_identifiers(self, movie):
# Retrieve identifiers from objects
identifiers = movie.identifiers
# Copy identifiers
if identifiers:
identifiers = deepcopy(identifiers)
else:
identifiers = {}
# Remove source identifier
if self._service.source_key in identifiers:
del identifiers[self._service.source_key]
return identifiers
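# Hedged usage sketch (illustrative only): `service` and `movie` are assumed to
# come from a loaded OEM collection. S01E01 maps onto the movie itself; later
# episode numbers map onto its parts.
#
#     mapper = MovieMapper(service)
#     match = mapper.match(movie, EpisodeIdentifier(season_num=1, episode_num=1))
#     if match:
#         print(match.identifiers)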
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/movie/mapper.py",
"copies": "1",
"size": "2523",
"license": "bsd-3-clause",
"hash": -6386671481307321000,
"line_mean": 26.4239130435,
"line_max": 107,
"alpha_frac": 0.6155370591,
"autogenerated": false,
"ratio": 5.005952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00048746732684321634,
"num_lines": 92
} |
from __future__ import absolute_import, division, print_function
from oem.media.movie.identifier import MovieIdentifier
class MovieMatch(MovieIdentifier):
def __init__(self, identifiers, progress=None):
super(MovieMatch, self).__init__(progress)
self.identifiers = identifiers or {}
def to_dict(self):
result = super(MovieMatch, self).to_dict()
result['identifiers'] = self.identifiers
return result
def __repr__(self):
fragments = []
# Identifiers
if self.identifiers:
fragments.append(
'(' + (', '.join(
('%s: %r' % (key, value))
for key, value in self.identifiers.items()
)) + ')'
)
# Attributes
attributes = [
('%s: %r' % (key, getattr(self, key))) for key in ['progress'] if getattr(self, key)
]
if attributes:
fragments.append('(%s)' % (', '.join(attributes)))
return '<%s%s>' % (
self.__class__.__name__,
(' %s' % (' '.join(fragments))) if fragments else ''
)
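# Hedged demo (illustrative values only; guarded so importing the module is
# unaffected): a match simply pairs target identifiers with optional progress.
if __name__ == '__main__':
    match = MovieMatch({'imdb': 'tt0133093'}, progress=75.0)
    print(match)            # <MovieMatch (imdb: 'tt0133093') (progress: 75.0)>
    print(match.to_dict())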
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/movie/match.py",
"copies": "1",
"size": "1147",
"license": "bsd-3-clause",
"hash": -5334837377906126000,
"line_mean": 26.3095238095,
"line_max": 96,
"alpha_frac": 0.508282476,
"autogenerated": false,
"ratio": 4.361216730038023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5369499206038023,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from oem.media.show.identifier import EpisodeIdentifier
class EpisodeMatch(EpisodeIdentifier):
def __init__(self, identifiers, season_num=None, episode_num=None, absolute_num=None,
progress=None, part=None, mappings=None):
super(EpisodeMatch, self).__init__(
season_num=season_num,
episode_num=episode_num,
absolute_num=absolute_num,
progress=progress,
part=part
)
self.identifiers = identifiers or {}
self.mappings = mappings or []
@property
def valid(self):
return len(self.identifiers) > 0 and (
super(EpisodeMatch, self).valid or
len(self.mappings) > 0
)
def to_dict(self):
result = super(EpisodeMatch, self).to_dict()
result['identifiers'] = self.identifiers
if self.mappings:
result['mappings'] = [m.to_dict(compact=False) for m in self.mappings]
return result
def __repr__(self):
fragments = []
# Identifiers
if self.identifiers:
fragments.append(
'(' + (', '.join(
('%s: %r' % (key, value)) for key, value in self.identifiers.items()
)) + ')'
)
if self.absolute_num is not None or self.season_num is not None:
fragments.append('-')
# Absolute
if self.absolute_num is not None:
fragments.append('[%03d]' % self.absolute_num)
# Season + Episode
if self.season_num is not None and self.episode_num is not None:
fragments.append('S%02dE%02d' % (self.season_num, self.episode_num))
elif self.season_num is not None:
fragments.append('S%02d' % self.season_num)
# Attributes
attributes = [
('%s: %r' % (key, getattr(self, key))) for key in ['progress'] if getattr(self, key)
]
if attributes:
fragments.append('(%s)' % (', '.join(attributes)))
return '<%s%s>' % (
self.__class__.__name__,
(' %s' % (' '.join(fragments))) if fragments else ''
)
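# Hedged demo (illustrative values only; guarded so importing the module is
# unaffected): a match is valid once identifiers and an episode reference are set.
if __name__ == '__main__':
    match = EpisodeMatch({'tvdb': 73141}, season_num=1, episode_num=2)
    print(match)            # <EpisodeMatch (tvdb: 73141) - S01E02>
    print(match.valid, match.to_dict())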
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/show/match.py",
"copies": "1",
"size": "2241",
"license": "bsd-3-clause",
"hash": -9010289649903734000,
"line_mean": 29.2837837838,
"line_max": 96,
"alpha_frac": 0.5408299866,
"autogenerated": false,
"ratio": 4.111926605504587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5152756592104587,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from operator import add, getitem
import inspect
from collections import Iterable
from bisect import bisect
import operator
import math
from itertools import product, count
from collections import Iterator
from functools import partial, wraps
from toolz.curried import (identity, pipe, partition, concat, unique, pluck,
frequencies, join, first, memoize, map, groupby, valmap, accumulate,
merge, curry, compose)
import numpy as np
from . import chunk
from .slicing import slice_array, insert_many, remove_full_slices
from ..utils import deepmap, ignoring
from ..async import inline_functions
from ..optimize import cull, inline
from ..compatibility import unicode
from .. import threaded, core
names = ('x_%d' % i for i in count(1))
def getem(arr, blockdims=None, blockshape=None, shape=None):
""" Dask getting various chunks from an array-like
>>> getem('X', blockshape=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getitem, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getitem, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getitem, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getitem, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', blockdims=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getitem, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getitem, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getitem, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getitem, 'X', (slice(0, 2), slice(3, 6)))}
"""
if not blockdims:
blockdims = blockdims_from_blockshape(shape, blockshape)
cumdims = [list(accumulate(add, (0,) + bds[:-1])) for bds in blockdims]
keys = list(product([arr], *[range(len(bds)) for bds in blockdims]))
shapes = product(*blockdims)
starts = product(*cumdims)
values = ((getitem, arr) + (tuple(slice(s, s+dim)
for s, dim in zip(start, shape)),)
for start, shape in zip(starts, shapes))
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
""" Find block dimensions from arguments
Parameters
----------
argpairs: iterable
name, ijk index pairs
numblocks: dict
maps {name: number of blocks}
sentinels: iterable (optional)
values for singleton dimensions
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
L = concat([zip(inds, dims)
for (x, inds), (x, dims)
in join(first, argpairs, first, numblocks.items())])
g = groupby(0, L)
g = dict((k, set([d for i, d in v])) for k, v in g.items())
g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())
if g2 and not set(map(len, g2.values())) == set([1]):
raise ValueError("Shapes do not align %s" % g)
return valmap(first, g2)
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
    yields an embarrassingly parallel communication pattern and is read as
z_i = func(x_i, y_i)
More complex patterns may emerge, including multiple indices
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
    Indices missing in the output but present in the inputs result in many
inputs being sent to one function (see examples).
Examples
--------
    Simple embarrassingly parallel map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
    {('z', 0, 0): (addT, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (addT, ('x', 0, 1), ('y', 1, 0)),
     ('z', 1, 0): (addT, ('x', 1, 0), ('y', 0, 1)),
     ('z', 1, 1): (addT, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
"""
numblocks = kwargs['numblocks']
argpairs = list(partition(2, arrind_pairs))
assert set(numblocks) == set(pluck(0, argpairs))
all_indices = pipe(argpairs, pluck(1), concat, set)
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
tups = lol_tuples((arg,), ind, kd, dummies)
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
args.append(tups2)
valtups.append(tuple(args))
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
vals = [(func,) + vt for vt in valtups]
return dict(zip(keys, vals))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
def rec_concatenate(arrays, axis=0):
""" Recursive np.concatenate
>>> x = np.array([1, 2])
>>> rec_concatenate([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
if isinstance(arrays, Iterator):
arrays = list(arrays)
if isinstance(arrays[0], Iterator):
arrays = list(map(list, arrays))
if not isinstance(arrays[0], np.ndarray):
arrays = [rec_concatenate(a, axis=axis + 1) for a in arrays]
if arrays[0].ndim <= axis:
arrays = [a[None, ...] for a in arrays]
return np.concatenate(arrays, axis=axis)
def map_blocks(x, func, blockshape=None, blockdims=None):
""" Map a function across all blocks of a dask array
    You may also specify the blockdims/blockshape of the resulting array. If
    you don't, the resulting array is assumed to have the same block
    structure as the input.
>>> import dask.array as da
>>> x = da.ones((8,), blockshape=(4,))
>>> np.array(x.map_blocks(lambda x: x + 1))
array([ 2., 2., 2., 2., 2., 2., 2., 2.])
If function changes shape of the blocks provide a blockshape
>>> y = x.map_blocks(lambda x: x[::2], blockshape=(2,))
Or, if the result is ragged, provide a blockdims
>>> y = x.map_blocks(lambda x: x[::2], blockdims=((2, 2),))
Your block function can learn where in the array it is if it supports a
block_id keyword argument. This will receive entries like (2, 0, 1), the
position of the block in the dask array.
>>> def func(block, block_id=None):
... pass
"""
if blockshape is not None:
blockdims = tuple([nb * (bs,)
for nb, bs in zip(x.numblocks, blockshape)])
if blockdims is None:
blockdims = x.blockdims
name = next(names)
try:
spec = inspect.getargspec(func)
    except Exception:  # e.g. builtins or partials where getargspec is unsupported
spec = None
if spec and 'block_id' in spec.args:
dsk = dict(((name,) + k[1:], (partial(func, block_id=k[1:]), k))
for k in core.flatten(x._keys()))
else:
dsk = dict(((name,) + k[1:], (func, k)) for k in core.flatten(x._keys()))
return Array(merge(dsk, x.dask), name, blockdims=blockdims)
def blockdims_from_blockshape(shape, blockshape):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
"""
return tuple((bd,) * (d // bd) + ((d % bd,) if d % bd else ())
for d, bd in zip(shape, blockshape))
class Array(object):
""" Array object holding a dask
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
blockdims : iterable of tuples
block sizes along each dimension
"""
__slots__ = 'dask', 'name', 'blockdims'
def __init__(self, dask, name, shape=None, blockshape=None, blockdims=None):
self.dask = dask
self.name = name
if blockdims is None:
blockdims = blockdims_from_blockshape(shape, blockshape)
if blockdims is None:
raise ValueError("Either give shape and blockshape or blockdims")
self.blockdims = tuple(map(tuple, blockdims))
@property
def numblocks(self):
return tuple(map(len, self.blockdims))
@property
def shape(self):
return tuple(map(sum, self.blockdims))
@property
def dtype(self):
if self.shape:
return self[(0,) * self.ndim].compute().dtype
else:
return self.compute().dtype
def __repr__(self):
return ("dask.array<%s, shape=%s, blockdims=%s>" %
(self.name, self.shape, self.blockdims))
def _get_block(self, *args):
return core.get(self.dask, (self.name,) + args)
@property
def ndim(self):
return len(self.shape)
def _keys(self, *args):
if self.ndim == 0:
return [(self.name,)]
ind = len(args)
if ind + 1 == self.ndim:
return [(self.name,) + args + (i,)
for i in range(self.numblocks[ind])]
else:
return [self._keys(*(args + (i,)))
for i in range(self.numblocks[ind])]
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
return x
def store(self, target, **kwargs):
""" Store dask array in array-like object, overwrite data in target
This stores a dask into an object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.blockshape,
... dtype='f8') # doctest: +SKIP
>>> x.store(dset) # doctest: +SKIP
"""
update = insert_to_ooc(target, self)
dsk = merge(self.dask, update)
get(dsk, list(update.keys()), **kwargs)
return target
def compute(self, **kwargs):
result = get(self.dask, self._keys(), **kwargs)
if self.shape:
result = rec_concatenate(result)
else:
while isinstance(result, Iterable):
result = result[0]
return result
__float__ = __int__ = __bool__ = __complex__ = compute
def __getitem__(self, index):
# Field access, e.g. x['a'] or x[['a', 'b']]
if (isinstance(index, (str, unicode)) or
( isinstance(index, list)
and all(isinstance(i, (str, unicode)) for i in index))):
return elemwise(getitem, self, index)
# Slicing
out = next(names)
if not isinstance(index, tuple):
index = (index,)
if all(i == slice(None, None, None) for i in index):
return self
dsk, blockdims = slice_array(out, self.name, self.blockdims, index)
return Array(merge(self.dask, dsk), out, blockdims=blockdims)
@wraps(np.dot)
def dot(self, other):
return tensordot(self, other, axes=((self.ndim-1,), (other.ndim-2,)))
@property
def T(self):
return transpose(self)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
def any(self, axis=None, keepdims=False):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims)
def argmin(self, axis=None):
from .reductions import argmin
return argmin(self, axis=axis)
def argmax(self, axis=None):
from .reductions import argmax
return argmax(self, axis=axis)
def sum(self, axis=None, keepdims=False):
from .reductions import sum
return sum(self, axis=axis, keepdims=keepdims)
def prod(self, axis=None, keepdims=False):
from .reductions import prod
return prod(self, axis=axis, keepdims=keepdims)
def mean(self, axis=None, keepdims=False):
from .reductions import mean
return mean(self, axis=axis, keepdims=keepdims)
def std(self, axis=None, keepdims=False, ddof=0):
from .reductions import std
return std(self, axis=axis, keepdims=keepdims, ddof=ddof)
def var(self, axis=None, keepdims=False, ddof=0):
from .reductions import var
return var(self, axis=axis, keepdims=keepdims, ddof=ddof)
def vnorm(self, ord=None, axis=None, keepdims=False):
from .reductions import vnorm
return vnorm(self, ord=ord, axis=axis, keepdims=keepdims)
@wraps(map_blocks)
def map_blocks(self, func, blockshape=None, blockdims=None):
return map_blocks(self, func, blockshape=blockshape,
blockdims=blockdims)
def from_array(x, blockdims=None, blockshape=None, name=None, **kwargs):
""" Create dask array from something that looks like an array
Input must have a ``.shape`` and support numpy-style slicing.
Example
-------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, blockshape=(1000, 1000)) # doctest: +SKIP
"""
if blockdims is None:
blockdims = blockdims_from_blockshape(x.shape, blockshape)
name = name or next(names)
dask = merge({name: x}, getem(name, blockdims=blockdims))
return Array(dask, name, blockdims=blockdims)
def atop(func, out, out_ind, *args):
""" Array object version of dask.array.top """
arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))
dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
shapes = dict((a.name, a.shape) for a, _ in arginds)
nameinds = [(a.name, i) for a, i in arginds]
dims = broadcast_dimensions(nameinds, shapes)
shape = tuple(dims[i] for i in out_ind)
blockdim_dict = dict((a.name, a.blockdims) for a, _ in arginds)
blockdimss = broadcast_dimensions(nameinds, blockdim_dict)
blockdims = tuple(blockdimss[i] for i in out_ind)
dsks = [a.dask for a, _ in arginds]
return Array(merge(dsk, *dsks), out, shape, blockdims=blockdims)
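# A minimal usage sketch (assuming ``x`` is an existing 2-d dask Array; not
# taken from the original docstrings): each positional pair passed to ``atop``
# is (array, index-tuple), and the output index names the result's dimensions.
# Permuting the output index relative to the input index gives a blocked
# transpose:
#
#   y = atop(np.transpose, next(names), (1, 0), x, (0, 1))
#   y.shape == x.shape[::-1]  # True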
def get(dsk, keys, get=threaded.get, **kwargs):
""" Specialized get function
1. Handle inlining
2. Use custom score function
"""
    fast_functions = kwargs.get('fast_functions',
                                set([getitem, np.transpose]))
dsk2 = cull(dsk, list(core.flatten(keys)))
dsk3 = remove_full_slices(dsk2)
dsk4 = inline_functions(dsk3, fast_functions=fast_functions)
return get(dsk4, keys, **kwargs)
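# A minimal sketch of how this specialized ``get`` is used (assuming ``x`` is
# an existing dask Array): ``Array.compute`` funnels through it, then stitches
# the resulting blocks back together.
#
#   blocks = get(x.dask, x._keys())
#   result = rec_concatenate(blocks)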
stacked_names = ('stack-%d' % i for i in count(1))
def stack(seq, axis=0):
"""
Stack arrays along a new axis
    Given a sequence of dask Arrays, form a new dask Array by stacking them
along a new dimension (axis=0 by default)
Example
-------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also:
concatenate
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis + 1
if axis > ndim:
raise ValueError("Axis must not be greater than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
assert len(set(a.blockdims for a in seq)) == 1 # same blockshape
shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
blockdims = ( seq[0].blockdims[:axis]
+ ((1,) * n,)
+ seq[0].blockdims[axis:])
name = next(stacked_names)
keys = list(product([name], *[range(len(bd)) for bd in blockdims]))
names = [a.name for a in seq]
values = [(names[key[axis+1]],) + key[1:axis + 1] + key[axis + 2:]
for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
return Array(dsk2, name, shape, blockdims=blockdims)
concatenate_names = ('concatenate-%d' % i for i in count(1))
def concatenate(seq, axis=0):
"""
Concatenate arrays along an existing axis
    Given a sequence of dask Arrays, form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Example
-------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also:
stack
"""
n = len(seq)
ndim = len(seq[0].shape)
if axis < 0:
axis = ndim + axis
if axis >= ndim:
raise ValueError("Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d" % (ndim, axis))
bds = [a.blockdims for a in seq]
if not all(len(set(bds[i][j] for i in range(n))) == 1
for j in range(len(bds[0])) if j != axis):
raise ValueError("Block shapes do not align")
shape = (seq[0].shape[:axis]
+ (sum(a.shape[axis] for a in seq),)
+ seq[0].shape[axis + 1:])
blockdims = ( seq[0].blockdims[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq[0].blockdims[axis + 1:])
name = next(concatenate_names)
keys = list(product([name], *[range(len(bd)) for bd in blockdims]))
cum_dims = [0] + list(accumulate(add, [len(a.blockdims[axis]) for a in seq]))
names = [a.name for a in seq]
values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[1:axis + 1]
+ (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis+1]) - 1],)
+ key[axis + 2:]
for key in keys]
dsk = dict(zip(keys, values))
dsk2 = merge(dsk, *[a.dask for a in seq])
return Array(dsk2, name, shape, blockdims=blockdims)
@wraps(np.transpose)
def transpose(a, axes=None):
axes = axes or tuple(range(a.ndim))[::-1]
return atop(curry(np.transpose, axes=axes),
next(names), axes,
a, tuple(range(a.ndim)))
@curry
def many(a, b, binop=None, reduction=None, **kwargs):
"""
    Apply binary operator pairwise to sequences, then reduce.
>>> many([1, 2, 3], [10, 20, 30], mul, sum) # dot product
140
"""
return reduction(map(curry(binop, **kwargs), a, b))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, int):
left_axes = (left_axes,)
if isinstance(right_axes, int):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
if len(left_axes) > 1:
raise NotImplementedError("Simultaneous Contractions of multiple "
"indices not yet supported")
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
out_index.remove(left_index[l])
right_index[r] = left_index[l]
func = many(binop=np.tensordot, reduction=sum,
axes=(left_axes, right_axes))
return atop(func,
next(names), out_index,
lhs, tuple(left_index),
rhs, tuple(right_index))
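# A minimal usage sketch, assuming 4x4 arrays of ones blocked into 2x2 chunks
# (every entry of the blocked matrix product is then 4.0):
#
#   a = from_array(np.ones((4, 4)), blockshape=(2, 2))
#   b = from_array(np.ones((4, 4)), blockshape=(2, 2))
#   np.array(tensordot(a, b, axes=((1,), (0,))))  # all entries equal 4.0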
def insert_to_ooc(out, arr):
from threading import Lock
lock = Lock()
locs = [[0] + list(accumulate(add, bl)) for bl in arr.blockdims]
def store(x, *args):
with lock:
ind = tuple([slice(loc[i], loc[i+1]) for i, loc in zip(args, locs)])
out[ind] = x
return None
name = 'store-%s' % arr.name
return dict(((name,) + t[1:], (store, t) + t[1:])
for t in core.flatten(arr._keys()))
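# A minimal sketch of the helper above, assuming ``out`` is a preallocated
# array-like (e.g. an h5py dataset) with the same shape as the dask Array
# ``x``; this is what ``Array.store`` does internally.
#
#   update = insert_to_ooc(out, x)
#   get(merge(x.dask, update), list(update.keys()))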
def partial_by_order(op, other):
"""
>>> f = partial_by_order(add, [(1, 10)])
>>> f(5)
15
"""
def f(*args):
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return op(*args2)
return f
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See also:
atop
"""
name = kwargs.get('name') or next(names)
out_ndim = max(len(arg.shape) if isinstance(arg, Array) else 0
for arg in args)
expr_inds = tuple(range(out_ndim))[::-1]
arrays = [arg for arg in args if isinstance(arg, Array)]
other = [(i, arg) for i, arg in enumerate(args) if not isinstance(arg, Array)]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
return atop(op2, name, expr_inds,
*concat((a, tuple(range(a.ndim)[::-1])) for a in arrays))
def wrap_elemwise(func):
""" Wrap up numpy function into dask.array """
f = partial(elemwise, func)
f.__doc__ = func.__doc__
f.__name__ = func.__name__
return f
arccos = wrap_elemwise(np.arccos)
arcsin = wrap_elemwise(np.arcsin)
arctan = wrap_elemwise(np.arctan)
arctanh = wrap_elemwise(np.arctanh)
arccosh = wrap_elemwise(np.arccosh)
arcsinh = wrap_elemwise(np.arcsinh)
arctan2 = wrap_elemwise(np.arctan2)
ceil = wrap_elemwise(np.ceil)
copysign = wrap_elemwise(np.copysign)
cos = wrap_elemwise(np.cos)
cosh = wrap_elemwise(np.cosh)
degrees = wrap_elemwise(np.degrees)
exp = wrap_elemwise(np.exp)
expm1 = wrap_elemwise(np.expm1)
fabs = wrap_elemwise(np.fabs)
floor = wrap_elemwise(np.floor)
fmod = wrap_elemwise(np.fmod)
frexp = wrap_elemwise(np.frexp)
hypot = wrap_elemwise(np.hypot)
isinf = wrap_elemwise(np.isinf)
isnan = wrap_elemwise(np.isnan)
ldexp = wrap_elemwise(np.ldexp)
log = wrap_elemwise(np.log)
log10 = wrap_elemwise(np.log10)
log1p = wrap_elemwise(np.log1p)
modf = wrap_elemwise(np.modf)
radians = wrap_elemwise(np.radians)
sin = wrap_elemwise(np.sin)
sinh = wrap_elemwise(np.sinh)
sqrt = wrap_elemwise(np.sqrt)
tan = wrap_elemwise(np.tan)
tanh = wrap_elemwise(np.tanh)
trunc = wrap_elemwise(np.trunc)
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
@wraps(np.where)
def where(condition, x, y):
return choose(condition, [y, x])
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes):
if not all(bd % div == 0 for i, div in axes.items()
for bd in x.blockdims[i]):
raise ValueError(
"Coarsening factor does not align with block dimensions")
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = next(names)
dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
for key in core.flatten(x._keys()))
blockdims = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.blockdims))
return Array(merge(x.dask, dsk), name, blockdims=blockdims)
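# A minimal usage sketch, assuming the coarsening factors divide the block
# sizes evenly: an 8x8 array of ones, summed over 2x2 windows, becomes a
# 4x4 array whose entries are all 4.0.
#
#   x = from_array(np.ones((8, 8)), blockshape=(4, 4))
#   y = coarsen(np.sum, x, {0: 2, 1: 2})
#   y.shape  # (4, 4)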
constant_names = ('constant-%d' % i for i in count(1))
def constant(value, shape=None, blockshape=None, blockdims=None, dtype=None):
""" An array with a constant value
>>> x = constant(5, shape=(4, 4), blockshape=(2, 2))
>>> np.array(x)
array([[5, 5, 5, 5],
[5, 5, 5, 5],
[5, 5, 5, 5],
[5, 5, 5, 5]])
"""
name = next(constant_names)
if shape and blockshape and not blockdims:
blockdims = blockdims_from_blockshape(shape, blockshape)
keys = product([name], *[range(len(bd)) for bd in blockdims])
shapes = product(*blockdims)
vals = [(chunk.constant, value, shape) for shape in shapes]
dsk = dict(zip(keys, vals))
return Array(dsk, name, blockdims=blockdims)
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = 'offset_' + func.__name__
return _offset
fromfunction_names = ('fromfunction-%d' % i for i in count(1))
@wraps(np.fromfunction)
def fromfunction(func, shape=None, blockshape=None, blockdims=None):
name = next(fromfunction_names)
if shape and blockshape and not blockdims:
blockdims = blockdims_from_blockshape(shape, blockshape)
keys = list(product([name], *[range(len(bd)) for bd in blockdims]))
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in blockdims]
offsets = list(product(*aggdims))
shapes = list(product(*blockdims))
values = [(np.fromfunction, offset_func(func, offset), shape)
for offset, shape in zip(offsets, shapes)]
dsk = dict(zip(keys, values))
return Array(dsk, name, blockdims=blockdims)
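# A minimal usage sketch of the blocked ``fromfunction`` above: each block
# receives globally offset indices, so the result matches NumPy's.
#
#   x = fromfunction(lambda i, j: i + j, shape=(4, 4), blockshape=(2, 2))
#   np.array(x)[0, 3]  # 3.0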
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/core.py",
"copies": "1",
"size": "35780",
"license": "bsd-3-clause",
"hash": 3056052325529438000,
"line_mean": 30.719858156,
"line_max": 107,
"alpha_frac": 0.5531861375,
"autogenerated": false,
"ratio": 3.2726607518521904,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9315878436137403,
"avg_score": 0.0019936906429576097,
"num_lines": 1128
} |
from __future__ import absolute_import, division, print_function
from operator import add
from copy import deepcopy
import dask
import pytest
from dask.async import *
fib_dask = {'f0': 0, 'f1': 1, 'f2': 1, 'f3': 2, 'f4': 3, 'f5': 5, 'f6': 8}
def test_start_state():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
result = start_state_from_dask(dsk)
    expected = {'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'released': set([]),
'running': set([]),
'ready': ['z'],
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
                                 'z': set(['w'])}}
    assert result == expected
def test_start_state_looks_at_cache():
dsk = {'b': (inc, 'a')}
cache = {'a': 1}
result = start_state_from_dask(dsk, cache)
assert result['dependencies']['b'] == set(['a'])
assert result['ready'] == ['b']
def test_start_state_with_redirects():
dsk = {'x': 1, 'y': 'x', 'z': (inc, 'y')}
result = start_state_from_dask(dsk)
assert result['cache'] == {'x': 1}
def test_start_state_with_independent_but_runnable_tasks():
assert start_state_from_dask({'x': (inc, 1)})['ready'] == ['x']
def test_finish_task():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
sortkey = order(dsk).get
state = start_state_from_dask(dsk)
state['ready'].remove('z')
state['running'] = set(['z', 'other-task'])
task = 'z'
result = 2
oldstate = deepcopy(state)
state['cache']['z'] = result
finish_task(dsk, task, state, set(), sortkey)
assert state == {
'cache': {'y': 2, 'z': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'finished': set(['z']),
'released': set(['x']),
'running': set(['other-task']),
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'ready': ['w'],
'waiting': {},
'waiting_data': {'y': set(['w']),
'z': set(['w'])}}
def test_get():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
assert get_sync(dsk, 'w') == 4
assert get_sync(dsk, ['w', 'z']) == (4, 2)
def test_nested_get():
dsk = {'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}
assert get_sync(dsk, ['a', 'b']) == (3, 3)
def test_get_sync_num_workers():
get_sync({'x': (inc, 'y'), 'y': 1}, 'x', num_workers=2)
def test_cache_options():
try:
from chest import Chest
except ImportError:
return
cache = Chest()
def inc2(x):
assert 'y' in cache
return x + 1
with dask.set_options(cache=cache):
get_sync({'x': (inc2, 'y'), 'y': 1}, 'x')
def test_sort_key():
L = ['x', ('x', 1), ('z', 0), ('x', 0)]
assert sorted(L, key=sortkey) == ['x', ('x', 0), ('x', 1), ('z', 0)]
def test_callback():
f = lambda x: x + 1
dsk = {'a': (f, 1)}
from dask.threaded import get
def start_callback(key, d, state):
assert key == 'a' or key is None
assert d == dsk
assert isinstance(state, dict)
def end_callback(key, value, d, state, worker_id):
assert key == 'a' or key is None
assert value == 2 or value is None
assert d == dsk
assert isinstance(state, dict)
get(dsk, 'a', start_callback=start_callback, end_callback=end_callback)
def test_order_of_startstate():
dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b'),
'x': 1, 'y': (inc, 'x')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['y', 'b']
dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y'),
'a': 1, 'b': (inc, 'a')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['b', 'y']
def test_nonstandard_exceptions_propagate():
class MyException(Exception):
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return "My Exception!"
def f():
raise MyException(1, 2)
from dask.threaded import get
try:
get({'x': (f,)}, 'x')
assert False
except MyException as e:
assert "My Exception!" in str(e)
assert "Traceback" in str(e)
assert 'a' in dir(e)
assert 'traceback' in dir(e)
assert e.exception.a == 1 and e.exception.b == 2
assert e.a == 1 and e.b == 2
def test_remote_exception():
e = TypeError("hello")
a = remote_exception(e, 'traceback')
b = remote_exception(e, 'traceback')
assert type(a) == type(b)
assert isinstance(a, TypeError)
assert 'hello' in str(a)
assert 'traceback' in str(a)
| {
"repo_name": "vikhyat/dask",
"path": "dask/tests/test_async.py",
"copies": "1",
"size": "5332",
"license": "bsd-3-clause",
"hash": 3231773629763026000,
"line_mean": 27.2116402116,
"line_max": 75,
"alpha_frac": 0.4441110278,
"autogenerated": false,
"ratio": 3.210114388922336,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9150508744868409,
"avg_score": 0.0007433343707853512,
"num_lines": 189
} |
from __future__ import absolute_import, division, print_function
from operator import add
from itertools import chain
def inc(x):
return x + 1
def ishashable(x):
""" Is x hashable?
Example
-------
>>> ishashable(1)
True
>>> ishashable([1])
False
"""
try:
hash(x)
return True
except TypeError:
return False
def istask(x):
""" Is x a runnable task?
A task is a tuple with a callable first argument
Example
-------
>>> inc = lambda x: x + 1
>>> istask((inc, 1))
True
>>> istask(1)
False
"""
return isinstance(x, tuple) and x and callable(x[0])
def _get_task(d, task, maxdepth=1000):
# non-recursive. DAG property is checked upon reaching maxdepth.
_iter = lambda *args: iter(args)
    # We construct a nested hierarchy of tuples to mimic the execution stack
# of frames that Python would maintain for a recursive implementation.
# A frame is associated with a single task from a Dask.
# A frame tuple has three elements:
# 1) The function for the task.
# 2) The arguments for the task (typically keys in the Dask).
# Arguments are stored in reverse order, and elements are popped
# as they are evaluated.
# 3) The calculated results of the arguments from (2).
stack = [(task[0], list(task[:0:-1]), [])]
while True:
func, args, results = stack[-1]
if not args:
val = func(*results)
if len(stack) == 1:
return val
stack.pop()
stack[-1][2].append(val)
continue
elif maxdepth and len(stack) > maxdepth:
cycle = getcycle(d, list(task[1:]))
if cycle:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
maxdepth = None
key = args.pop()
if isinstance(key, list):
# v = (get(d, k, concrete=False) for k in key) # recursive
# Fake being lazy
stack.append((_iter, key[::-1], []))
continue
elif ishashable(key) and key in d:
v = d[key]
else:
v = key
if istask(v):
stack.append((v[0], list(v[:0:-1]), []))
else:
results.append(v)
def get(d, key, get=None, concrete=True, **kwargs):
""" Get value from Dask
Example
-------
>>> inc = lambda x: x + 1
>>> d = {'x': 1, 'y': (inc, 'x')}
>>> get(d, 'x')
1
>>> get(d, 'y')
2
See Also
--------
set
"""
get = get or _get
if isinstance(key, list):
v = (get(d, k, get=get, concrete=concrete) for k in key)
if concrete:
v = list(v)
elif ishashable(key) and key in d:
v = d[key]
elif istask(key):
v = key
else:
return key
if istask(v):
if get is _get:
# use non-recursive method by default
return _get_task(d, v)
func, args = v[0], v[1:]
args2 = [get(d, arg, get=get, concrete=False) for arg in args]
return func(*[get(d, arg, get=get) for arg in args2])
else:
return v
_get = get
def _deps(dsk, arg):
""" Get dependencies from keys or tasks
Helper function for get_dependencies.
>>> dsk = {'x': 1, 'y': 2}
>>> _deps(dsk, 'x')
['x']
>>> _deps(dsk, (add, 'x', 1))
['x']
>>> _deps(dsk, (add, 'x', (inc, 'y'))) # doctest: +SKIP
['x', 'y']
"""
if istask(arg):
result = []
for a in arg[1:]:
result.extend(_deps(dsk, a))
return result
try:
if arg not in dsk:
return []
except TypeError: # not hashable
return []
return [arg]
def get_dependencies(dsk, task, as_list=False):
""" Get the immediate tasks on which this task depends
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> get_dependencies(dsk, 'x')
set([])
>>> get_dependencies(dsk, 'y')
set(['x'])
>>> get_dependencies(dsk, 'z') # doctest: +SKIP
set(['x', 'y'])
>>> get_dependencies(dsk, 'w') # Only direct dependencies
set(['z'])
>>> get_dependencies(dsk, 'a') # Ignore non-keys
set(['x'])
"""
args = [dsk[task]]
result = []
while args:
arg = args.pop()
if istask(arg):
args.extend(arg[1:])
elif isinstance(arg, list):
args.extend(arg)
else:
result.append(arg)
if not result:
return [] if as_list else set()
rv = []
for x in result:
rv.extend(_deps(dsk, x))
return rv if as_list else set(rv)
def flatten(seq):
"""
>>> list(flatten([1]))
[1]
>>> list(flatten([[1, 2], [1, 2]]))
[1, 2, 1, 2]
>>> list(flatten([[[1], [2]], [[1], [2]]]))
[1, 2, 1, 2]
>>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
[(1, 2), (1, 2)]
>>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
[1, 2, 3, 4]
"""
for item in seq:
if isinstance(item, list):
for item2 in flatten(item):
yield item2
else:
yield item
def reverse_dict(d):
"""
>>> a, b, c = 'abc'
>>> d = {a: [b, c], b: [c]}
>>> reverse_dict(d) # doctest: +SKIP
    {'a': set([]), 'b': set(['a']), 'c': set(['a', 'b'])}
"""
terms = list(d.keys()) + list(chain.from_iterable(d.values()))
result = dict((t, set()) for t in terms)
for k, vals in d.items():
for val in vals:
result[val].add(k)
return result
def subs(task, key, val):
""" Perform a substitution on a task
Example
-------
>>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP
(inc, 1)
"""
if not istask(task):
if task == key:
return val
elif isinstance(task, list):
return [subs(x, key, val) for x in task]
else:
return task
newargs = []
for arg in task[1:]:
if istask(arg):
arg = subs(arg, key, val)
elif isinstance(arg, list):
arg = [subs(x, key, val) for x in arg]
elif arg == key:
arg = val
newargs.append(arg)
return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False):
# Stack-based depth-first search traversal. This is based on Tarjan's
# method for topological sorting (see wikipedia for pseudocode)
if keys is None:
keys = dsk
elif not isinstance(keys, list):
keys = [keys]
if not returncycle:
ordered = []
    # Nodes whose descendants have been completely explored.
# These nodes are guaranteed to not be part of a cycle.
completed = set()
# All nodes that have been visited in the current traversal. Because
# we are doing depth-first search, going "deeper" should never result
# in visiting a node that has already been seen. The `seen` and
# `completed` sets are mutually exclusive; it is okay to visit a node
# that has already been added to `completed`.
seen = set()
for key in keys:
if key in completed:
continue
nodes = [key]
while nodes:
# Keep current node on the stack until all descendants are visited
cur = nodes[-1]
if cur in completed:
# Already fully traversed descendants of cur
nodes.pop()
continue
seen.add(cur)
# Add direct descendants of cur to nodes stack
next_nodes = []
for nxt in get_dependencies(dsk, cur):
if nxt not in completed:
if nxt in seen:
# Cycle detected!
cycle = [nxt]
while nodes[-1] != nxt:
cycle.append(nodes.pop())
cycle.append(nodes.pop())
cycle.reverse()
if returncycle:
return cycle
else:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
next_nodes.append(nxt)
if next_nodes:
nodes.extend(next_nodes)
else:
# cur has no more descendants to explore, so we're done with it
if not returncycle:
ordered.append(cur)
completed.add(cur)
seen.remove(cur)
nodes.pop()
if returncycle:
return []
return ordered
def toposort(dsk):
""" Return a list of keys of dask sorted in topological order."""
return _toposort(dsk)
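# A minimal usage sketch (ordering within independent branches may vary with
# dict iteration order, but dependencies always precede their dependents):
#
#   dsk = {'a': 1, 'b': (inc, 'a'), 'c': (add, 'a', 'b')}
#   toposort(dsk)  # e.g. ['a', 'b', 'c']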
def getcycle(d, keys):
""" Return a list of nodes that form a cycle if Dask is not a DAG.
Returns an empty list if no cycle is found.
``keys`` may be a single key or list of keys.
Example
-------
>>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
>>> getcycle(d, 'x')
['x', 'z', 'y', 'x']
See Also
--------
isdag
"""
return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
""" Does Dask form a directed acyclic graph when calculating keys?
``keys`` may be a single key or list of keys.
Example
-------
>>> inc = lambda x: x + 1
>>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
True
>>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
False
See Also
--------
getcycle
"""
return not getcycle(d, keys)
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/core.py",
"copies": "1",
"size": "9803",
"license": "bsd-3-clause",
"hash": -4226190287708716000,
"line_mean": 24.4623376623,
"line_max": 84,
"alpha_frac": 0.4957666021,
"autogenerated": false,
"ratio": 3.67016098839386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.466592759049386,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from operator import add
from itertools import chain
def inc(x):
return x + 1
def ishashable(x):
""" Is x hashable?
Examples
--------
>>> ishashable(1)
True
>>> ishashable([1])
False
"""
try:
hash(x)
return True
except TypeError:
return False
def istask(x):
""" Is x a runnable task?
A task is a tuple with a callable first argument
Examples
--------
>>> inc = lambda x: x + 1
>>> istask((inc, 1))
True
>>> istask(1)
False
"""
return type(x) is tuple and x and callable(x[0])
def preorder_traversal(task):
"""A generator to preorder-traverse a task."""
for item in task:
if istask(item):
for i in preorder_traversal(item):
yield i
elif isinstance(item, list):
yield list
for i in preorder_traversal(item):
yield i
else:
yield item
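# A minimal usage sketch: the callable is yielded first, then each argument,
# descending depth-first into nested tasks.
#
#   list(preorder_traversal((add, 1, (inc, 2))))  # [add, 1, inc, 2]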
def _get_task(d, task, maxdepth=1000):
# non-recursive. DAG property is checked upon reaching maxdepth.
_iter = lambda *args: iter(args)
    # We construct a nested hierarchy of tuples to mimic the execution stack
# of frames that Python would maintain for a recursive implementation.
# A frame is associated with a single task from a Dask.
# A frame tuple has three elements:
# 1) The function for the task.
# 2) The arguments for the task (typically keys in the Dask).
# Arguments are stored in reverse order, and elements are popped
# as they are evaluated.
# 3) The calculated results of the arguments from (2).
stack = [(task[0], list(task[:0:-1]), [])]
while True:
func, args, results = stack[-1]
if not args:
val = func(*results)
if len(stack) == 1:
return val
stack.pop()
stack[-1][2].append(val)
continue
elif maxdepth and len(stack) > maxdepth:
cycle = getcycle(d, list(task[1:]))
if cycle:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
maxdepth = None
key = args.pop()
if isinstance(key, list):
# v = (get(d, k, concrete=False) for k in key) # recursive
# Fake being lazy
stack.append((_iter, key[::-1], []))
continue
elif ishashable(key) and key in d:
v = d[key]
else:
v = key
if istask(v):
stack.append((v[0], list(v[:0:-1]), []))
else:
results.append(v)
def get(d, key, get=None, **kwargs):
""" Get value from Dask
Examples
--------
>>> inc = lambda x: x + 1
>>> d = {'x': 1, 'y': (inc, 'x')}
>>> get(d, 'x')
1
>>> get(d, 'y')
2
See Also
--------
set
"""
get = get or _get
if isinstance(key, list):
v = tuple(get(d, k, get=get) for k in key)
elif istask(key):
v = key
elif ishashable(key):
v = d[key]
else:
        message = '%s is neither a task nor a dask key'
raise KeyError(message % key)
if istask(v):
if get is _get:
# use non-recursive method by default
return _get_task(d, v)
func, args = v[0], v[1:]
args2 = []
for arg in args:
if not istask(arg) and arg not in d:
args2.append(arg)
else:
args2.append(get(d, arg, get=get))
return func(*args2)
else:
return v
_get = get
def _deps(dsk, arg):
""" Get dependencies from keys or tasks
Helper function for get_dependencies.
>>> dsk = {'x': 1, 'y': 2}
>>> _deps(dsk, 'x')
['x']
>>> _deps(dsk, (add, 'x', 1))
['x']
>>> _deps(dsk, ['x', 'y'])
['x', 'y']
>>> _deps(dsk, {'a': 'x'})
['x']
>>> _deps(dsk, (add, 'x', (inc, 'y'))) # doctest: +SKIP
['x', 'y']
"""
if istask(arg):
result = []
for a in arg[1:]:
result.extend(_deps(dsk, a))
return result
if type(arg) is list:
return sum([_deps(dsk, a) for a in arg], [])
if type(arg) is dict:
return sum([_deps(dsk, v) for v in arg.values()], [])
try:
if arg not in dsk:
return []
except TypeError: # not hashable
return []
return [arg]
def get_dependencies(dsk, task, as_list=False):
""" Get the immediate tasks on which this task depends
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> get_dependencies(dsk, 'x')
set([])
>>> get_dependencies(dsk, 'y')
set(['x'])
>>> get_dependencies(dsk, 'z') # doctest: +SKIP
set(['x', 'y'])
>>> get_dependencies(dsk, 'w') # Only direct dependencies
set(['z'])
>>> get_dependencies(dsk, 'a') # Ignore non-keys
set(['x'])
"""
args = [dsk[task]]
result = []
while args:
arg = args.pop()
if istask(arg):
args.extend(arg[1:])
elif type(arg) is list:
args.extend(arg)
else:
result.append(arg)
if not result:
return [] if as_list else set()
rv = []
for x in result:
rv.extend(_deps(dsk, x))
return rv if as_list else set(rv)
def get_deps(dsk):
""" Get dependencies and dependents from dask dask graph
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> dependencies
{'a': set([]), 'c': set(['b']), 'b': set(['a'])}
>>> dependents
{'a': set(['b']), 'c': set([]), 'b': set(['c'])}
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
return dependencies, dependents
def flatten(seq):
"""
>>> list(flatten([1]))
[1]
>>> list(flatten([[1, 2], [1, 2]]))
[1, 2, 1, 2]
>>> list(flatten([[[1], [2]], [[1], [2]]]))
[1, 2, 1, 2]
>>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
[(1, 2), (1, 2)]
>>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
[1, 2, 3, 4]
"""
if isinstance(seq, str):
yield seq
else:
for item in seq:
if isinstance(item, list):
for item2 in flatten(item):
yield item2
else:
yield item
def reverse_dict(d):
"""
>>> a, b, c = 'abc'
>>> d = {a: [b, c], b: [c]}
>>> reverse_dict(d) # doctest: +SKIP
    {'a': set([]), 'b': set(['a']), 'c': set(['a', 'b'])}
"""
terms = list(d.keys()) + list(chain.from_iterable(d.values()))
result = dict((t, set()) for t in terms)
for k, vals in d.items():
for val in vals:
result[val].add(k)
return result
def subs(task, key, val):
""" Perform a substitution on a task
Examples
--------
>>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP
(inc, 1)
"""
if not istask(task):
try:
if task == key:
return val
except Exception:
pass
if isinstance(task, list):
return [subs(x, key, val) for x in task]
return task
newargs = []
for arg in task[1:]:
if istask(arg):
arg = subs(arg, key, val)
elif isinstance(arg, list):
arg = [subs(x, key, val) for x in arg]
elif type(arg) is type(key) and arg == key:
arg = val
newargs.append(arg)
return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False, dependencies=None):
# Stack-based depth-first search traversal. This is based on Tarjan's
# method for topological sorting (see wikipedia for pseudocode)
if keys is None:
keys = dsk
elif not isinstance(keys, list):
keys = [keys]
if not returncycle:
ordered = []
    # Nodes whose descendants have been completely explored.
# These nodes are guaranteed to not be part of a cycle.
completed = set()
# All nodes that have been visited in the current traversal. Because
# we are doing depth-first search, going "deeper" should never result
# in visiting a node that has already been seen. The `seen` and
# `completed` sets are mutually exclusive; it is okay to visit a node
# that has already been added to `completed`.
seen = set()
if dependencies is None:
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
for key in keys:
if key in completed:
continue
nodes = [key]
while nodes:
# Keep current node on the stack until all descendants are visited
cur = nodes[-1]
if cur in completed:
# Already fully traversed descendants of cur
nodes.pop()
continue
seen.add(cur)
# Add direct descendants of cur to nodes stack
next_nodes = []
for nxt in dependencies[cur]:
if nxt not in completed:
if nxt in seen:
# Cycle detected!
cycle = [nxt]
while nodes[-1] != nxt:
cycle.append(nodes.pop())
cycle.append(nodes.pop())
cycle.reverse()
if returncycle:
return cycle
else:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
next_nodes.append(nxt)
if next_nodes:
nodes.extend(next_nodes)
else:
# cur has no more descendants to explore, so we're done with it
if not returncycle:
ordered.append(cur)
completed.add(cur)
seen.remove(cur)
nodes.pop()
if returncycle:
return []
return ordered
def toposort(dsk, dependencies=None):
""" Return a list of keys of dask sorted in topological order."""
return _toposort(dsk, dependencies=dependencies)
def getcycle(d, keys):
""" Return a list of nodes that form a cycle if Dask is not a DAG.
Returns an empty list if no cycle is found.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
>>> getcycle(d, 'x')
['x', 'z', 'y', 'x']
See Also
--------
isdag
"""
return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
""" Does Dask form a directed acyclic graph when calculating keys?
``keys`` may be a single key or list of keys.
Examples
--------
>>> inc = lambda x: x + 1
>>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
True
>>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
False
See Also
--------
getcycle
"""
return not getcycle(d, keys)
def list2(L):
return list(L)
def quote(x):
""" Ensure that this value remains this value in a dask graph
    Some values in a dask graph take on special meaning: lists become
    iterators and tasks get executed.  Sometimes we want to ensure that our
    data is not interpreted but remains literal.
>>> quote([1, 2, 3])
[1, 2, 3]
>>> quote((add, 1, 2)) # doctest: +SKIP
(tuple, [add, 1, 2])
"""
if istask(x):
return (tuple, list(map(quote, x)))
return x
| {
"repo_name": "mikegraham/dask",
"path": "dask/core.py",
"copies": "1",
"size": "11813",
"license": "bsd-3-clause",
"hash": 8167303752778612000,
"line_mean": 24.8490153173,
"line_max": 84,
"alpha_frac": 0.5012274613,
"autogenerated": false,
"ratio": 3.658408175905853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9656231695394544,
"avg_score": 0.0006807883622619651,
"num_lines": 457
} |
from __future__ import absolute_import, division, print_function
from operator import attrgetter
import os
import re
import subprocess
from itertools import chain
from collections import Iterator
from datetime import datetime, date
from distutils.spawn import find_executable
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from multipledispatch import MDNotImplementedError
import datashape
from datashape import DataShape, Record, Option, var, dshape, Map
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover, datetime_, date_, float64, int64, int_, string
from datashape import float32
from datashape.dispatch import dispatch
from toolz import (partition_all, keyfilter, memoize, valfilter, identity,
concat, curry, merge)
from toolz.curried import pluck, map
from ..compatibility import unicode
from ..utils import keywords, ignoring, iter_except, filter_kwargs
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = (int, float, datetime, date, bool, str)
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.BigInteger,
'int32': sa.Integer,
'int': sa.Integer,
'int16': sa.SmallInteger,
'float32': sa.REAL,
'float64': sa.FLOAT,
'float': sa.FLOAT,
'real': sa.FLOAT,
'string': sa.Text,
'date': sa.Date,
'time': sa.Time,
'datetime': sa.DateTime,
'bool': sa.Boolean,
"timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
"timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
"timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
"timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
# ??: sa.types.LargeBinary,
}
revtypes = dict(map(reversed, types.items()))
revtypes.update({
sa.DATETIME: datetime_,
sa.TIMESTAMP: datetime_,
sa.FLOAT: float64,
sa.DATE: date_,
sa.BIGINT: int64,
sa.INTEGER: int_,
sa.types.NullType: string,
sa.REAL: float32,
sa.Float: float64,
sa.Float(precision=24): float32,
sa.Float(precision=53): float64,
})
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def getbind(t, bind):
if bind is None:
return t.bind
if isinstance(bind, sa.engine.base.Engine):
return bind
return sa.create_engine(bind)
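# A minimal sketch of the dispatch above (``accounts`` is a hypothetical bound
# Table): ``bind`` may be None, an existing Engine, or a URI string.
#
#   getbind(accounts, None)                 # the table's own engine
#   getbind(accounts, 'sqlite:///data.db')  # a freshly created engine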
def batch(sel, chunksize=10000, bind=None):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
        Number of rows to fetch from the database per batch
"""
def rowterator(sel, chunksize=chunksize):
with getbind(sel, bind).connect() as conn:
result = conn.execute(sel)
yield result.keys()
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
terator = rowterator(sel)
return next(terator), concat(terator)
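# A minimal usage sketch, assuming ``t`` is a bound sqlalchemy Table (this is
# how the Iterator and DataFrame conversions below consume query results):
#
#   columns, rows = batch(sa.select([t]), chunksize=500)
#   first = next(iter(rows))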
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.Interval(day_precision=0, second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
            raise ValueError('Cannot infer INTERVAL type with parameters '
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return revtypes[type(typ)]
if isinstance(typ, (sa.NUMERIC, sa.DECIMAL)):
return datashape.Decimal(precision=typ.precision, scale=typ.scale)
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, typ.collation)
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.ForeignKey, sa.sql.FromClause)
def discover_foreign_key_relationship(fk, parent, parent_measure=None):
if fk.column.table is not parent:
parent_measure = discover(fk.column.table).measure
return {fk.parent.name: Map(discover(fk.parent.type), parent_measure)}
@discover.register(sa.Column)
def discover_sqlalchemy_column(c):
meta = Option if c.nullable else identity
return Record([(c.name, meta(discover(c.type)))])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
ordering = dict((c, i) for i, c in enumerate(c for c in t.columns.keys()))
records = list(sum([discover(c).parameters[0] for c in t.columns], ()))
fkeys = [discover(fkey, t, parent_measure=Record(records))
for fkey in t.foreign_keys]
for name, column in merge(*fkeys).items():
records[ordering[name]] = (name, column)
return var * Record(records)
@memoize
def metadata_of_engine(engine, schema=None):
return sa.MetaData(engine, schema=schema)
def create_engine(uri, *args, **kwargs):
if ':memory:' in uri:
return sa.create_engine(uri, *args, **kwargs)
else:
return memoized_create_engine(uri, *args, **kwargs)
memoized_create_engine = memoize(sa.create_engine)
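# A minimal sketch of the memoization behaviour above: on-disk URIs reuse a
# single Engine, while ':memory:' URIs always get a fresh one so separate
# in-memory databases never share state.
#
#   create_engine('sqlite:///data.db') is create_engine('sqlite:///data.db')      # True
#   create_engine('sqlite:///:memory:') is create_engine('sqlite:///:memory:')    # False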
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = metadata_of_engine(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
metadata = metadata_of_engine(engine)
return discover(metadata)
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
print("Can not discover type of table %s.\n" % name +
"SQLAlchemy provided this error message:\n\t%s" % e.message +
"\nSkipping.")
except NotImplementedError as e:
print("Blaze does not understand a SQLAlchemy type.\n"
"Blaze provided the following error:\n\t%s" % "\n\t".join(e.args) +
"\nSkipping.")
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def validate_foreign_keys(ds, foreign_keys):
    # ensure each requested foreign key names a field in the datashape and is
    # declared as a Map type, and that every Map-typed field was requested as
    # a foreign key
for field in foreign_keys:
if field not in ds.measure.names:
raise TypeError('Requested foreign key field %r is not a field in '
'datashape %s' % (field, ds))
for field, typ in ds.measure.fields:
if field in foreign_keys and not isinstance(getattr(typ, 'ty', typ),
Map):
raise TypeError('Foreign key %s passed in but not a Map '
'datashape, got %s' % (field, typ))
if isinstance(typ, Map) and field not in foreign_keys:
raise TypeError('Map type %s found on column %s, but %r '
"wasn't found in %s" %
(typ, field, field, foreign_keys))
def dshape_to_table(name, ds, metadata=None, foreign_keys=None,
primary_key=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if not isrecord(ds.measure):
raise TypeError('dshape measure must be a record type e.g., '
'"{a: int64, b: int64}". Input measure is %r' %
ds.measure)
if metadata is None:
metadata = sa.MetaData()
if foreign_keys is None:
foreign_keys = {}
validate_foreign_keys(ds, foreign_keys)
cols = dshape_to_alchemy(ds, primary_key=primary_key or frozenset())
cols.extend(sa.ForeignKeyConstraint([column_name], [referent])
for column_name, referent in foreign_keys.items())
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, foreign_keys=None,
primary_key=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = metadata_of_engine(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return engine
def dshape_to_alchemy(dshape, primary_key=frozenset()):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Map):
return dshape_to_alchemy(dshape.key.measure, primary_key=primary_key)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty, primary_key=primary_key)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(getattr(typ, 'ty', typ),
primary_key=primary_key),
primary_key=name in primary_key,
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1], primary_key=primary_key)
else:
return dshape_to_alchemy(dshape[0], primary_key=primary_key)
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.TEXT
string_types = dict(U=sa.Unicode, A=sa.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
return sa.DATETIME(timezone=dshape.tz is not None)
if isinstance(dshape, datashape.Decimal):
return sa.NUMERIC(dshape.precision, dshape.scale)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, bind=None, **kwargs):
_, rows = batch(sa.select([t]), bind=bind)
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel, bind=bind)
return func(rows)
@convert.register(base, sa.sql.Select, cost=300.0)
def select_to_base(sel, dshape=None, bind=None, **kwargs):
assert not dshape or isscalar(dshape), \
'dshape should be None or scalar, got %s' % dshape
with getbind(sel, bind).connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, bind=None, **kwargs):
assert not isinstance(t, type)
engine = getbind(t, bind)
if not t.exists(bind=engine):
t.create(bind=engine)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
dshape = dshape and datashape.dshape(dshape)
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
with engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(t.insert(), chunk)
return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, bind=None, **kwargs):
t_bind = getbind(t, bind)
o_bind = getbind(o, bind)
if t_bind != o_bind:
return append(
t,
convert(Iterator, o, bind=bind, **kwargs),
bind=bind,
**kwargs
)
bind = t_bind
assert bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
with bind.connect() as conn:
conn.execute(query)
return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(
obj,
'before_create',
ddl.execute_if(
callable_=should_create_schema,
dialect='postgresql'
)
)
return obj
def fullname(table, compiler):
preparer = compiler.dialect.identifier_preparer
fullname = preparer.quote_identifier(table.name)
schema = table.schema
if schema is not None:
fullname = '%s.%s' % (preparer.quote_schema(schema), fullname)
return fullname
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
engine = create_engine(uri, **filter_kwargs(sa.create_engine, kwargs))
ds = kwargs.pop('dshape', None)
schema = kwargs.pop('schema', None)
foreign_keys = kwargs.pop('foreign_keys', None)
primary_key = kwargs.pop('primary_key', None)
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(
sa.Table(table_name, metadata, schema=schema,
autoload_with=engine),
schema
)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema,
foreign_keys=foreign_keys)
return engine
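# A minimal usage sketch, assuming a hypothetical 'accounts' table that does
# not yet exist; passing a table name plus a dshape creates it.
#
#   t = resource_sql('sqlite:///data.db', 'accounts',
#                    dshape='var * {name: string, amount: int}')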
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
pattern = 'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
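# Hedged illustration (hypothetical host and table): missing pieces of a hive
# URI are filled in from the defaults above before delegating to resource_sql,
# e.g. 'hive://example.host::events'
#      -> 'hive://hdfs@example.host:10000/default::events'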
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table, bind=None):
bind = getbind(table, bind)
table.drop(bind=bind, checkfirst=True)
if table.exists(bind=bind):
raise ValueError('table %r dropped but still exists' % table.name)
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
def select_or_selectable_to_frame(el, bind=None, **kwargs):
columns, rows = batch(el, bind=bind)
row = next(rows, None)
if row is None:
return pd.DataFrame(columns=columns)
return pd.DataFrame(list(chain([tuple(row)], map(tuple, rows))),
columns=columns)
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(self, element, path, delimiter=',', quotechar='"',
lineterminator=r'\n', escapechar='\\', header=True,
na_value='', bind=None):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
self._bind = bind = getbind(element, bind)
# mysql cannot write headers
self.header = header and bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
@property
def bind(self):
return self._bind
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
istable = isinstance(selectable, sa.Table)
template = """COPY %s TO '{path}'
WITH CSV {header}
DELIMITER '{delimiter}'
QUOTE '{quotechar}'
NULL '{na_value}'
ESCAPE '{escapechar}'
""" % ('{query}' if istable else '({query})')
processed = (fullname(selectable, compiler)
if istable else compiler.process(selectable))
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
return template.format(query=processed,
path=element.path,
header='HEADER' if element.header else '',
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar)
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
if isinstance(selectable, sa.Table):
processed = 'SELECT * FROM %(table)s' % dict(table=selectable.name)
else:
processed = compiler.process(selectable)
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
template = """{query} INTO OUTFILE '{path}'
FIELDS TERMINATED BY '{delimiter}'
OPTIONALLY ENCLOSED BY '{quotechar}'
ESCAPED BY '{escapechar}'
LINES TERMINATED BY '{lineterminator}'"""
return template.format(query=processed,
path=element.path,
delimiter=element.delimiter,
lineterminator=element.lineterminator,
escapechar=element.escapechar.encode(
'unicode-escape').decode(),
quotechar=element.quotechar)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
selectable = element.element
sql = (compiler.process(sa.select([selectable])
if isinstance(selectable, sa.Table)
else selectable) + ';')
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode()
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, bind=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(
selectable,
os.path.abspath(csv.path),
bind=bind,
**kwargs
)
with getbind(selectable, bind).begin() as conn:
conn.execute(stmt)
return csv
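# --- Hedged usage sketch (not part of the original module) ------------------
# Hypothetical database and output path. Appending any selectable to a CSV
# routes through CopyToCSV and the dialect-specific compilers above (the
# sqlite branch additionally shells out to the sqlite3 binary).
def _example_export_table_to_csv():
    accounts = resource('sqlite:///example.db', 'accounts',
                        dshape='var * {name: string, amount: int64}')
    return append(CSV('accounts.csv'), accounts)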
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/sql.py",
"copies": "1",
"size": "25054",
"license": "bsd-3-clause",
"hash": 2545432010387069000,
"line_mean": 33.462173315,
"line_max": 113,
"alpha_frac": 0.6141933424,
"autogenerated": false,
"ratio": 3.7607325127589313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987429938646226,
"avg_score": 0.00012529373933440832,
"num_lines": 727
} |
from __future__ import absolute_import, division, print_function
from operator import getitem
from functools import partial
import numpy as np
from toolz import curry
from .core import Array, elemwise, atop, apply_infer_dtype, asarray
from ..base import Base
from .. import core, sharedict
from ..utils import skip_doctest
def __array_wrap__(numpy_ufunc, x, *args, **kwargs):
return x.__array_wrap__(numpy_ufunc(x, *args, **kwargs))
@curry
def copy_docstring(target, source=None):
target.__doc__ = skip_doctest(source.__doc__)
return target
def wrap_elemwise(numpy_ufunc, array_wrap=False):
""" Wrap up numpy function into dask.array """
def wrapped(*args, **kwargs):
dsk = [arg for arg in args if hasattr(arg, '_elemwise')]
if len(dsk) > 0:
if array_wrap:
return dsk[0]._elemwise(__array_wrap__, numpy_ufunc,
*args, **kwargs)
else:
return dsk[0]._elemwise(numpy_ufunc, *args, **kwargs)
else:
return numpy_ufunc(*args, **kwargs)
# functools.wraps cannot wrap ufunc in Python 2.x
wrapped.__name__ = numpy_ufunc.__name__
wrapped.__doc__ = skip_doctest(numpy_ufunc.__doc__)
return wrapped
class ufunc(object):
_forward_attrs = {'nin', 'nargs', 'nout', 'ntypes', 'identity',
'signature', 'types'}
def __init__(self, ufunc):
if not isinstance(ufunc, np.ufunc):
raise TypeError("must be an instance of `ufunc`, "
"got `%s" % type(ufunc).__name__)
self._ufunc = ufunc
self.__name__ = ufunc.__name__
copy_docstring(self, ufunc)
def __getattr__(self, key):
if key in self._forward_attrs:
return getattr(self._ufunc, key)
raise AttributeError("%r object has no attribute "
"%r" % (type(self).__name__, key))
def __dir__(self):
return list(self._forward_attrs.union(dir(type(self)), self.__dict__))
def __repr__(self):
return repr(self._ufunc)
def __call__(self, *args, **kwargs):
dsk = [arg for arg in args if hasattr(arg, '_elemwise')]
if len(dsk) > 0:
return dsk[0]._elemwise(self._ufunc, *args, **kwargs)
else:
return self._ufunc(*args, **kwargs)
@copy_docstring(source=np.ufunc.outer)
def outer(self, A, B, **kwargs):
if self.nin != 2:
raise ValueError("outer product only supported for binary functions")
if 'out' in kwargs:
raise ValueError("`out` kwarg not supported")
A_is_dask = isinstance(A, Base)
B_is_dask = isinstance(B, Base)
if not A_is_dask and not B_is_dask:
return self._ufunc.outer(A, B, **kwargs)
elif (A_is_dask and not isinstance(A, Array) or
B_is_dask and not isinstance(B, Array)):
raise NotImplementedError("Dask objects besides `dask.array.Array` "
"are not supported at this time.")
A = asarray(A)
B = asarray(B)
ndim = A.ndim + B.ndim
out_inds = tuple(range(ndim))
A_inds = out_inds[:A.ndim]
B_inds = out_inds[A.ndim:]
dtype = apply_infer_dtype(self._ufunc.outer, [A, B], kwargs,
'ufunc.outer', suggest_dtype=False)
if 'dtype' in kwargs:
func = partial(self._ufunc.outer, dtype=kwargs.pop('dtype'))
else:
func = self._ufunc.outer
return atop(func, out_inds, A, A_inds, B, B_inds, dtype=dtype,
token=self.__name__ + '.outer', **kwargs)
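# --- Hedged usage sketch (not part of the original module) ------------------
# Arbitrary illustration data. Calls on dask arrays stay lazy and go through
# _elemwise; calls on plain NumPy input fall straight through to the wrapped
# numpy ufunc.
def _example_ufunc_dispatch():
    import numpy as _np
    import dask.array as _da
    x = _da.ones((4, 4), chunks=(2, 2))
    lazy = add(x, 1)                 # dask.array.Array, not yet computed
    eager = add(_np.ones(4), 1)      # plain ndarray, computed immediately
    table = add.outer(_da.arange(3, chunks=3), _da.arange(4, chunks=4))
    return lazy.compute(), eager, table.compute()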
# ufuncs, copied from this page:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# math operations
add = ufunc(np.add)
subtract = ufunc(np.subtract)
multiply = ufunc(np.multiply)
divide = ufunc(np.divide)
logaddexp = ufunc(np.logaddexp)
logaddexp2 = ufunc(np.logaddexp2)
true_divide = ufunc(np.true_divide)
floor_divide = ufunc(np.floor_divide)
negative = ufunc(np.negative)
power = ufunc(np.power)
remainder = ufunc(np.remainder)
mod = ufunc(np.mod)
# fmod: see below
conj = ufunc(np.conj)
exp = ufunc(np.exp)
exp2 = ufunc(np.exp2)
log = ufunc(np.log)
log2 = ufunc(np.log2)
log10 = ufunc(np.log10)
log1p = ufunc(np.log1p)
expm1 = ufunc(np.expm1)
sqrt = ufunc(np.sqrt)
square = ufunc(np.square)
cbrt = ufunc(np.cbrt)
reciprocal = ufunc(np.reciprocal)
# trigonometric functions
sin = ufunc(np.sin)
cos = ufunc(np.cos)
tan = ufunc(np.tan)
arcsin = ufunc(np.arcsin)
arccos = ufunc(np.arccos)
arctan = ufunc(np.arctan)
arctan2 = ufunc(np.arctan2)
hypot = ufunc(np.hypot)
sinh = ufunc(np.sinh)
cosh = ufunc(np.cosh)
tanh = ufunc(np.tanh)
arcsinh = ufunc(np.arcsinh)
arccosh = ufunc(np.arccosh)
arctanh = ufunc(np.arctanh)
deg2rad = ufunc(np.deg2rad)
rad2deg = ufunc(np.rad2deg)
# comparison functions
greater = ufunc(np.greater)
greater_equal = ufunc(np.greater_equal)
less = ufunc(np.less)
less_equal = ufunc(np.less_equal)
not_equal = ufunc(np.not_equal)
equal = ufunc(np.equal)
logical_and = ufunc(np.logical_and)
logical_or = ufunc(np.logical_or)
logical_xor = ufunc(np.logical_xor)
logical_not = ufunc(np.logical_not)
maximum = ufunc(np.maximum)
minimum = ufunc(np.minimum)
fmax = ufunc(np.fmax)
fmin = ufunc(np.fmin)
# floating functions
isfinite = ufunc(np.isfinite)
isinf = ufunc(np.isinf)
isnan = ufunc(np.isnan)
signbit = ufunc(np.signbit)
copysign = ufunc(np.copysign)
nextafter = ufunc(np.nextafter)
spacing = ufunc(np.spacing)
# modf: see below
ldexp = ufunc(np.ldexp)
# frexp: see below
fmod = ufunc(np.fmod)
floor = ufunc(np.floor)
ceil = ufunc(np.ceil)
trunc = ufunc(np.trunc)
divide = ufunc(np.divide)
# more math routines, from this page:
# http://docs.scipy.org/doc/numpy/reference/routines.math.html
degrees = ufunc(np.degrees)
radians = ufunc(np.radians)
rint = ufunc(np.rint)
fabs = ufunc(np.fabs)
sign = ufunc(np.sign)
absolute = ufunc(np.absolute)
# non-ufunc elementwise functions
clip = wrap_elemwise(np.clip)
isreal = wrap_elemwise(np.isreal, array_wrap=True)
iscomplex = wrap_elemwise(np.iscomplex, array_wrap=True)
real = wrap_elemwise(np.real, array_wrap=True)
imag = wrap_elemwise(np.imag, array_wrap=True)
fix = wrap_elemwise(np.fix, array_wrap=True)
i0 = wrap_elemwise(np.i0, array_wrap=True)
sinc = wrap_elemwise(np.sinc, array_wrap=True)
nan_to_num = wrap_elemwise(np.nan_to_num, array_wrap=True)
@copy_docstring(source=np.angle)
def angle(x, deg=0):
deg = bool(deg)
if hasattr(x, '_elemwise'):
return x._elemwise(__array_wrap__, np.angle, x, deg)
return np.angle(x, deg=deg)
@copy_docstring(source=np.frexp)
def frexp(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.frexp, x, dtype=object)
left = 'mantissa-' + tmp.name
right = 'exponent-' + tmp.name
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
a = np.empty((1, ), dtype=x.dtype)
l, r = np.frexp(a)
ldt = l.dtype
rdt = r.dtype
L = Array(sharedict.merge(tmp.dask, (left, ldsk)), left, chunks=tmp.chunks, dtype=ldt)
R = Array(sharedict.merge(tmp.dask, (right, rdsk)), right, chunks=tmp.chunks, dtype=rdt)
return L, R
@copy_docstring(source=np.modf)
def modf(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.modf, x, dtype=object)
left = 'modf1-' + tmp.name
right = 'modf2-' + tmp.name
ldsk = dict(((left,) + key[1:], (getitem, key, 0))
for key in core.flatten(tmp._keys()))
rdsk = dict(((right,) + key[1:], (getitem, key, 1))
for key in core.flatten(tmp._keys()))
a = np.empty((1,), dtype=x.dtype)
l, r = np.modf(a)
ldt = l.dtype
rdt = r.dtype
L = Array(sharedict.merge(tmp.dask, (left, ldsk)), left, chunks=tmp.chunks, dtype=ldt)
R = Array(sharedict.merge(tmp.dask, (right, rdsk)), right, chunks=tmp.chunks, dtype=rdt)
return L, R
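# --- Hedged usage sketch (not part of the original module) ------------------
# Arbitrary illustration data; frexp and modf each return a pair of lazy
# arrays built from the same underlying elementwise graph.
def _example_frexp_modf():
    import dask.array as _da
    x = _da.arange(8, chunks=4, dtype='f8')
    mantissa, exponent = frexp(x)
    fractional, integral = modf(x)
    return (mantissa.compute(), exponent.compute(),
            fractional.compute(), integral.compute())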
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/ufunc.py",
"copies": "1",
"size": "8125",
"license": "bsd-3-clause",
"hash": 3994483543135494700,
"line_mean": 30.25,
"line_max": 92,
"alpha_frac": 0.6235076923,
"autogenerated": false,
"ratio": 3.1190019193857967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42425096116857963,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from operator import getitem
from itertools import product
from toolz import merge, pipe, concat, partial
from toolz.curried import map
from . import chunk, wrap
from .core import Array, map_blocks, concatenate, concatenate3, reshapelist
from .. import sharedict
from ..base import tokenize
from ..core import flatten
from ..utils import concrete
def fractional_slice(task, axes):
"""
>>> fractional_slice(('x', 5.1), {0: 2}) # doctest: +SKIP
(getitem, ('x', 6), (slice(0, 2),))
>>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(None, None, None), slice(-3, None)))
>>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(0, 2), slice(-3, None)))
"""
rounded = (task[0],) + tuple(int(round(i)) for i in task[1:])
index = []
for i, (t, r) in enumerate(zip(task[1:], rounded[1:])):
depth = axes.get(i, 0)
if t == r:
index.append(slice(None, None, None))
elif t < r:
index.append(slice(0, depth))
elif t > r and depth == 0:
index.append(slice(0, 0))
else:
index.append(slice(-depth, None))
index = tuple(index)
if all(ind == slice(None, None, None) for ind in index):
return task
else:
return (getitem, rounded, index)
def expand_key(k, dims):
""" Get all neighboring keys around center
>>> expand_key(('x', 2, 3), dims=[5, 5]) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1.1, 2.1), ('x', 1.1, 3), ('x', 1.1, 3.9)],
[('x', 2, 2.1), ('x', 2, 3), ('x', 2, 3.9)],
[('x', 2.9, 2.1), ('x', 2.9, 3), ('x', 2.9, 3.9)]]
>>> expand_key(('x', 0, 4), dims=[5, 5]) # doctest: +NORMALIZE_WHITESPACE
[[('x', 0, 3.1), ('x', 0, 4)],
[('x', 0.9, 3.1), ('x', 0.9, 4)]]
"""
def inds(i, ind):
rv = []
if ind - 0.9 > 0:
rv.append(ind - 0.9)
rv.append(ind)
if ind + 0.9 < dims[i] - 1:
rv.append(ind + 0.9)
return rv
shape = []
for i, ind in enumerate(k[1:]):
num = 1
if ind > 0:
num += 1
if ind < dims[i] - 1:
num += 1
shape.append(num)
seq = list(product([k[0]], *[inds(i, ind)
for i, ind in enumerate(k[1:])]))
return reshapelist(shape, seq)
def ghost_internal(x, axes):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
axes: dict
The size of the shared boundary per axis
    The axes input informs how many cells to overlap between neighboring
    blocks: {0: 2, 2: 5} means share two cells along axis 0 and five cells
    along axis 2.
"""
dims = list(map(len, x.chunks))
expand_key2 = partial(expand_key, dims=dims)
interior_keys = pipe(x._keys(), flatten, map(expand_key2), map(flatten),
concat, list)
token = tokenize(x, axes)
name = 'ghost-' + token
interior_slices = {}
ghost_blocks = {}
for k in interior_keys:
frac_slice = fractional_slice(k, axes)
if k != frac_slice:
interior_slices[k] = frac_slice
ghost_blocks[(name,) + k[1:]] = (concatenate3,
(concrete, expand_key2(k)))
chunks = []
for i, bds in enumerate(x.chunks):
if len(bds) == 1:
chunks.append(bds)
else:
left = [bds[0] + axes.get(i, 0)]
right = [bds[-1] + axes.get(i, 0)]
mid = []
for bd in bds[1:-1]:
mid.append(bd + axes.get(i, 0) * 2)
chunks.append(left + mid + right)
dsk = merge(interior_slices, ghost_blocks)
dsk = sharedict.merge(x.dask, (name, dsk))
return Array(dsk, name, chunks, dtype=x.dtype)
def trim_internal(x, axes):
""" Trim sides from each block
This couples well with the ghost operation, which may leave excess data on
each block
See also
--------
dask.array.chunk.trim
dask.array.map_blocks
"""
olist = []
for i, bd in enumerate(x.chunks):
ilist = []
for d in bd:
ilist.append(d - axes.get(i, 0) * 2)
olist.append(tuple(ilist))
chunks = tuple(olist)
return map_blocks(partial(chunk.trim, axes=axes), x, chunks=chunks,
dtype=x.dtype)
def periodic(x, axis, depth):
""" Copy a slice of an array around to its other side
Useful to create periodic boundary conditions for ghost
"""
left = ((slice(None, None, None),) * axis +
(slice(0, depth),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-depth, None),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = x[left]
r = x[right]
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([r, x, l], axis=axis)
def reflect(x, axis, depth):
""" Reflect boundaries of array on the same side
This is the converse of ``periodic``
"""
if depth == 1:
left = ((slice(None, None, None),) * axis +
(slice(0, 1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
else:
left = ((slice(None, None, None),) * axis +
(slice(depth - 1, None, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-1, -depth - 1, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = x[left]
r = x[right]
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([l, x, r], axis=axis)
def nearest(x, axis, depth):
""" Each reflect each boundary value outwards
This mimics what the skimage.filters.gaussian_filter(... mode="nearest")
does.
"""
left = ((slice(None, None, None),) * axis +
(slice(0, 1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-1, -2, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = concatenate([x[left]] * depth, axis=axis)
r = concatenate([x[right]] * depth, axis=axis)
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([l, x, r], axis=axis)
def constant(x, axis, depth, value):
""" Add constant slice to either side of array """
chunks = list(x.chunks)
chunks[axis] = (depth,)
c = wrap.full(tuple(map(sum, chunks)), value,
chunks=tuple(chunks), dtype=x.dtype)
return concatenate([c, x, c], axis=axis)
def _remove_ghost_boundaries(l, r, axis, depth):
lchunks = list(l.chunks)
lchunks[axis] = (depth,)
rchunks = list(r.chunks)
rchunks[axis] = (depth,)
l = l.rechunk(tuple(lchunks))
r = r.rechunk(tuple(rchunks))
return l, r
def boundaries(x, depth=None, kind=None):
""" Add boundary conditions to an array before ghosting
See Also
--------
periodic
constant
"""
if not isinstance(kind, dict):
kind = dict((i, kind) for i in range(x.ndim))
if not isinstance(depth, dict):
depth = dict((i, depth) for i in range(x.ndim))
for i in range(x.ndim):
d = depth.get(i, 0)
if d == 0:
continue
this_kind = kind.get(i, 'none')
if this_kind == 'none':
continue
elif this_kind == 'periodic':
x = periodic(x, i, d)
elif this_kind == 'reflect':
x = reflect(x, i, d)
elif this_kind == 'nearest':
x = nearest(x, i, d)
elif i in kind:
x = constant(x, i, d, kind[i])
return x
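# --- Hedged usage sketch (not part of the original module) ------------------
# Hypothetical sizes; mixes boundary kinds per axis before any ghosting.
def _example_boundaries():
    import numpy as _np
    import dask.array as _da
    x = _da.from_array(_np.arange(16).reshape(4, 4), chunks=(2, 2))
    padded = boundaries(x, depth={0: 1, 1: 1},
                        kind={0: 'periodic', 1: 'reflect'})
    return padded.compute()    # shape grows from (4, 4) to (6, 6)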
def ghost(x, depth, boundary):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
depth: dict
The size of the shared boundary per axis
boundary: dict
        The boundary condition on each axis. Options are 'reflect', 'periodic',
        'nearest', 'none', or a constant value, which fills the added boundary
        cells with that value.
    The depth input informs how many cells to overlap between neighboring
    blocks: ``{0: 2, 2: 5}`` means share two cells along axis 0 and five cells
    along axis 2. Axes missing from this input will not be overlapped.
Examples
--------
>>> import numpy as np
>>> import dask.array as da
>>> x = np.arange(64).reshape((8, 8))
>>> d = da.from_array(x, chunks=(4, 4))
>>> d.chunks
((4, 4), (4, 4))
>>> g = da.ghost.ghost(d, depth={0: 2, 1: 1},
... boundary={0: 100, 1: 'reflect'})
>>> g.chunks
((8, 8), (6, 6))
>>> np.array(g)
array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],
[ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],
[ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])
"""
depth2 = coerce_depth(x.ndim, depth)
boundary2 = coerce_boundary(x.ndim, boundary)
# is depth larger than chunk size?
depth_values = [depth2.get(i, 0) for i in range(x.ndim)]
for d, c in zip(depth_values, x.chunks):
if d > min(c):
raise ValueError("The overlapping depth %d is larger than your\n"
"smallest chunk size %d. Rechunk your array\n"
"with a larger chunk size or a chunk size that\n"
"more evenly divides the shape of your array." %
(d, min(c)))
x2 = boundaries(x, depth2, boundary2)
x3 = ghost_internal(x2, depth2)
trim = dict((k, v * 2 if boundary2.get(k, 'none') != 'none' else 0)
for k, v in depth2.items())
x4 = chunk.trim(x3, trim)
return x4
def add_dummy_padding(x, depth, boundary):
"""
Pads an array which has 'none' as the boundary type.
Used to simplify trimming arrays which use 'none'.
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE
array([..., 0, 1, 2, 3, 4, 5, ...])
"""
for k, v in boundary.items():
d = depth[k]
if v == 'none' and d > 0:
empty_shape = list(x.shape)
empty_shape[k] = d
empty_chunks = list(x.chunks)
empty_chunks[k] = (d,)
empty = wrap.empty(empty_shape, chunks=empty_chunks, dtype=x.dtype)
out_chunks = list(x.chunks)
ax_chunks = list(out_chunks[k])
ax_chunks[0] += d
ax_chunks[-1] += d
out_chunks[k] = ax_chunks
x = concatenate([empty, x, empty], axis=k)
x = x.rechunk(out_chunks)
return x
def map_overlap(x, func, depth, boundary=None, trim=True, **kwargs):
depth2 = coerce_depth(x.ndim, depth)
boundary2 = coerce_boundary(x.ndim, boundary)
g = ghost(x, depth=depth2, boundary=boundary2)
g2 = g.map_blocks(func, **kwargs)
if trim:
g3 = add_dummy_padding(g2, depth2, boundary2)
return trim_internal(g3, depth2)
else:
return g2
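# --- Hedged usage sketch (not part of the original module) ------------------
# Hypothetical data and a shape-preserving block function; depth=1 shares one
# cell with each neighbour and the reflected halo is trimmed off afterwards.
def _example_map_overlap():
    import numpy as _np
    import dask.array as _da
    x = _da.from_array(_np.arange(36).reshape(6, 6), chunks=(3, 3))
    result = map_overlap(x, lambda block: block * 2.0,
                         depth=1, boundary='reflect')
    return result.compute()    # same shape as x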
def coerce_depth(ndim, depth):
if isinstance(depth, int):
depth = (depth,) * ndim
if isinstance(depth, tuple):
depth = dict(zip(range(ndim), depth))
return depth
def coerce_boundary(ndim, boundary):
if boundary is None:
boundary = 'reflect'
if not isinstance(boundary, (tuple, dict)):
boundary = (boundary,) * ndim
if isinstance(boundary, tuple):
boundary = dict(zip(range(ndim), boundary))
return boundary
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/ghost.py",
"copies": "1",
"size": "12823",
"license": "bsd-3-clause",
"hash": -8458673774919209000,
"line_mean": 30.199513382,
"line_max": 93,
"alpha_frac": 0.5181314825,
"autogenerated": false,
"ratio": 3.179518968509794,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195150959211211,
"avg_score": 0.0004998983597166154,
"num_lines": 411
} |
from __future__ import absolute_import, division, print_function
from operator import getitem
from itertools import product
from toolz import merge, pipe, concat, partition, partial
from toolz.curried import map
from ..base import tokenize
from ..core import flatten
from ..utils import concrete
from .core import Array, map_blocks, concatenate, concatenate3
from . import chunk, wrap
def fractional_slice(task, axes):
"""
>>> fractional_slice(('x', 5.1), {0: 2}) # doctest: +SKIP
(getitem, ('x', 6), (slice(0, 2),))
>>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(None, None, None), slice(-3, None)))
>>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(0, 2), slice(-3, None)))
"""
rounded = (task[0],) + tuple(map(round, task[1:]))
index = []
for i, (t, r) in enumerate(zip(task[1:], rounded[1:])):
depth = axes.get(i, 0)
if t == r:
index.append(slice(None, None, None))
elif t < r:
index.append(slice(0, depth))
elif t > r and depth == 0:
index.append(slice(0, 0))
else:
index.append(slice(-depth, None))
index = tuple(index)
if all(ind == slice(None, None, None) for ind in index):
return task
else:
return (getitem, rounded, index)
def expand_key(k, dims):
""" Get all neighboring keys around center
>>> expand_key(('x', 2, 3), dims=[5, 5]) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1.1, 2.1), ('x', 1.1, 3), ('x', 1.1, 3.9)],
[('x', 2, 2.1), ('x', 2, 3), ('x', 2, 3.9)],
[('x', 2.9, 2.1), ('x', 2.9, 3), ('x', 2.9, 3.9)]]
>>> expand_key(('x', 0, 4), dims=[5, 5]) # doctest: +NORMALIZE_WHITESPACE
[[('x', 0, 3.1), ('x', 0, 4)],
[('x', 0.9, 3.1), ('x', 0.9, 4)]]
"""
def inds(i, ind):
rv = []
if ind - 0.9 > 0:
rv.append(ind - 0.9)
rv.append(ind)
if ind + 0.9 < dims[i] - 1:
rv.append(ind + 0.9)
return rv
shape = []
for i, ind in enumerate(k[1:]):
num = 1
if ind > 0:
num += 1
if ind < dims[i] - 1:
num += 1
shape.append(num)
seq = list(product([k[0]], *[inds(i, ind)
for i, ind in enumerate(k[1:])]))
return reshape(shape, seq)
def reshape(shape, seq):
""" Reshape iterator to nested shape
>>> reshape((2, 3), range(6))
[[0, 1, 2], [3, 4, 5]]
"""
if len(shape) == 1:
return list(seq)
else:
n = int(len(seq) / shape[0])
return [reshape(shape[1:], part) for part in partition(n, seq)]
def ghost_internal(x, axes):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
axes: dict
The size of the shared boundary per axis
    The axes dict informs how many cells to overlap between neighboring
    blocks: {0: 2, 2: 5} means share two cells along axis 0 and five cells
    along axis 2.
"""
dims = list(map(len, x.chunks))
expand_key2 = partial(expand_key, dims=dims)
interior_keys = pipe(x._keys(), flatten, map(expand_key2), map(flatten),
concat, list)
token = tokenize(x, axes)
name = 'ghost-' + token
interior_slices = {}
ghost_blocks = {}
for k in interior_keys:
frac_slice = fractional_slice(k, axes)
if k != frac_slice:
interior_slices[k] = frac_slice
ghost_blocks[(name,) + k[1:]] = (concatenate3,
(concrete, expand_key2(k)))
chunks = []
for i, bds in enumerate(x.chunks):
if len(bds) == 1:
chunks.append(bds)
else:
left = [bds[0] + axes.get(i, 0)]
right = [bds[-1] + axes.get(i, 0)]
mid = []
for bd in bds[1:-1]:
mid.append(bd + axes.get(i, 0) * 2)
chunks.append(left + mid + right)
return Array(merge(interior_slices, ghost_blocks, x.dask),
name, chunks)
def trim_internal(x, axes):
""" Trim sides from each block
This couples well with the ghost operation, which may leave excess data on
each block
    See also
    --------
    chunk.trim
    map_blocks
"""
olist = []
for i, bd in enumerate(x.chunks):
ilist = []
for d in bd:
ilist.append(d - axes.get(i, 0) * 2)
olist.append(tuple(ilist))
chunks = tuple(olist)
return map_blocks(partial(chunk.trim, axes=axes), x, chunks=chunks)
def periodic(x, axis, depth):
""" Copy a slice of an array around to its other side
Useful to create periodic boundary conditions for ghost
"""
left = ((slice(None, None, None),) * axis +
(slice(0, depth),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-depth, None),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = x[left]
r = x[right]
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([r, x, l], axis=axis)
def reflect(x, axis, depth):
""" Reflect boundaries of array on the same side
This is the converse of ``periodic``
"""
if depth == 1:
left = ((slice(None, None, None),) * axis +
(slice(0, 1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
else:
left = ((slice(None, None, None),) * axis +
(slice(depth - 1, None, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-1, -depth-1, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = x[left]
r = x[right]
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([l, x, r], axis=axis)
def nearest(x, axis, depth):
""" Each reflect each boundary value outwards
This mimics what the skimage.filters.gaussian_filter(... mode="nearest")
does.
"""
left = ((slice(None, None, None),) * axis +
(slice(0, 1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
right = ((slice(None, None, None),) * axis +
(slice(-1, -2, -1),) +
(slice(None, None, None),) * (x.ndim - axis - 1))
l = concatenate([x[left]] * depth, axis=axis)
r = concatenate([x[right]] * depth, axis=axis)
l, r = _remove_ghost_boundaries(l, r, axis, depth)
return concatenate([l, x, r], axis=axis)
def constant(x, axis, depth, value):
""" Add constant slice to either side of array """
chunks = list(x.chunks)
chunks[axis] = (depth,)
c = wrap.full(tuple(map(sum, chunks)), value,
chunks=tuple(chunks), dtype=x._dtype)
return concatenate([c, x, c], axis=axis)
def _remove_ghost_boundaries(l, r, axis, depth):
lchunks = list(l.chunks)
lchunks[axis] = (depth,)
rchunks = list(r.chunks)
rchunks[axis] = (depth,)
l = l.rechunk(tuple(lchunks))
r = r.rechunk(tuple(rchunks))
return l, r
def boundaries(x, depth=None, kind=None):
""" Add boundary conditions to an array before ghosting
See Also
--------
periodic
constant
"""
if not isinstance(kind, dict):
kind = dict((i, kind) for i in range(x.ndim))
if not isinstance(depth, dict):
depth = dict((i, depth) for i in range(x.ndim))
for i in range(x.ndim):
d = depth.get(i, 0)
if d == 0:
continue
this_kind = kind.get(i, 'none')
if this_kind == 'none':
continue
elif this_kind == 'periodic':
x = periodic(x, i, d)
elif this_kind == 'reflect':
x = reflect(x, i, d)
elif this_kind == 'nearest':
x = nearest(x, i, d)
elif i in kind:
x = constant(x, i, d, kind[i])
return x
def ghost(x, depth, boundary):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
depth: dict
The size of the shared boundary per axis
boundary: dict
        The boundary condition on each axis. Options are 'reflect', 'periodic',
        'nearest', 'none', or an integer, which fills the boundary with that
        integer.
    The axes dict informs how many cells to overlap between neighboring
    blocks: {0: 2, 2: 5} means share two cells along axis 0 and five cells
    along axis 2.
Examples
--------
>>> import numpy as np
>>> import dask.array as da
>>> x = np.arange(64).reshape((8, 8))
>>> d = da.from_array(x, chunks=(4, 4))
>>> d.chunks
((4, 4), (4, 4))
>>> g = da.ghost.ghost(d, depth={0: 2, 1: 1},
... boundary={0: 100, 1: 'reflect'})
>>> g.chunks
((8, 8), (6, 6))
>>> np.array(g)
array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],
[ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],
[ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])
"""
depth2 = coerce_depth(x.ndim, depth)
boundary2 = coerce_boundary(x.ndim, boundary)
# is depth larger than chunk size?
depth_values = [depth2.get(i, 0) for i in range(x.ndim)]
for d, c in zip(depth_values, x.chunks):
if d > min(c):
raise ValueError("The overlapping depth %d is larger than your\n"
"smallest chunk size %d. Rechunk your array\n"
"with a larger chunk size or a chunk size that\n"
"more evenly divides the shape of your array." %
(d, min(c)))
x2 = boundaries(x, depth2, boundary2)
x3 = ghost_internal(x2, depth2)
trim = dict((k, v*2 if boundary2.get(k, 'none') != 'none' else 0)
for k, v in depth2.items())
x4 = chunk.trim(x3, trim)
return x4
def add_dummy_padding(x, depth, boundary):
"""
Pads an array which has 'none' as the boundary type.
Used to simplify trimming arrays which use 'none'.
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE
array([..., 0, 1, 2, 3, 4, 5, ...])
"""
for k, v in boundary.items():
if v == 'none':
d = depth[k]
empty_shape = list(x.shape)
empty_shape[k] = d
empty_chunks = list(x.chunks)
empty_chunks[k] = (d,)
empty = wrap.empty(empty_shape, chunks=empty_chunks, dtype=x.dtype)
out_chunks = list(x.chunks)
ax_chunks = list(out_chunks[k])
ax_chunks[0] += d
ax_chunks[-1] += d
out_chunks[k] = ax_chunks
x = concatenate([empty, x, empty], axis=k)
x = x.rechunk(out_chunks)
return x
def map_overlap(x, func, depth, boundary=None, trim=True, **kwargs):
depth2 = coerce_depth(x.ndim, depth)
boundary2 = coerce_boundary(x.ndim, boundary)
g = ghost(x, depth=depth2, boundary=boundary2)
g2 = g.map_blocks(func, **kwargs)
if trim:
g3 = add_dummy_padding(g2, depth2, boundary2)
return trim_internal(g3, depth2)
else:
return g2
def coerce_depth(ndim, depth):
if isinstance(depth, int):
depth = (depth,) * ndim
if isinstance(depth, tuple):
depth = dict(zip(range(ndim), depth))
return depth
def coerce_boundary(ndim, boundary):
if boundary is None:
boundary = 'reflect'
if not isinstance(boundary, (tuple, dict)):
boundary = (boundary,) * ndim
if isinstance(boundary, tuple):
boundary = dict(zip(range(ndim), boundary))
return boundary
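# --- Hedged usage sketch (not part of the original module) ------------------
# Hypothetical sizes; ghost() grows every chunk by the requested depth and
# trim_internal() removes that halo again, restoring the original chunking.
def _example_ghost_trim_roundtrip():
    import numpy as _np
    import dask.array as _da
    x = _da.from_array(_np.arange(64).reshape(8, 8), chunks=(4, 4))
    g = ghost(x, depth={0: 1, 1: 1}, boundary={0: 'periodic', 1: 'reflect'})
    trimmed = trim_internal(g, {0: 1, 1: 1})
    return g.chunks, trimmed.chunks    # ((6, 6), (6, 6)) and ((4, 4), (4, 4))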
| {
"repo_name": "cowlicks/dask",
"path": "dask/array/ghost.py",
"copies": "5",
"size": "12873",
"license": "bsd-3-clause",
"hash": -7447980645462090000,
"line_mean": 29.65,
"line_max": 93,
"alpha_frac": 0.5153421891,
"autogenerated": false,
"ratio": 3.184021766015335,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005231998574505791,
"num_lines": 420
} |