repo_name: stringlengths 7-79
path: stringlengths 4-179
copies: stringlengths 1-3
size: stringlengths 4-6
content: stringlengths 959-798k
license: stringclasses (15 values)
repo_name: glorizen/nupic
path: external/linux32/lib/python2.6/site-packages/matplotlib/transforms.py
copies: 69
size: 75638
content:
""" matplotlib includes a framework for arbitrary geometric transformations that is used determine the final position of all elements drawn on the canvas. Transforms are composed into trees of :class:`TransformNode` objects whose actual value depends on their children. When the contents of children change, their parents are automatically invalidated. The next time an invalidated transform is accessed, it is recomputed to reflect those changes. This invalidation/caching approach prevents unnecessary recomputations of transforms, and contributes to better interactive performance. For example, here is a graph of the transform tree used to plot data to the graph: .. image:: ../_static/transforms.png The framework can be used for both affine and non-affine transformations. However, for speed, we want use the backend renderers to perform affine transformations whenever possible. Therefore, it is possible to perform just the affine or non-affine part of a transformation on a set of data. The affine is always assumed to occur after the non-affine. For any transform:: full transform == non-affine part + affine part The backends are not expected to handle non-affine transformations themselves. """ import numpy as np from numpy import ma from matplotlib._path import affine_transform from numpy.linalg import inv from weakref import WeakKeyDictionary import warnings try: set except NameError: from sets import Set as set import cbook from path import Path from _path import count_bboxes_overlapping_bbox, update_path_extents DEBUG = False if DEBUG: import warnings MaskedArray = ma.MaskedArray class TransformNode(object): """ :class:`TransformNode` is the base class for anything that participates in the transform tree and needs to invalidate its parents or be invalidated. This includes classes that are not really transforms, such as bounding boxes, since some transforms depend on bounding boxes to compute their values. """ _gid = 0 # Invalidation may affect only the affine part. If the # invalidation was "affine-only", the _invalid member is set to # INVALID_AFFINE_ONLY INVALID_NON_AFFINE = 1 INVALID_AFFINE = 2 INVALID = INVALID_NON_AFFINE | INVALID_AFFINE # Some metadata about the transform, used to determine whether an # invalidation is affine-only is_affine = False is_bbox = False # If pass_through is True, all ancestors will always be # invalidated, even if 'self' is already invalid. pass_through = False def __init__(self): """ Creates a new :class:`TransformNode`. """ # Parents are stored in a WeakKeyDictionary, so that if the # parents are deleted, references from the children won't keep # them alive. self._parents = WeakKeyDictionary() # TransformNodes start out as invalid until their values are # computed for the first time. self._invalid = 1 def __copy__(self, *args): raise NotImplementedError( "TransformNode instances can not be copied. " + "Consider using frozen() instead.") __deepcopy__ = __copy__ def invalidate(self): """ Invalidate this :class:`TransformNode` and all of its ancestors. Should be called any time the transform changes. """ # If we are an affine transform being changed, we can set the # flag to INVALID_AFFINE_ONLY value = (self.is_affine) and self.INVALID_AFFINE or self.INVALID # Shortcut: If self is already invalid, that means its parents # are as well, so we don't need to do anything. if self._invalid == value: return if not len(self._parents): self._invalid = value return # Invalidate all ancestors of self using pseudo-recursion. 
stack = [self] while len(stack): root = stack.pop() # Stop at subtrees that have already been invalidated if root._invalid != value or root.pass_through: root._invalid = self.INVALID stack.extend(root._parents.keys()) def set_children(self, *children): """ Set the children of the transform, to let the invalidation system know which transforms can invalidate this transform. Should be called from the constructor of any transforms that depend on other transforms. """ for child in children: child._parents[self] = None if DEBUG: _set_children = set_children def set_children(self, *children): self._set_children(*children) self._children = children set_children.__doc__ = _set_children.__doc__ def frozen(self): """ Returns a frozen copy of this transform node. The frozen copy will not update when its children change. Useful for storing a previously known state of a transform where ``copy.deepcopy()`` might normally be used. """ return self if DEBUG: def write_graphviz(self, fobj, highlight=[]): """ For debugging purposes. Writes the transform tree rooted at 'self' to a graphviz "dot" format file. This file can be run through the "dot" utility to produce a graph of the transform tree. Affine transforms are marked in blue. Bounding boxes are marked in yellow. *fobj*: A Python file-like object """ seen = set() def recurse(root): if root in seen: return seen.add(root) props = {} label = root.__class__.__name__ if root._invalid: label = '[%s]' % label if root in highlight: props['style'] = 'bold' props['shape'] = 'box' props['label'] = '"%s"' % label props = ' '.join(['%s=%s' % (key, val) for key, val in props.items()]) fobj.write('%s [%s];\n' % (hash(root), props)) if hasattr(root, '_children'): for child in root._children: name = '?' for key, val in root.__dict__.items(): if val is child: name = key break fobj.write('%s -> %s [label="%s", fontsize=10];\n' % ( hash(root), hash(child), name)) recurse(child) fobj.write("digraph G {\n") recurse(self) fobj.write("}\n") else: def write_graphviz(self, fobj, highlight=[]): return class BboxBase(TransformNode): """ This is the base class of all bounding boxes, and provides read-only access to its data. A mutable bounding box is provided by the :class:`Bbox` class. The canonical representation is as two points, with no restrictions on their ordering. Convenience properties are provided to get the left, bottom, right and top edges and width and height, but these are not stored explicity. """ is_bbox = True is_affine = True #* Redundant: Removed for performance # # def __init__(self): # TransformNode.__init__(self) if DEBUG: def _check(points): if ma.isMaskedArray(points): warnings.warn("Bbox bounds are a masked array.") points = np.asarray(points) if (points[1,0] - points[0,0] == 0 or points[1,1] - points[0,1] == 0): warnings.warn("Singular Bbox.") _check = staticmethod(_check) def frozen(self): return Bbox(self.get_points().copy()) frozen.__doc__ = TransformNode.__doc__ def __array__(self, *args, **kwargs): return self.get_points() def is_unit(self): """ Returns True if the :class:`Bbox` is the unit bounding box from (0, 0) to (1, 1). """ return list(self.get_points().flatten()) == [0., 0., 1., 1.] def _get_x0(self): return self.get_points()[0, 0] x0 = property(_get_x0, None, None, """ (property) :attr:`x0` is the first of the pair of *x* coordinates that define the bounding box. :attr:`x0` is not guaranteed to be less than :attr:`x1`. 
If you require that, use :attr:`xmin`.""") def _get_y0(self): return self.get_points()[0, 1] y0 = property(_get_y0, None, None, """ (property) :attr:`y0` is the first of the pair of *y* coordinates that define the bounding box. :attr:`y0` is not guaranteed to be less than :attr:`y1`. If you require that, use :attr:`ymin`.""") def _get_x1(self): return self.get_points()[1, 0] x1 = property(_get_x1, None, None, """ (property) :attr:`x1` is the second of the pair of *x* coordinates that define the bounding box. :attr:`x1` is not guaranteed to be greater than :attr:`x0`. If you require that, use :attr:`xmax`.""") def _get_y1(self): return self.get_points()[1, 1] y1 = property(_get_y1, None, None, """ (property) :attr:`y1` is the second of the pair of *y* coordinates that define the bounding box. :attr:`y1` is not guaranteed to be greater than :attr:`y0`. If you require that, use :attr:`ymax`.""") def _get_p0(self): return self.get_points()[0] p0 = property(_get_p0, None, None, """ (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that define the bounding box. It is not guaranteed to be the bottom-left corner. For that, use :attr:`min`.""") def _get_p1(self): return self.get_points()[1] p1 = property(_get_p1, None, None, """ (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates that define the bounding box. It is not guaranteed to be the top-right corner. For that, use :attr:`max`.""") def _get_xmin(self): return min(self.get_points()[:, 0]) xmin = property(_get_xmin, None, None, """ (property) :attr:`xmin` is the left edge of the bounding box.""") def _get_ymin(self): return min(self.get_points()[:, 1]) ymin = property(_get_ymin, None, None, """ (property) :attr:`ymin` is the bottom edge of the bounding box.""") def _get_xmax(self): return max(self.get_points()[:, 0]) xmax = property(_get_xmax, None, None, """ (property) :attr:`xmax` is the right edge of the bounding box.""") def _get_ymax(self): return max(self.get_points()[:, 1]) ymax = property(_get_ymax, None, None, """ (property) :attr:`ymax` is the top edge of the bounding box.""") def _get_min(self): return [min(self.get_points()[:, 0]), min(self.get_points()[:, 1])] min = property(_get_min, None, None, """ (property) :attr:`min` is the bottom-left corner of the bounding box.""") def _get_max(self): return [max(self.get_points()[:, 0]), max(self.get_points()[:, 1])] max = property(_get_max, None, None, """ (property) :attr:`max` is the top-right corner of the bounding box.""") def _get_intervalx(self): return self.get_points()[:, 0] intervalx = property(_get_intervalx, None, None, """ (property) :attr:`intervalx` is the pair of *x* coordinates that define the bounding box. It is not guaranteed to be sorted from left to right.""") def _get_intervaly(self): return self.get_points()[:, 1] intervaly = property(_get_intervaly, None, None, """ (property) :attr:`intervaly` is the pair of *y* coordinates that define the bounding box. It is not guaranteed to be sorted from bottom to top.""") def _get_width(self): points = self.get_points() return points[1, 0] - points[0, 0] width = property(_get_width, None, None, """ (property) The width of the bounding box. It may be negative if :attr:`x1` < :attr:`x0`.""") def _get_height(self): points = self.get_points() return points[1, 1] - points[0, 1] height = property(_get_height, None, None, """ (property) The height of the bounding box. 
It may be negative if :attr:`y1` < :attr:`y0`.""") def _get_size(self): points = self.get_points() return points[1] - points[0] size = property(_get_size, None, None, """ (property) The width and height of the bounding box. May be negative, in the same way as :attr:`width` and :attr:`height`.""") def _get_bounds(self): x0, y0, x1, y1 = self.get_points().flatten() return (x0, y0, x1 - x0, y1 - y0) bounds = property(_get_bounds, None, None, """ (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`, :attr:`height`).""") def _get_extents(self): return self.get_points().flatten().copy() extents = property(_get_extents, None, None, """ (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`).""") def get_points(self): return NotImplementedError() def containsx(self, x): """ Returns True if *x* is between or equal to :attr:`x0` and :attr:`x1`. """ x0, x1 = self.intervalx return ((x0 < x1 and (x >= x0 and x <= x1)) or (x >= x1 and x <= x0)) def containsy(self, y): """ Returns True if *y* is between or equal to :attr:`y0` and :attr:`y1`. """ y0, y1 = self.intervaly return ((y0 < y1 and (y >= y0 and y <= y1)) or (y >= y1 and y <= y0)) def contains(self, x, y): """ Returns *True* if (*x*, *y*) is a coordinate inside the bounding box or on its edge. """ return self.containsx(x) and self.containsy(y) def overlaps(self, other): """ Returns True if this bounding box overlaps with the given bounding box *other*. """ ax1, ay1, ax2, ay2 = self._get_extents() bx1, by1, bx2, by2 = other._get_extents() if ax2 < ax1: ax2, ax1 = ax1, ax2 if ay2 < ay1: ay2, ay1 = ay1, ay2 if bx2 < bx1: bx2, bx1 = bx1, bx2 if by2 < by1: by2, by1 = by1, by2 return not ((bx2 < ax1) or (by2 < ay1) or (bx1 > ax2) or (by1 > ay2)) def fully_containsx(self, x): """ Returns True if *x* is between but not equal to :attr:`x0` and :attr:`x1`. """ x0, x1 = self.intervalx return ((x0 < x1 and (x > x0 and x < x1)) or (x > x1 and x < x0)) def fully_containsy(self, y): """ Returns True if *y* is between but not equal to :attr:`y0` and :attr:`y1`. """ y0, y1 = self.intervaly return ((y0 < y1 and (x > y0 and x < y1)) or (x > y1 and x < y0)) def fully_contains(self, x, y): """ Returns True if (*x*, *y*) is a coordinate inside the bounding box, but not on its edge. """ return self.fully_containsx(x) \ and self.fully_containsy(y) def fully_overlaps(self, other): """ Returns True if this bounding box overlaps with the given bounding box *other*, but not on its edge alone. """ ax1, ay1, ax2, ay2 = self._get_extents() bx1, by1, bx2, by2 = other._get_extents() if ax2 < ax1: ax2, ax1 = ax1, ax2 if ay2 < ay1: ay2, ay1 = ay1, ay2 if bx2 < bx1: bx2, bx1 = bx1, bx2 if by2 < by1: by2, by1 = by1, by2 return not ((bx2 <= ax1) or (by2 <= ay1) or (bx1 >= ax2) or (by1 >= ay2)) def transformed(self, transform): """ Return a new :class:`Bbox` object, statically transformed by the given transform. """ return Bbox(transform.transform(self.get_points())) def inverse_transformed(self, transform): """ Return a new :class:`Bbox` object, statically transformed by the inverse of the given transform. """ return Bbox(transform.inverted().transform(self.get_points())) coefs = {'C': (0.5, 0.5), 'SW': (0,0), 'S': (0.5, 0), 'SE': (1.0, 0), 'E': (1.0, 0.5), 'NE': (1.0, 1.0), 'N': (0.5, 1.0), 'NW': (0, 1.0), 'W': (0, 0.5)} def anchored(self, c, container = None): """ Return a copy of the :class:`Bbox`, shifted to position *c* within a container. 
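# Illustrative sketch of the containment and overlap queries defined above,
# assuming ``Bbox`` is importable from ``matplotlib.transforms`` (the
# ``Bbox.from_extents`` helper used here is defined later in this module):
from matplotlib.transforms import Bbox

_a = Bbox.from_extents(0.0, 0.0, 2.0, 1.0)
_b = Bbox.from_extents(1.0, 0.5, 3.0, 2.0)
assert _a.contains(1.0, 0.5)       # inside or on the edge
assert _a.containsx(2.0)           # endpoints count as contained
assert _a.overlaps(_b)
assert not _a.contains(5.0, 5.0)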
*c*: may be either: * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0 to 1, where 0 is left or bottom and 1 is right or top * a string: - 'C' for centered - 'S' for bottom-center - 'SE' for bottom-left - 'E' for left - etc. Optional argument *container* is the box within which the :class:`Bbox` is positioned; it defaults to the initial :class:`Bbox`. """ if container is None: container = self l, b, w, h = container.bounds if isinstance(c, str): cx, cy = self.coefs[c] else: cx, cy = c L, B, W, H = self.bounds return Bbox(self._points + [(l + cx * (w-W)) - L, (b + cy * (h-H)) - B]) def shrunk(self, mx, my): """ Return a copy of the :class:`Bbox`, shrunk by the factor *mx* in the *x* direction and the factor *my* in the *y* direction. The lower left corner of the box remains unchanged. Normally *mx* and *my* will be less than 1, but this is not enforced. """ w, h = self.size return Bbox([self._points[0], self._points[0] + [mx * w, my * h]]) def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = 1.0): """ Return a copy of the :class:`Bbox`, shrunk so that it is as large as it can be while having the desired aspect ratio, *box_aspect*. If the box coordinates are relative---that is, fractions of a larger box such as a figure---then the physical aspect ratio of that figure is specified with *fig_aspect*, so that *box_aspect* can also be given as a ratio of the absolute dimensions, not the relative dimensions. """ assert box_aspect > 0 and fig_aspect > 0 if container is None: container = self w, h = container.size H = w * box_aspect/fig_aspect if H <= h: W = w else: W = h * fig_aspect/box_aspect H = h return Bbox([self._points[0], self._points[0] + (W, H)]) def splitx(self, *args): """ e.g., ``bbox.splitx(f1, f2, ...)`` Returns a list of new :class:`Bbox` objects formed by splitting the original one with vertical lines at fractional positions *f1*, *f2*, ... """ boxes = [] xf = [0] + list(args) + [1] x0, y0, x1, y1 = self._get_extents() w = x1 - x0 for xf0, xf1 in zip(xf[:-1], xf[1:]): boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]])) return boxes def splity(self, *args): """ e.g., ``bbox.splitx(f1, f2, ...)`` Returns a list of new :class:`Bbox` objects formed by splitting the original one with horizontal lines at fractional positions *f1*, *f2*, ... """ boxes = [] yf = [0] + list(args) + [1] x0, y0, x1, y1 = self._get_extents() h = y1 - y0 for yf0, yf1 in zip(yf[:-1], yf[1:]): boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]])) return boxes def count_contains(self, vertices): """ Count the number of vertices contained in the :class:`Bbox`. *vertices* is a Nx2 Numpy array. """ if len(vertices) == 0: return 0 vertices = np.asarray(vertices) x0, y0, x1, y1 = self._get_extents() dx0 = np.sign(vertices[:, 0] - x0) dy0 = np.sign(vertices[:, 1] - y0) dx1 = np.sign(vertices[:, 0] - x1) dy1 = np.sign(vertices[:, 1] - y1) inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2 return np.sum(inside) def count_overlaps(self, bboxes): """ Count the number of bounding boxes that overlap this one. bboxes is a sequence of :class:`BboxBase` objects """ return count_bboxes_overlapping_bbox(self, bboxes) def expanded(self, sw, sh): """ Return a new :class:`Bbox` which is this :class:`Bbox` expanded around its center by the given factors *sw* and *sh*. 
""" width = self.width height = self.height deltaw = (sw * width - width) / 2.0 deltah = (sh * height - height) / 2.0 a = np.array([[-deltaw, -deltah], [deltaw, deltah]]) return Bbox(self._points + a) def padded(self, p): """ Return a new :class:`Bbox` that is padded on all four sides by the given value. """ points = self._points return Bbox(points + [[-p, -p], [p, p]]) def translated(self, tx, ty): """ Return a copy of the :class:`Bbox`, statically translated by *tx* and *ty*. """ return Bbox(self._points + (tx, ty)) def corners(self): """ Return an array of points which are the four corners of this rectangle. For example, if this :class:`Bbox` is defined by the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns (*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*). """ l, b, r, t = self.get_points().flatten() return np.array([[l, b], [l, t], [r, b], [r, t]]) def rotated(self, radians): """ Return a new bounding box that bounds a rotated version of this bounding box by the given radians. The new bounding box is still aligned with the axes, of course. """ corners = self.corners() corners_rotated = Affine2D().rotate(radians).transform(corners) bbox = Bbox.unit() bbox.update_from_data_xy(corners_rotated, ignore=True) return bbox #@staticmethod def union(bboxes): """ Return a :class:`Bbox` that contains all of the given bboxes. """ assert(len(bboxes)) if len(bboxes) == 1: return bboxes[0] x0 = np.inf y0 = np.inf x1 = -np.inf y1 = -np.inf for bbox in bboxes: points = bbox.get_points() xs = points[:, 0] ys = points[:, 1] x0 = min(x0, np.min(xs)) y0 = min(y0, np.min(ys)) x1 = max(x1, np.max(xs)) y1 = max(y1, np.max(ys)) return Bbox.from_extents(x0, y0, x1, y1) union = staticmethod(union) class Bbox(BboxBase): """ A mutable bounding box. """ def __init__(self, points): """ *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]] If you need to create a :class:`Bbox` object from another form of data, consider the static methods :meth:`unit`, :meth:`from_bounds` and :meth:`from_extents`. """ BboxBase.__init__(self) self._points = np.asarray(points, np.float_) self._minpos = np.array([0.0000001, 0.0000001]) self._ignore = True if DEBUG: ___init__ = __init__ def __init__(self, points): self._check(points) self.___init__(points) def invalidate(self): self._check(self._points) TransformNode.invalidate(self) _unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_) #@staticmethod def unit(): """ (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to (1, 1). """ return Bbox(Bbox._unit_values.copy()) unit = staticmethod(unit) #@staticmethod def from_bounds(x0, y0, width, height): """ (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*, *width* and *height*. *width* and *height* may be negative. """ return Bbox.from_extents(x0, y0, x0 + width, y0 + height) from_bounds = staticmethod(from_bounds) #@staticmethod def from_extents(*args): """ (staticmethod) Create a new Bbox from *left*, *bottom*, *right* and *top*. The *y*-axis increases upwards. """ points = np.array(args, dtype=np.float_).reshape(2, 2) return Bbox(points) from_extents = staticmethod(from_extents) def __repr__(self): return 'Bbox(%s)' % repr(self._points) __str__ = __repr__ def ignore(self, value): """ Set whether the existing bounds of the box should be ignored by subsequent calls to :meth:`update_from_data` or :meth:`update_from_data_xy`. *value*: - When True, subsequent calls to :meth:`update_from_data` will ignore the existing bounds of the :class:`Bbox`. 
- When False, subsequent calls to :meth:`update_from_data` will include the existing bounds of the :class:`Bbox`. """ self._ignore = value def update_from_data(self, x, y, ignore=None): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *x*: a numpy array of *x*-values *y*: a numpy array of *y*-values *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. - when None, use the last value passed to :meth:`ignore`. """ warnings.warn( "update_from_data requires a memory copy -- please replace with update_from_data_xy") xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1)))) return self.update_from_data_xy(xy, ignore) def update_from_path(self, path, ignore=None, updatex=True, updatey=True): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *path*: a :class:`~matplotlib.path.Path` instance *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. - when None, use the last value passed to :meth:`ignore`. *updatex*: when True, update the x values *updatey*: when True, update the y values """ if ignore is None: ignore = self._ignore if path.vertices.size == 0: return points, minpos, changed = update_path_extents( path, None, self._points, self._minpos, ignore) if changed: self.invalidate() if updatex: self._points[:,0] = points[:,0] self._minpos[0] = minpos[0] if updatey: self._points[:,1] = points[:,1] self._minpos[1] = minpos[1] def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *xy*: a numpy array of 2D points *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. - when None, use the last value passed to :meth:`ignore`. 
*updatex*: when True, update the x values *updatey*: when True, update the y values """ if len(xy) == 0: return path = Path(xy) self.update_from_path(path, ignore=ignore, updatex=updatex, updatey=updatey) def _set_x0(self, val): self._points[0, 0] = val self.invalidate() x0 = property(BboxBase._get_x0, _set_x0) def _set_y0(self, val): self._points[0, 1] = val self.invalidate() y0 = property(BboxBase._get_y0, _set_y0) def _set_x1(self, val): self._points[1, 0] = val self.invalidate() x1 = property(BboxBase._get_x1, _set_x1) def _set_y1(self, val): self._points[1, 1] = val self.invalidate() y1 = property(BboxBase._get_y1, _set_y1) def _set_p0(self, val): self._points[0] = val self.invalidate() p0 = property(BboxBase._get_p0, _set_p0) def _set_p1(self, val): self._points[1] = val self.invalidate() p1 = property(BboxBase._get_p1, _set_p1) def _set_intervalx(self, interval): self._points[:, 0] = interval self.invalidate() intervalx = property(BboxBase._get_intervalx, _set_intervalx) def _set_intervaly(self, interval): self._points[:, 1] = interval self.invalidate() intervaly = property(BboxBase._get_intervaly, _set_intervaly) def _set_bounds(self, bounds): l, b, w, h = bounds points = np.array([[l, b], [l+w, b+h]], np.float_) if np.any(self._points != points): self._points = points self.invalidate() bounds = property(BboxBase._get_bounds, _set_bounds) def _get_minpos(self): return self._minpos minpos = property(_get_minpos) def _get_minposx(self): return self._minpos[0] minposx = property(_get_minposx) def _get_minposy(self): return self._minpos[1] minposy = property(_get_minposy) def get_points(self): """ Get the points of the bounding box directly as a numpy array of the form: [[x0, y0], [x1, y1]]. """ self._invalid = 0 return self._points def set_points(self, points): """ Set the points of the bounding box directly from a numpy array of the form: [[x0, y0], [x1, y1]]. No error checking is performed, as this method is mainly for internal use. """ if np.any(self._points != points): self._points = points self.invalidate() def set(self, other): """ Set this bounding box from the "frozen" bounds of another :class:`Bbox`. """ if np.any(self._points != other.get_points()): self._points = other.get_points() self.invalidate() class TransformedBbox(BboxBase): """ A :class:`Bbox` that is automatically transformed by a given transform. When either the child bounding box or transform changes, the bounds of this bbox will update accordingly. """ def __init__(self, bbox, transform): """ *bbox*: a child :class:`Bbox` *transform*: a 2D :class:`Transform` """ assert bbox.is_bbox assert isinstance(transform, Transform) assert transform.input_dims == 2 assert transform.output_dims == 2 BboxBase.__init__(self) self._bbox = bbox self._transform = transform self.set_children(bbox, transform) self._points = None def __repr__(self): return "TransformedBbox(%s, %s)" % (self._bbox, self._transform) __str__ = __repr__ def get_points(self): if self._invalid: points = self._transform.transform(self._bbox.get_points()) if ma.isMaskedArray(points): points.putmask(0.0) points = np.asarray(points) self._points = points self._invalid = 0 return self._points get_points.__doc__ = Bbox.get_points.__doc__ if DEBUG: _get_points = get_points def get_points(self): points = self._get_points() self._check(points) return points class Transform(TransformNode): """ The base class of all :class:`TransformNode` instances that actually perform a transformation. All non-affine transformations should be subclasses of this class. 
New affine transformations should be subclasses of :class:`Affine2D`. Subclasses of this class should override the following members (at minimum): - :attr:`input_dims` - :attr:`output_dims` - :meth:`transform` - :attr:`is_separable` - :attr:`has_inverse` - :meth:`inverted` (if :meth:`has_inverse` can return True) If the transform needs to do something non-standard with :class:`mathplotlib.path.Path` objects, such as adding curves where there were once line segments, it should override: - :meth:`transform_path` """ # The number of input and output dimensions for this transform. # These must be overridden (with integers) in the subclass. input_dims = None output_dims = None # True if this transform as a corresponding inverse transform. has_inverse = False # True if this transform is separable in the x- and y- dimensions. is_separable = False #* Redundant: Removed for performance # # def __init__(self): # TransformNode.__init__(self) def __add__(self, other): """ Composes two transforms together such that *self* is followed by *other*. """ if isinstance(other, Transform): return composite_transform_factory(self, other) raise TypeError( "Can not add Transform to object of type '%s'" % type(other)) def __radd__(self, other): """ Composes two transforms together such that *self* is followed by *other*. """ if isinstance(other, Transform): return composite_transform_factory(other, self) raise TypeError( "Can not add Transform to object of type '%s'" % type(other)) def __array__(self, *args, **kwargs): """ Used by C/C++ -based backends to get at the array matrix data. """ return self.frozen().__array__() def transform(self, values): """ Performs the transformation on the given array of values. Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ raise NotImplementedError() def transform_affine(self, values): """ Performs only the affine part of this transformation on the given array of values. ``transform(values)`` is always equivalent to ``transform_affine(transform_non_affine(values))``. In non-affine transformations, this is generally a no-op. In affine transformations, this is equivalent to ``transform(values)``. Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ return values def transform_non_affine(self, values): """ Performs only the non-affine part of the transformation. ``transform(values)`` is always equivalent to ``transform_affine(transform_non_affine(values))``. In non-affine transformations, this is generally equivalent to ``transform(values)``. In affine transformations, this is always a no-op. Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ return self.transform(values) def get_affine(self): """ Get the affine part of this transform. """ return IdentityTransform() def transform_point(self, point): """ A convenience function that returns the transformed copy of a single point. The point is given as a sequence of length :attr:`input_dims`. The transformed point is returned as a sequence of length :attr:`output_dims`. """ assert len(point) == self.input_dims return self.transform(np.asarray([point]))[0] def transform_path(self, path): """ Returns a transformed copy of path. *path*: a :class:`~matplotlib.path.Path` instance. In some cases, this transform may insert curves into the path that began as line segments. 
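# Illustrative sketch of the composition operator described above: ``a + b``
# applies *a* first and then *b* (via ``composite_transform_factory``,
# defined later in this module).  Assumes ``matplotlib.transforms`` is
# importable:
import numpy as np
from matplotlib.transforms import Affine2D

_scale = Affine2D().scale(2.0)
_shift = Affine2D().translate(1.0, 0.0)
_combo = _scale + _shift                        # scale first, then translate
assert np.allclose(_combo.transform(np.array([[1.0, 1.0]])), [[3.0, 2.0]])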
""" return Path(self.transform(path.vertices), path.codes) def transform_path_affine(self, path): """ Returns a copy of path, transformed only by the affine part of this transform. *path*: a :class:`~matplotlib.path.Path` instance. ``transform_path(path)`` is equivalent to ``transform_path_affine(transform_path_non_affine(values))``. """ return path def transform_path_non_affine(self, path): """ Returns a copy of path, transformed only by the non-affine part of this transform. *path*: a :class:`~matplotlib.path.Path` instance. ``transform_path(path)`` is equivalent to ``transform_path_affine(transform_path_non_affine(values))``. """ return Path(self.transform_non_affine(path.vertices), path.codes) def transform_angles(self, angles, pts, radians=False, pushoff=1e-5): """ Performs transformation on a set of angles anchored at specific locations. The *angles* must be a column vector (i.e., numpy array). The *pts* must be a two-column numpy array of x,y positions (angle transforms currently only work in 2D). This array must have the same number of rows as *angles*. *radians* indicates whether or not input angles are given in radians (True) or degrees (False; the default). *pushoff* is the distance to move away from *pts* for determining transformed angles (see discussion of method below). The transformed angles are returned in an array with the same size as *angles*. The generic version of this method uses a very generic algorithm that transforms *pts*, as well as locations very close to *pts*, to find the angle in the transformed system. """ # Must be 2D if self.input_dims <> 2 or self.output_dims <> 2: raise NotImplementedError('Only defined in 2D') # pts must be array with 2 columns for x,y assert pts.shape[1] == 2 # angles must be a column vector and have same number of # rows as pts assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0] # Convert to radians if desired if not radians: angles = angles / 180.0 * np.pi # Move a short distance away pts2 = pts + pushoff * np.c_[ np.cos(angles), np.sin(angles) ] # Transform both sets of points tpts = self.transform( pts ) tpts2 = self.transform( pts2 ) # Calculate transformed angles d = tpts2 - tpts a = np.arctan2( d[:,1], d[:,0] ) # Convert back to degrees if desired if not radians: a = a * 180.0 / np.pi return a def inverted(self): """ Return the corresponding inverse transformation. The return value of this method should be treated as temporary. An update to *self* does not cause a corresponding update to its inverted copy. ``x === self.inverted().transform(self.transform(x))`` """ raise NotImplementedError() class TransformWrapper(Transform): """ A helper class that holds a single child transform and acts equivalently to it. This is useful if a node of the transform tree must be replaced at run time with a transform of a different type. This class allows that replacement to correctly trigger invalidation. Note that :class:`TransformWrapper` instances must have the same input and output dimensions during their entire lifetime, so the child transform may only be replaced with another child transform of the same dimensions. """ pass_through = True is_affine = False def __init__(self, child): """ *child*: A class:`Transform` instance. This child may later be replaced with :meth:`set`. 
""" assert isinstance(child, Transform) Transform.__init__(self) self.input_dims = child.input_dims self.output_dims = child.output_dims self._set(child) self._invalid = 0 def __repr__(self): return "TransformWrapper(%r)" % self._child __str__ = __repr__ def frozen(self): return self._child.frozen() frozen.__doc__ = Transform.frozen.__doc__ def _set(self, child): self._child = child self.set_children(child) self.transform = child.transform self.transform_affine = child.transform_affine self.transform_non_affine = child.transform_non_affine self.transform_path = child.transform_path self.transform_path_affine = child.transform_path_affine self.transform_path_non_affine = child.transform_path_non_affine self.get_affine = child.get_affine self.inverted = child.inverted def set(self, child): """ Replace the current child of this transform with another one. The new child must have the same number of input and output dimensions as the current child. """ assert child.input_dims == self.input_dims assert child.output_dims == self.output_dims self._set(child) self._invalid = 0 self.invalidate() self._invalid = 0 def _get_is_separable(self): return self._child.is_separable is_separable = property(_get_is_separable) def _get_has_inverse(self): return self._child.has_inverse has_inverse = property(_get_has_inverse) class AffineBase(Transform): """ The base class of all affine transformations of any number of dimensions. """ is_affine = True def __init__(self): Transform.__init__(self) self._inverted = None def __array__(self, *args, **kwargs): return self.get_matrix() #@staticmethod def _concat(a, b): """ Concatenates two transformation matrices (represented as numpy arrays) together. """ return np.dot(b, a) _concat = staticmethod(_concat) def get_matrix(self): """ Get the underlying transformation matrix as a numpy array. """ raise NotImplementedError() def transform_non_affine(self, points): return points transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def transform_path_affine(self, path): return self.transform_path(path) transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__ def transform_path_non_affine(self, path): return path transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__ def get_affine(self): return self get_affine.__doc__ = Transform.get_affine.__doc__ class Affine2DBase(AffineBase): """ The base class of all 2D affine transformations. 2D affine transformations are performed using a 3x3 numpy array:: a c e b d f 0 0 1 This class provides the read-only interface. For a mutable 2D affine transformation, use :class:`Affine2D`. Subclasses of this class will generally only need to override a constructor and :meth:`get_matrix` that generates a custom 3x3 matrix. 
""" input_dims = 2 output_dims = 2 #* Redundant: Removed for performance # # def __init__(self): # Affine2DBase.__init__(self) def frozen(self): return Affine2D(self.get_matrix().copy()) frozen.__doc__ = AffineBase.frozen.__doc__ def _get_is_separable(self): mtx = self.get_matrix() return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0 is_separable = property(_get_is_separable) def __array__(self, *args, **kwargs): return self.get_matrix() def to_values(self): """ Return the values of the matrix as a sequence (a,b,c,d,e,f) """ mtx = self.get_matrix() return tuple(mtx[:2].swapaxes(0, 1).flatten()) #@staticmethod def matrix_from_values(a, b, c, d, e, f): """ (staticmethod) Create a new transformation matrix as a 3x3 numpy array of the form:: a c e b d f 0 0 1 """ return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_) matrix_from_values = staticmethod(matrix_from_values) def transform(self, points): mtx = self.get_matrix() if isinstance(points, MaskedArray): tpoints = affine_transform(points.data, mtx) return ma.MaskedArray(tpoints, mask=ma.getmask(points)) return affine_transform(points, mtx) def transform_point(self, point): mtx = self.get_matrix() return affine_transform(point, mtx) transform_point.__doc__ = AffineBase.transform_point.__doc__ if DEBUG: _transform = transform def transform(self, points): # The major speed trap here is just converting to the # points to an array in the first place. If we can use # more arrays upstream, that should help here. if (not ma.isMaskedArray(points) and not isinstance(points, np.ndarray)): warnings.warn( ('A non-numpy array of type %s was passed in for ' + 'transformation. Please correct this.') % type(values)) return self._transform(points) transform.__doc__ = AffineBase.transform.__doc__ transform_affine = transform transform_affine.__doc__ = AffineBase.transform_affine.__doc__ def inverted(self): if self._inverted is None or self._invalid: mtx = self.get_matrix() self._inverted = Affine2D(inv(mtx)) self._invalid = 0 return self._inverted inverted.__doc__ = AffineBase.inverted.__doc__ class Affine2D(Affine2DBase): """ A mutable 2D affine transformation. """ def __init__(self, matrix = None): """ Initialize an Affine transform from a 3x3 numpy float array:: a c e b d f 0 0 1 If *matrix* is None, initialize with the identity transform. """ Affine2DBase.__init__(self) if matrix is None: matrix = np.identity(3) elif DEBUG: matrix = np.asarray(matrix, np.float_) assert matrix.shape == (3, 3) self._mtx = matrix self._invalid = 0 def __repr__(self): return "Affine2D(%s)" % repr(self._mtx) __str__ = __repr__ def __cmp__(self, other): if (isinstance(other, Affine2D) and (self.get_matrix() == other.get_matrix()).all()): return 0 return -1 #@staticmethod def from_values(a, b, c, d, e, f): """ (staticmethod) Create a new Affine2D instance from the given values:: a c e b d f 0 0 1 """ return Affine2D( np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_) .reshape((3,3))) from_values = staticmethod(from_values) def get_matrix(self): """ Get the underlying transformation matrix as a 3x3 numpy array:: a c e b d f 0 0 1 """ self._invalid = 0 return self._mtx def set_matrix(self, mtx): """ Set the underlying transformation matrix from a 3x3 numpy array:: a c e b d f 0 0 1 """ self._mtx = mtx self.invalidate() def set(self, other): """ Set this transformation from the frozen copy of another :class:`Affine2DBase` object. 
""" assert isinstance(other, Affine2DBase) self._mtx = other.get_matrix() self.invalidate() #@staticmethod def identity(): """ (staticmethod) Return a new :class:`Affine2D` object that is the identity transform. Unless this transform will be mutated later on, consider using the faster :class:`IdentityTransform` class instead. """ return Affine2D(np.identity(3)) identity = staticmethod(identity) def clear(self): """ Reset the underlying matrix to the identity transform. """ self._mtx = np.identity(3) self.invalidate() return self def rotate(self, theta): """ Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ a = np.cos(theta) b = np.sin(theta) rotate_mtx = np.array( [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(rotate_mtx, self._mtx) self.invalidate() return self def rotate_deg(self, degrees): """ Add a rotation (in degrees) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.rotate(degrees*np.pi/180.) def rotate_around(self, x, y, theta): """ Add a rotation (in radians) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.translate(-x, -y).rotate(theta).translate(x, y) def rotate_deg_around(self, x, y, degrees): """ Add a rotation (in degrees) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.translate(-x, -y).rotate_deg(degrees).translate(x, y) def translate(self, tx, ty): """ Adds a translation in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ translate_mtx = np.array( [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(translate_mtx, self._mtx) self.invalidate() return self def scale(self, sx, sy=None): """ Adds a scale in place. If *sy* is None, the same scale is applied in both the *x*- and *y*-directions. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ if sy is None: sy = sx scale_mtx = np.array( [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(scale_mtx, self._mtx) self.invalidate() return self def _get_is_separable(self): mtx = self.get_matrix() return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0 is_separable = property(_get_is_separable) class IdentityTransform(Affine2DBase): """ A special class that does on thing, the identity transform, in a fast way. 
""" _mtx = np.identity(3) def frozen(self): return self frozen.__doc__ = Affine2DBase.frozen.__doc__ def __repr__(self): return "IdentityTransform()" __str__ = __repr__ def get_matrix(self): return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def transform(self, points): return points transform.__doc__ = Affine2DBase.transform.__doc__ transform_affine = transform transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__ transform_non_affine = transform transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__ def transform_path(self, path): return path transform_path.__doc__ = Affine2DBase.transform_path.__doc__ transform_path_affine = transform_path transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__ transform_path_non_affine = transform_path transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__ def get_affine(self): return self get_affine.__doc__ = Affine2DBase.get_affine.__doc__ inverted = get_affine inverted.__doc__ = Affine2DBase.inverted.__doc__ class BlendedGenericTransform(Transform): """ A "blended" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. This "generic" version can handle any given child transform in the *x*- and *y*-directions. """ input_dims = 2 output_dims = 2 is_separable = True pass_through = True def __init__(self, x_transform, y_transform): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. You will generally not call this constructor directly but use the :func:`blended_transform_factory` function instead, which can determine automatically which kind of blended transform to create. """ # Here we ask: "Does it blend?" 
Transform.__init__(self) self._x = x_transform self._y = y_transform self.set_children(x_transform, y_transform) self._affine = None def _get_is_affine(self): return self._x.is_affine and self._y.is_affine is_affine = property(_get_is_affine) def frozen(self): return blended_transform_factory(self._x.frozen(), self._y.frozen()) frozen.__doc__ = Transform.frozen.__doc__ def __repr__(self): return "BlendedGenericTransform(%s,%s)" % (self._x, self._y) __str__ = __repr__ def transform(self, points): x = self._x y = self._y if x is y and x.input_dims == 2: return x.transform(points) if x.input_dims == 2: x_points = x.transform(points)[:, 0:1] else: x_points = x.transform(points[:, 0]) x_points = x_points.reshape((len(x_points), 1)) if y.input_dims == 2: y_points = y.transform(points)[:, 1:] else: y_points = y.transform(points[:, 1]) y_points = y_points.reshape((len(y_points), 1)) if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray): return ma.concatenate((x_points, y_points), 1) else: return np.concatenate((x_points, y_points), 1) transform.__doc__ = Transform.transform.__doc__ def transform_affine(self, points): return self.get_affine().transform(points) transform_affine.__doc__ = Transform.transform_affine.__doc__ def transform_non_affine(self, points): if self._x.is_affine and self._y.is_affine: return points return self.transform(points) transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def inverted(self): return BlendedGenericTransform(self._x.inverted(), self._y.inverted()) inverted.__doc__ = Transform.inverted.__doc__ def get_affine(self): if self._invalid or self._affine is None: if self._x.is_affine and self._y.is_affine: if self._x == self._y: self._affine = self._x.get_affine() else: x_mtx = self._x.get_affine().get_matrix() y_mtx = self._y.get_affine().get_matrix() # This works because we already know the transforms are # separable, though normally one would want to set b and # c to zero. mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0])) self._affine = Affine2D(mtx) else: self._affine = IdentityTransform() self._invalid = 0 return self._affine get_affine.__doc__ = Transform.get_affine.__doc__ class BlendedAffine2D(Affine2DBase): """ A "blended" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. This version is an optimization for the case where both child transforms are of type :class:`Affine2DBase`. """ is_separable = True def __init__(self, x_transform, y_transform): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. Both *x_transform* and *y_transform* must be 2D affine transforms. You will generally not call this constructor directly but use the :func:`blended_transform_factory` function instead, which can determine automatically which kind of blended transform to create. 
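# Illustrative sketch of a blended transform as described above: *x* comes
# from one child transform and *y* from the other.  The factory (defined just
# below) picks the affine-only fast path when both children are affine.
# Assumes ``matplotlib.transforms`` is importable:
import numpy as np
from matplotlib.transforms import Affine2D, blended_transform_factory

_xscale = Affine2D().scale(10.0, 1.0)
_yshift = Affine2D().translate(0.0, 5.0)
_blend = blended_transform_factory(_xscale, _yshift)
assert np.allclose(_blend.transform(np.array([[2.0, 3.0]])), [[20.0, 8.0]])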
""" assert x_transform.is_affine assert y_transform.is_affine assert x_transform.is_separable assert y_transform.is_separable Transform.__init__(self) self._x = x_transform self._y = y_transform self.set_children(x_transform, y_transform) Affine2DBase.__init__(self) self._mtx = None def __repr__(self): return "BlendedAffine2D(%s,%s)" % (self._x, self._y) __str__ = __repr__ def get_matrix(self): if self._invalid: if self._x == self._y: self._mtx = self._x.get_matrix() else: x_mtx = self._x.get_matrix() y_mtx = self._y.get_matrix() # This works because we already know the transforms are # separable, though normally one would want to set b and # c to zero. self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0])) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def blended_transform_factory(x_transform, y_transform): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. A faster version of the blended transform is returned for the case where both child transforms are affine. """ if (isinstance(x_transform, Affine2DBase) and isinstance(y_transform, Affine2DBase)): return BlendedAffine2D(x_transform, y_transform) return BlendedGenericTransform(x_transform, y_transform) class CompositeGenericTransform(Transform): """ A composite transform formed by applying transform *a* then transform *b*. This "generic" version can handle any two arbitrary transformations. """ pass_through = True def __init__(self, a, b): """ Create a new composite transform that is the result of applying transform *a* then transform *b*. You will generally not call this constructor directly but use the :func:`composite_transform_factory` function instead, which can automatically choose the best kind of composite transform instance to create. 
""" assert a.output_dims == b.input_dims self.input_dims = a.input_dims self.output_dims = b.output_dims Transform.__init__(self) self._a = a self._b = b self.set_children(a, b) def frozen(self): self._invalid = 0 frozen = composite_transform_factory(self._a.frozen(), self._b.frozen()) if not isinstance(frozen, CompositeGenericTransform): return frozen.frozen() return frozen frozen.__doc__ = Transform.frozen.__doc__ def _get_is_affine(self): return self._a.is_affine and self._b.is_affine is_affine = property(_get_is_affine) def _get_is_separable(self): return self._a.is_separable and self._b.is_separable is_separable = property(_get_is_separable) def __repr__(self): return "CompositeGenericTransform(%s, %s)" % (self._a, self._b) __str__ = __repr__ def transform(self, points): return self._b.transform( self._a.transform(points)) transform.__doc__ = Transform.transform.__doc__ def transform_affine(self, points): return self.get_affine().transform(points) transform_affine.__doc__ = Transform.transform_affine.__doc__ def transform_non_affine(self, points): if self._a.is_affine and self._b.is_affine: return points return self._b.transform_non_affine( self._a.transform(points)) transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def transform_path(self, path): return self._b.transform_path( self._a.transform_path(path)) transform_path.__doc__ = Transform.transform_path.__doc__ def transform_path_affine(self, path): return self._b.transform_path_affine( self._a.transform_path(path)) transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__ def transform_path_non_affine(self, path): if self._a.is_affine and self._b.is_affine: return path return self._b.transform_path_non_affine( self._a.transform_path(path)) transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__ def get_affine(self): if self._a.is_affine and self._b.is_affine: return Affine2D(np.dot(self._b.get_affine().get_matrix(), self._a.get_affine().get_matrix())) else: return self._b.get_affine() get_affine.__doc__ = Transform.get_affine.__doc__ def inverted(self): return CompositeGenericTransform(self._b.inverted(), self._a.inverted()) inverted.__doc__ = Transform.inverted.__doc__ class CompositeAffine2D(Affine2DBase): """ A composite transform formed by applying transform *a* then transform *b*. This version is an optimization that handles the case where both *a* and *b* are 2D affines. """ def __init__(self, a, b): """ Create a new composite transform that is the result of applying transform *a* then transform *b*. Both *a* and *b* must be instances of :class:`Affine2DBase`. You will generally not call this constructor directly but use the :func:`composite_transform_factory` function instead, which can automatically choose the best kind of composite transform instance to create. """ assert a.output_dims == b.input_dims self.input_dims = a.input_dims self.output_dims = b.output_dims assert a.is_affine assert b.is_affine Affine2DBase.__init__(self) self._a = a self._b = b self.set_children(a, b) self._mtx = None def __repr__(self): return "CompositeAffine2D(%s, %s)" % (self._a, self._b) __str__ = __repr__ def get_matrix(self): if self._invalid: self._mtx = np.dot( self._b.get_matrix(), self._a.get_matrix()) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def composite_transform_factory(a, b): """ Create a new composite transform that is the result of applying transform a then transform b. 
Shortcut versions of the blended transform are provided for the case where both child transforms are affine, or one or the other is the identity transform. Composite transforms may also be created using the '+' operator, e.g.:: c = a + b """ if isinstance(a, IdentityTransform): return b elif isinstance(b, IdentityTransform): return a elif isinstance(a, AffineBase) and isinstance(b, AffineBase): return CompositeAffine2D(a, b) return CompositeGenericTransform(a, b) class BboxTransform(Affine2DBase): """ :class:`BboxTransform` linearly transforms points from one :class:`Bbox` to another :class:`Bbox`. """ is_separable = True def __init__(self, boxin, boxout): """ Create a new :class:`BboxTransform` that linearly transforms points from *boxin* to *boxout*. """ assert boxin.is_bbox assert boxout.is_bbox Affine2DBase.__init__(self) self._boxin = boxin self._boxout = boxout self.set_children(boxin, boxout) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransform(%s, %s)" % (self._boxin, self._boxout) __str__ = __repr__ def get_matrix(self): if self._invalid: inl, inb, inw, inh = self._boxin.bounds outl, outb, outw, outh = self._boxout.bounds x_scale = outw / inw y_scale = outh / inh if DEBUG and (x_scale == 0 or y_scale == 0): raise ValueError("Transforming from or to a singular bounding box.") self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)], [0.0 , y_scale, (-inb*y_scale+outb)], [0.0 , 0.0 , 1.0 ]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class BboxTransformTo(Affine2DBase): """ :class:`BboxTransformTo` is a transformation that linearly transforms points from the unit bounding box to a given :class:`Bbox`. """ is_separable = True def __init__(self, boxout): """ Create a new :class:`BboxTransformTo` that linearly transforms points from the unit bounding box to *boxout*. """ assert boxout.is_bbox Affine2DBase.__init__(self) self._boxout = boxout self.set_children(boxout) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransformTo(%s)" % (self._boxout) __str__ = __repr__ def get_matrix(self): if self._invalid: outl, outb, outw, outh = self._boxout.bounds if DEBUG and (outw == 0 or outh == 0): raise ValueError("Transforming to a singular bounding box.") self._mtx = np.array([[outw, 0.0, outl], [ 0.0, outh, outb], [ 0.0, 0.0, 1.0]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class BboxTransformFrom(Affine2DBase): """ :class:`BboxTransformFrom` linearly transforms points from a given :class:`Bbox` to the unit bounding box. 
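# Illustrative sketch of BboxTransform as defined above: it maps points
# linearly from one Bbox to another, which is how data limits are mapped to
# normalized or screen space.  Assumes ``matplotlib.transforms`` is importable:
import numpy as np
from matplotlib.transforms import Bbox, BboxTransform

_data_box = Bbox.from_extents(0.0, 0.0, 10.0, 100.0)   # data limits
_unit_box = Bbox.from_extents(0.0, 0.0, 1.0, 1.0)      # unit square
_to_unit = BboxTransform(_data_box, _unit_box)
assert np.allclose(_to_unit.transform(np.array([[5.0, 50.0]])), [[0.5, 0.5]])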
""" is_separable = True def __init__(self, boxin): assert boxin.is_bbox Affine2DBase.__init__(self) self._boxin = boxin self.set_children(boxin) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransformFrom(%s)" % (self._boxin) __str__ = __repr__ def get_matrix(self): if self._invalid: inl, inb, inw, inh = self._boxin.bounds if DEBUG and (inw == 0 or inh == 0): raise ValueError("Transforming from a singular bounding box.") x_scale = 1.0 / inw y_scale = 1.0 / inh self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)], [0.0 , y_scale, (-inb*y_scale)], [0.0 , 0.0 , 1.0 ]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class ScaledTranslation(Affine2DBase): """ A transformation that translates by *xt* and *yt*, after *xt* and *yt* have been transformad by the given transform *scale_trans*. """ def __init__(self, xt, yt, scale_trans): Affine2DBase.__init__(self) self._t = (xt, yt) self._scale_trans = scale_trans self.set_children(scale_trans) self._mtx = None self._inverted = None def __repr__(self): return "ScaledTranslation(%s)" % (self._t,) __str__ = __repr__ def get_matrix(self): if self._invalid: xt, yt = self._scale_trans.transform_point(self._t) self._mtx = np.array([[1.0, 0.0, xt], [0.0, 1.0, yt], [0.0, 0.0, 1.0]], np.float_) self._invalid = 0 self._inverted = None return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class TransformedPath(TransformNode): """ A :class:`TransformedPath` caches a non-affine transformed copy of the :class:`~matplotlib.path.Path`. This cached copy is automatically updated when the non-affine part of the transform changes. """ def __init__(self, path, transform): """ Create a new :class:`TransformedPath` from the given :class:`~matplotlib.path.Path` and :class:`Transform`. """ assert isinstance(transform, Transform) TransformNode.__init__(self) self._path = path self._transform = transform self.set_children(transform) self._transformed_path = None self._transformed_points = None def _revalidate(self): if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE) or self._transformed_path is None): self._transformed_path = \ self._transform.transform_path_non_affine(self._path) self._transformed_points = \ Path(self._transform.transform_non_affine(self._path.vertices)) self._invalid = 0 def get_transformed_points_and_affine(self): """ Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation. Unlike :meth:`get_transformed_path_and_affine`, no interpolation will be performed. """ self._revalidate() return self._transformed_points, self.get_affine() def get_transformed_path_and_affine(self): """ Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation. """ self._revalidate() return self._transformed_path, self.get_affine() def get_fully_transformed_path(self): """ Return a fully-transformed copy of the child path. 
""" if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE) or self._transformed_path is None): self._transformed_path = \ self._transform.transform_path_non_affine(self._path) self._invalid = 0 return self._transform.transform_path_affine(self._transformed_path) def get_affine(self): return self._transform.get_affine() def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True): ''' Ensure the endpoints of a range are finite and not too close together. "too close" means the interval is smaller than 'tiny' times the maximum absolute value. If they are too close, each will be moved by the 'expander'. If 'increasing' is True and vmin > vmax, they will be swapped, regardless of whether they are too close. If either is inf or -inf or nan, return - expander, expander. ''' if (not np.isfinite(vmin)) or (not np.isfinite(vmax)): return -expander, expander swapped = False if vmax < vmin: vmin, vmax = vmax, vmin swapped = True if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny: if vmin == 0.0: vmin = -expander vmax = expander else: vmin -= expander*abs(vmin) vmax += expander*abs(vmax) if swapped and not increasing: vmin, vmax = vmax, vmin return vmin, vmax def interval_contains(interval, val): a, b = interval return ( ((a < b) and (a <= val and b >= val)) or (b <= val and a >= val)) def interval_contains_open(interval, val): a, b = interval return ( ((a < b) and (a < val and b > val)) or (b < val and a > val)) def offset_copy(trans, fig, x=0.0, y=0.0, units='inches'): ''' Return a new transform with an added offset. args: trans is any transform kwargs: fig is the current figure; it can be None if units are 'dots' x, y give the offset units is 'inches', 'points' or 'dots' ''' if units == 'dots': return trans + Affine2D().translate(x, y) if fig is None: raise ValueError('For units of inches or points a fig kwarg is needed') if units == 'points': x /= 72.0 y /= 72.0 elif not units == 'inches': raise ValueError('units must be dots, points, or inches') return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
agpl-3.0
agartland/utils
ics/.ipynb_checkpoints/plotting-checkpoint.py
1
12289
import pandas as pd import numpy as np import matplotlib.pyplot as plt import palettable import itertools from hclusterplot import plotHCluster import re from myboxplot import myboxplot import networkx as nx import seaborn as sns sns.set(style='darkgrid', palette='muted', font_scale=1.5) __all__ = ['icsTicks', 'icsTickLabels', 'swarmBox'] from .loading import * from .analyzing import * icsTicks = np.log10([0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1]) icsTickLabels = ['0.01', '0.025', '0.05', '0.1', '0.25', '0.5', '1'] # icsTicks = np.log10([0.01, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1]) #icsTickLabels = ['0.01','0.025', '0.05', '0.1','0.2','0.4','0.6','0.8', '1'] def prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', column='pvalue', cutoff='pvalue', pAdjust=True, allSubsets=False): cytokineSubsets = jDf.cytokine.unique() subset = cytokineSubsets[0].replace('-', '+').split('+')[:-1] cyCols = [c for c in cytokineSubsets if not c == '-'.join(subset)+'-'] ind = (jDf.tcellsub == tcellsubset) & (jDf.visitno == visitno) & (jDf.TreatmentGroupID.isin(rxIDs)) agInd = (jDf.antigen == antigen) & ind pvalueDf = pivotPvalues(jDf.loc[agInd], adjust=pAdjust) """Use cutoff from HVTN ICS SAP, p < 0.00001""" responseAlpha = 1e-5 callDf = (pvalueDf < responseAlpha).astype(float) magDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='mag') magAdjDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='mag_adj') bgDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='bg') """Positive subsets (to-be plotted) includes all columns unless a cutoff is specified""" if cutoff == 'mag': posSubsets = pvalueDf[cyCols].columns[(magDf[cyCols] > 0.00025).any(axis=0)] elif cutoff == 'mag_adj': posSubsets = pvalueDf[cyCols].columns[(magAdjDf[cyCols] > 0.00025).any(axis=0)] elif cutoff == 'bg': posSubsets = pvalueDf[cyCols].columns[(bgDf[cyCols] > 0).any(axis=0)] elif cutoff == 'pvalue': posSubsets = pvalueDf[cyCols].columns[(callDf[cyCols] > 0).any(axis=0)] else: posSubsets = pvalueDf[cyCols].columns if allSubsets: posSubsets = sorted(cytokineSubsets, key=lambda s: s.count('+'), reverse=True) else: posSubsets = sorted(posSubsets, key=lambda s: s.count('+'), reverse=True) if column == 'pvalue': plotDf = callDf elif column == 'mag': plotDf = magDf.applymap(np.log) elif column == 'mag_adj': plotDf = magAdjDf.applymap(np.log) elif column == 'bg': plotDf = bgDf.applymap(np.log) """Give labels a more readable look""" plotDf = plotDf.rename_axis(cytokineSubsetLabel, axis=1) posSubsets = list(map(cytokineSubsetLabel, posSubsets)) return plotDf[posSubsets] def plotPolyBP(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', column='pvalue', cutoff='pvalue', pAdjust=True, allSubsets=False, plotSubsets=None, returnPlotSubsets=False): if plotSubsets is None: plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets) posSubsets = plotDf.columns else: plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=True) posSubsets = plotSubsets cbt = np.log([0.0001, 0.00025, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.01]) cbtl = ['0.01', '0.025', '0.05', '0.1', '0.2', '0.4', '0.6', '0.8', '1'] plt.clf() plotDf = pd.DataFrame(plotDf.stack().reset_index()) plotDf = plotDf.set_index('sample') plotDf = plotDf.join(ptidDf[['TreatmentGroupID', 'TreatmentGroupName']], how='left').sort_values(by='TreatmentGroupID') if column == 'mag' or column 
== 'mag_adj': plotDf[0].loc[(plotDf[0] < np.log(0.00025)) | plotDf[0].isnull()] = np.log(0.00025) yl = np.log([0.0002, 0.01]) elif column == 'bg': plotDf[0].loc[(plotDf[0] < np.log(0.00001)) | plotDf[0].isnull()] = np.log(0.00001) yl = np.log([0.00001, 0.01]) else: print('Must specify mag, mag_adj or bg (not %s)' % column) axh = plt.subplot(111) sns.boxplot(x='cytokine', y=0, data=plotDf, hue='TreatmentGroupName', fliersize=0, ax=axh, order=posSubsets) sns.stripplot(x='cytokine', y=0, data=plotDf, hue='TreatmentGroupName', jitter=True, ax=axh, order=posSubsets) plt.yticks(cbt, cbtl) plt.ylim(yl) plt.xticks(list(range(len(posSubsets))), posSubsets, fontsize='large', fontname='Consolas') plt.ylabel('% cytokine expressing cells') handles, labels = axh.get_legend_handles_labels() l = plt.legend(handles[len(rxIDs):], labels[len(rxIDs):], loc='upper right') if returnPlotSubsets: return axh, posSubsets else: return axh def plotPolyHeat(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', cluster=False, column='pvalue', cutoff='pvalue', pAdjust=True, allSubsets=False): plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets) posSubsets = plotDf.columns plotDf = plotDf.join(ptidDf[['TreatmentGroupID', 'TreatmentGroupName']], how='left').sort_values(by='TreatmentGroupID') cbt = np.log([0.0001, 0.00025, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.01]) cbtl = ['0.01', '0.025', '0.05', '0.1', '0.2', '0.4', '0.6', '0.8', '1'] if cluster: clusterBool = [True, True] else: clusterBool = [False, False] if column == 'pvalue': vRange = [0, 2] elif column == 'mag': vRange = np.log([0.0001, 0.01]) elif column == 'mag_adj': vRange = np.log([0.0001, 0.01]) elif column == 'bg': vRange = np.log([0.0001, 0.01]) #valVec = tmp[posSubsets].values.flatten() #vRange = [log(valVec[valVec>0].min()),log(valVec.max())] ptidInd, cyColInd, handles = plotHCluster(plotDf[posSubsets], row_labels=plotDf.TreatmentGroupID, cmap=palettable.colorbrewer.sequential.YlOrRd_9.mpl_colormap, yTickSz=None, xTickSz='large', clusterBool=clusterBool, vRange=vRange) if column == 'pvalue': handles['cb'].remove() else: handles['cb'].set_ticks(cbt) handles['cb'].set_ticklabels(cbtl) handles['cb'].set_label('% cells') for xh in handles['xlabelsL']: xh.set_rotation(0) handles['heatmapAX'].grid(b=None) #handles['heatmapGS'].tight_layout(handles['fig'], h_pad=0.1, w_pad=0.5) return handles def plotResponsePattern(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', column='pvalue', cluster=False, cutoff='pvalue', pAdjust=True, boxplot=False, allSubsets=False): if column == 'pvalue' and boxplot: boxplot = False print('Forced heatmap for p-value plotting.') if boxplot: axh = plotPolyBP(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets) else: axh = plotPolyHeat(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, cluster=cluster, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets) return axh def _szscale(vec, mx=np.inf, mn=1): """Normalize values of vec to [mn, mx] interval such that sz ratios remain representative.""" factor = mn/np.nanmin(vec) vec = vec*factor vec[vec > mx] = mx vec[np.isnan(vec)] = mn return vec def plotPolyFunNetwork(cdf): """This visualization isn't promising, but its also the start to how I'd think about defining a pairwise sample distance matrix. 
Instead of considering each subset as independent, they could be related by their distance on this graph (just the sum of the binary vector representation), then the distance would be some kind of earth mover's distance between the two graphs""" binSubsets = np.concatenate([m[None, :] for m in map(_subset2vec, cdf.cytokine.unique())], axis=0) nColors = (np.unique(binSubsets.sum(axis=1)) > 0).sum() cmap = sns.light_palette('red', as_cmap=True, n_colors=nColors) freqDf = cdf.groupby('cytokine')['mag'].agg(np.mean) freqDf = freqDf.drop(vec2subset([0]*len(binSubsets)), axis=0) g = nx.Graph() for ss,f in freqDf.iteritems(): g.add_node(ss, freq=f, fscore=subset2vec(ss).sum()) for ss1, ss2 in itertools.product(freqDf.index, freqDf.index): if np.abs(subset2vec(ss1) - subset2vec(ss2)).sum() <= 1: g.add_edge(ss1, ss2) nodesize = np.array([d['freq'] for n, d in g.nodes(data=True)]) nodecolor = np.array([d['fscore'] for n, d in g.nodes(data=True)]) nodecolor = (nodecolor - nodecolor.min() + 1) / (nodecolor.max() - nodecolor.min() + 1) freq = {n:d['freq'] for n, d in g.nodes(data=True)} layout = 'neato' # graphviz layout program (see commented alternatives) pos = nx.nx_pydot.graphviz_layout(g, prog=layout, root=max(list(freq.keys()), key=freq.get)) #pos = spring_layout(g) #pos = spectral_layout(g) #layouts = ['twopi', 'fdp', 'circo', 'neato', 'dot', 'spring', 'spectral'] #pos = nx.graphviz_layout(g, prog=layout) plt.clf() figh = plt.gcf() axh = figh.add_axes([0.04, 0.04, 0.92, 0.92]) axh.axis('off') figh.set_facecolor('white') #nx.draw_networkx_edges(g,pos,alpha=0.5,width=sznorm(edgewidth,mn=0.5,mx=10), edge_color='k') #nx.draw_networkx_nodes(g,pos,node_size=sznorm(nodesize,mn=500,mx=5000),node_color=nodecolors,alpha=1) for e in g.edges_iter(): x1, y1=pos[e[0]] x2, y2=pos[e[1]] props = dict(color='black', alpha=0.4, zorder=1) plt.plot([x1, x2], [y1, y2], '-', lw=2, **props) plt.scatter(x=[pos[s][0] for s in g.nodes()], y=[pos[s][1] for s in g.nodes()], s=_szscale(nodesize, mn=20, mx=200), #Units for scatter is (size in points)**2 c=nodecolor, alpha=1, zorder=2, cmap=cmap) for n, d in g.nodes(data=True): if d['freq'] >= 0: plt.annotate(n, xy=pos[n], fontname='Arial', size=10, weight='bold', color='black', va='center', ha='center') def swarmBox(data, x, y, hue, palette=None, order=None, hue_order=None, connect=False): """Depends on the plot order of the swarm plot, which does not seem dependable at the moment. A better idea would be to adopt code from the actual swarm function for this, adding boxplots separately""" if palette is None: palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0]) if hue_order is None: hue_order = sorted(data[hue].unique()) if order is None: order = sorted(data[x].unique()) params = dict(data=data, x=x, y=y, hue=hue, palette=palette, order=order, hue_order=hue_order) sns.boxplot(**params, fliersize=0, linewidth=0.5) swarm = sns.swarmplot(**params, linewidth=0.5, edgecolor='black', dodge=True) if connect: zipper = [order] + [swarm.collections[i::len(hue_order)] for i in range(len(hue_order))] for z in zip(*zipper): curx = z[0] collections = z[1:] offsets = [] for c,h in zip(collections, hue_order): ind = (data[x] == curx) & (data[hue] == h) sortii = np.argsort(np.argsort(data.loc[ind, y])) offsets.append(c.get_offsets()[sortii,:]) for zoffsets in zip(*offsets): xvec = [o[0] for o in zoffsets] yvec = [o[1] for o in zoffsets] plt.plot(xvec, yvec, '-', color='gray', linewidth=0.5) plt.legend([plt.Circle(1, color=c) for c in palette], hue_order, title=hue)
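# Usage sketch for swarmBox: a minimal, hedged example on a toy DataFrame.
# The column names ('visit', 'response', 'arm') and the group labels are
# illustrative assumptions only, not columns used elsewhere in this module.
if __name__ == '__main__':
    toy = pd.DataFrame({'visit': np.repeat(['V1', 'V2'], 20),
                        'response': np.random.randn(40),
                        'arm': np.tile(['placebo', 'vaccine'], 20)})
    # One box per visit/arm combination, with the individual points overlaid.
    swarmBox(toy, x='visit', y='response', hue='arm', connect=False)
    plt.show()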
mit
HolgerPeters/scikit-learn
examples/ensemble/plot_forest_iris.py
335
6271
""" ==================================================================== Plot the decision surfaces of ensembles of trees on the iris dataset ==================================================================== Plot the decision surfaces of forests of randomized trees trained on pairs of features of the iris dataset. This plot compares the decision surfaces learned by a decision tree classifier (first column), by a random forest classifier (second column), by an extra- trees classifier (third column) and by an AdaBoost classifier (fourth column). In the first row, the classifiers are built using the sepal width and the sepal length features only, on the second row using the petal length and sepal length only, and on the third row using the petal width and the petal length only. In descending order of quality, when trained (outside of this example) on all 4 features using 30 estimators and scored using 10 fold cross validation, we see:: ExtraTreesClassifier() # 0.95 score RandomForestClassifier() # 0.94 score AdaBoost(DecisionTree(max_depth=3)) # 0.94 score DecisionTree(max_depth=None) # 0.94 score Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but the average score does not improve). See the console's output for further details about each model. In this example you might try to: 1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and ``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the ``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier`` 2) vary ``n_estimators`` It is worth noting that RandomForests and ExtraTrees can be fitted in parallel on many cores as each tree is built independently of the others. AdaBoost's samples are built sequentially and so do not use multiple cores. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import clone from sklearn.datasets import load_iris from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier) from sklearn.externals.six.moves import xrange from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 n_estimators = 30 plot_colors = "ryb" cmap = plt.cm.RdYlBu plot_step = 0.02 # fine step width for decision surface contours plot_step_coarser = 0.5 # step widths for coarse classifier guesses RANDOM_SEED = 13 # fix the seed on each iteration # Load data iris = load_iris() plot_idx = 1 models = [DecisionTreeClassifier(max_depth=None), RandomForestClassifier(n_estimators=n_estimators), ExtraTreesClassifier(n_estimators=n_estimators), AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators)] for pair in ([0, 1], [0, 2], [2, 3]): for model in models: # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(RANDOM_SEED) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train clf = clone(model) clf = model.fit(X, y) scores = clf.score(X, y) # Create a title for each column and the console by using str() and # slicing away useless parts of the string model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")] model_details = model_title if hasattr(model, "estimators_"): model_details += " with {} estimators".format(len(model.estimators_)) print( model_details + " with features", pair, "has a score of", scores ) plt.subplot(3, 4, plot_idx) if plot_idx <= len(models): # Add a title at the top of each column plt.title(model_title) # Now plot the decision boundary using a fine mesh as input to a # filled contour plot x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) # Plot either a single DecisionTreeClassifier or alpha blend the # decision surfaces of the ensemble of classifiers if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: # Choose alpha blend level with respect to the number of estimators # that are in use (noting that AdaBoost can use fewer estimators # than its maximum if it achieves a good enough fit early on) estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) # Build a coarser grid to plot a set of ensemble classifications # to show how these are different to what we see in the decision # surfaces. 
These points are regularly spaced and do not have a black outline xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser), np.arange(y_min, y_max, plot_step_coarser)) Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape) cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") # Plot the training points, these are clustered together and have a # black outline for i, c in zip(xrange(n_classes), plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i], cmap=cmap) plot_idx += 1 # move on to the next plot in sequence plt.suptitle("Classifiers on feature subsets of the Iris dataset") plt.axis("tight") plt.show()
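# Hedged sketch of the cross-validation quoted in the module docstring above:
# scoring each classifier on all four iris features with 10-fold CV. The
# `sklearn.cross_validation` import path matches the pre-0.18 API used in this
# file and is an assumption; newer releases provide cross_val_score via
# sklearn.model_selection instead.
#
#   from sklearn.cross_validation import cross_val_score
#
#   for model in models:
#       cv_scores = cross_val_score(clone(model), iris.data, iris.target, cv=10)
#       print("{0}: mean CV score {1:.2f} (+/- {2:.2f})".format(
#           type(model).__name__, cv_scores.mean(), cv_scores.std()))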
bsd-3-clause
valexandersaulys/prudential_insurance_kaggle
venv/lib/python2.7/site-packages/pandas/core/series.py
9
95521
""" Data structure for 1-dimensional cross-sectional and time series data """ from __future__ import division # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 import types import warnings from numpy import nan, ndarray import numpy as np import numpy.ma as ma from pandas.core.common import (isnull, notnull, is_bool_indexer, _default_index, _maybe_upcast, _asarray_tuplesafe, _infer_dtype_from_scalar, is_list_like, _values_from_object, is_categorical_dtype, is_datetime64tz_dtype, needs_i8_conversion, i8_boxer, _possibly_cast_to_datetime, _possibly_castable, _possibly_convert_platform, _try_sort, is_int64_dtype, is_internal_type, is_datetimetz, _maybe_match_name, ABCSparseArray, _coerce_to_dtype, SettingWithCopyError, _maybe_box_datetimelike, ABCDataFrame, _dict_compat) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, Float64Index, _ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, CategoricalAccessor import pandas.core.strings as strings from pandas.tseries.common import (maybe_to_datetimelike, CombinedDatetimelikeProperties) from pandas.tseries.index import DatetimeIndex from pandas.tseries.tdi import TimedeltaIndex from pandas.tseries.period import PeriodIndex, Period from pandas import compat from pandas.util.terminal import get_terminal_size from pandas.compat import zip, u, OrderedDict, StringIO import pandas.core.ops as ops from pandas.core import algorithms import pandas.core.common as com import pandas.core.datetools as datetools import pandas.core.format as fmt import pandas.core.nanops as nanops from pandas.util.decorators import Appender, cache_readonly, deprecate_kwarg import pandas.lib as lib import pandas.tslib as tslib import pandas.index as _index from numpy import percentile as _quantile from pandas.core.config import get_option __all__ = ['Series'] _shared_doc_kwargs = dict( axes='index', klass='Series', axes_single_arg="{0, 'index'}", inplace="""inplace : boolean, default False If True, performs operation inplace and returns None.""", duplicated='Series' ) def _coerce_method(converter): """ install the scalar coercion methods """ def wrapper(self): if len(self) == 1: return converter(self.iloc[0]) raise TypeError( "cannot convert the series to {0}".format(str(converter))) return wrapper #---------------------------------------------------------------------- # Series class class Series(base.IndexOpsMixin, strings.StringAccessorMixin, generic.NDFrame,): """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be any hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN) Operations between Series (+, -, /, *, **) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, dict, or scalar value Contains data stored in Series index : array-like or Index (1d) Values must be unique and hashable, same length as data. Index object (or other iterable of same length as data) Will default to np.arange(len(data)) if not provided. 
If both a dict and index sequence are used, the index will override the keys found in the dict. dtype : numpy.dtype or None If None, dtype will be inferred copy : boolean, default False Copy input data """ _metadata = ['name'] _accessors = frozenset(['dt', 'cat', 'str']) _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): # we are called internally, so short-circuit if fastpath: # data is an ndarray, index is defined if not isinstance(data, SingleBlockManager): data = SingleBlockManager(data, index, fastpath=True) if copy: data = data.copy() if index is None: index = data.index else: if index is not None: index = _ensure_index(index) if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, MultiIndex): raise NotImplementedError("initializing a Series from a " "MultiIndex is not supported") elif isinstance(data, Index): # need to copy to avoid aliasing issues if name is None: name = data.name data = data._to_embed(keep_tz=True) copy = True elif isinstance(data, np.ndarray): pass elif isinstance(data, Series): if name is None: name = data.name if index is None: index = data.index else: data = data.reindex(index, copy=copy) data = data._data elif isinstance(data, dict): if index is None: if isinstance(data, OrderedDict): index = Index(data) else: index = Index(_try_sort(data)) try: if isinstance(index, DatetimeIndex): if len(data): # coerce back to datetime objects for lookup data = _dict_compat(data) data = lib.fast_multiget(data, index.astype('O'), default=np.nan) else: data = np.nan elif isinstance(index, PeriodIndex): data = [data.get(i, nan) for i in index] if data else np.nan else: data = lib.fast_multiget(data, index.values, default=np.nan) except TypeError: data = [data.get(i, nan) for i in index] if data else np.nan elif isinstance(data, SingleBlockManager): if index is None: index = data.index else: data = data.reindex(index, copy=copy) elif isinstance(data, Categorical): if dtype is not None: raise ValueError("cannot specify a dtype with a Categorical") elif (isinstance(data, types.GeneratorType) or (compat.PY3 and isinstance(data, map))): data = list(data) elif isinstance(data, (set, frozenset)): raise TypeError("{0!r} type is unordered" "".format(data.__class__.__name__)) else: # handle sparse passed here (and force conversion) if isinstance(data, ABCSparseArray): data = data.to_dense() if index is None: if not is_list_like(data): data = [data] index = _default_index(len(data)) # create/copy the manager if isinstance(data, SingleBlockManager): if dtype is not None: data = data.astype(dtype=dtype, raise_on_error=False) elif copy: data = data.copy() else: data = _sanitize_array(data, index, dtype, copy, raise_cast_failure=True) data = SingleBlockManager(data, index, fastpath=True) generic.NDFrame.__init__(self, data, fastpath=True) object.__setattr__(self, 'name', name) self._set_axis(0, index, fastpath=True) @classmethod def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): # return a sparse series here if isinstance(arr, ABCSparseArray): from pandas.sparse.series import SparseSeries cls = SparseSeries return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath) @property def _constructor(self): return Series @property def _constructor_expanddim(self): from pandas.core.frame import DataFrame return DataFrame # types @property def _can_hold_na(self): return self._data._can_hold_na @property def 
is_time_series(self): msg = "is_time_series is deprecated. Please use Series.index.is_all_dates" warnings.warn(msg, FutureWarning, stacklevel=2) # return self._subtyp in ['time_series', 'sparse_time_series'] return self.index.is_all_dates _index = None def _set_axis(self, axis, labels, fastpath=False): """ override generic, we want to set the _typ here """ if not fastpath: labels = _ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): labels = DatetimeIndex(labels) # need to set here becuase we changed the index if fastpath: self._data.set_axis(axis, labels) self._set_subtyp(is_all_dates) object.__setattr__(self, '_index', labels) if not fastpath: self._data.set_axis(axis, labels) def _set_subtyp(self, is_all_dates): if is_all_dates: object.__setattr__(self, '_subtyp', 'time_series') else: object.__setattr__(self, '_subtyp', 'series') def _update_inplace(self, result, **kwargs): # we want to call the generic version and not the IndexOpsMixin return generic.NDFrame._update_inplace(self, result, **kwargs) # ndarray compatibility @property def dtype(self): """ return the dtype object of the underlying data """ return self._data.dtype @property def dtypes(self): """ return the dtype object of the underlying data """ return self._data.dtype @property def ftype(self): """ return if the data is sparse|dense """ return self._data.ftype @property def ftypes(self): """ return if the data is sparse|dense """ return self._data.ftype @property def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype Returns ------- arr : numpy.ndarray or ndarray-like Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values [a, a, b, c] Categories (3, object): [a, b, c] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101',periods=3,tz='US/Eastern')).values array(['2013-01-01T00:00:00.000000000-0500', '2013-01-02T00:00:00.000000000-0500', '2013-01-03T00:00:00.000000000-0500'], dtype='datetime64[ns]') """ return self._data.external_values() @property def _values(self): """ return the internal repr of this data """ return self._data.internal_values() def get_values(self): """ same as values (but handles sparseness conversions); is a view """ return self._data.get_values() # ops def ravel(self, order='C'): """ Return the flattened underlying data as an ndarray See also -------- numpy.ndarray.ravel """ return self._values.ravel(order=order) def compress(self, condition, axis=0, out=None, **kwargs): """ Return selected slices of an array along given axis as a Series See also -------- numpy.ndarray.compress """ return self[condition] def nonzero(self): """ Return the indices of the elements that are non-zero This method is equivalent to calling `numpy.nonzero` on the series data. For compatability with NumPy, the return value is the same (a tuple with an array of indices for each dimension), but it will always be a one-item tuple because series only have one dimension. 
Examples -------- >>> s = pd.Series([0, 3, 0, 4]) >>> s.nonzero() (array([1, 3]),) >>> s.iloc[s.nonzero()[0]] 1 3 3 4 dtype: int64 See Also -------- numpy.nonzero """ return self._values.nonzero() def put(self, *args, **kwargs): """ return a ndarray with the values put See also -------- numpy.ndarray.put """ self._values.put(*args, **kwargs) def __len__(self): """ return the length of the Series """ return len(self._data) def view(self, dtype=None): return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self) def __array__(self, result=None): """ the array interface, return my values """ return self.get_values() def __array_wrap__(self, result, context=None): """ Gets called after a ufunc """ return self._constructor(result, index=self.index, copy=False).__finalize__(self) def __array_prepare__(self, result, context=None): """ Gets called prior to a ufunc """ # nice error message for non-ufunc types if context is not None and not isinstance(self._values, np.ndarray): obj = context[1][0] raise TypeError("{obj} with dtype {dtype} cannot perform " "the numpy op {op}".format(obj=type(obj).__name__, dtype=getattr(obj,'dtype',None), op=context[0].__name__)) return result # complex @property def real(self): return self.values.real @real.setter def real(self, v): self.values.real = v @property def imag(self): return self.values.imag @imag.setter def imag(self, v): self.values.imag = v # coercion __float__ = _coerce_method(float) __long__ = _coerce_method(int) __int__ = _coerce_method(int) def _unpickle_series_compat(self, state): if isinstance(state, dict): self._data = state['_data'] self.name = state['name'] self.index = self._data.index elif isinstance(state, tuple): # < 0.12 series pickle nd_state, own_state = state # recreate the ndarray data = np.empty(nd_state[1], dtype=nd_state[2]) np.ndarray.__setstate__(data, nd_state) # backwards compat index, name = own_state[0], None if len(own_state) > 1: name = own_state[1] # recreate self._data = SingleBlockManager(data, index, fastpath=True) self._index = index self.name = name else: raise Exception("cannot unpickle legacy formats -> [%s]" % state) # indexers @property def axes(self): """ Return a list of the row axis labels """ return [self.index] def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence) """ try: # dispatch to the values if we need values = self._values if isinstance(values, np.ndarray): return _index.get_value_at(values, i) else: return values[i] except IndexError: raise except: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) else: label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis, convert=True) else: return _index.get_value_at(self, i) @property def _is_mixed_type(self): return False def _slice(self, slobj, axis=0, kind=None): slobj = self.index._convert_slice_indexer(slobj, kind=kind or 'getitem') return self._get_values(slobj) def __getitem__(self, key): try: result = self.index.get_value(self, key) if not np.isscalar(result): if is_list_like(result) and not isinstance(result, Series): # we need to box if we have a non-unique index here # otherwise have inline ndarray/lists if not self.index.is_unique: result = self._constructor(result, index=[key]*len(result) ,dtype=self.dtype).__finalize__(self) return result except InvalidIndexError: pass 
except (KeyError, ValueError): if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # kludge pass elif key is Ellipsis: return self elif is_bool_indexer(key): pass else: # we can try to coerce the indexer (or this will raise) new_key = self.index._convert_scalar_indexer(key,kind='getitem') if type(new_key) != type(key): return self.__getitem__(new_key) raise except Exception: raise if com.is_iterator(key): key = list(key) if is_bool_indexer(key): key = check_bool_indexer(self.index, key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind='getitem') return self._get_values(indexer) elif isinstance(key, ABCDataFrame): raise TypeError('Indexing a Series with DataFrame is not supported, '\ 'use the appropriate DataFrame column') else: if isinstance(key, tuple): try: return self._get_values_tuple(key) except: if len(key) == 1: key = key[0] if isinstance(key, slice): return self._get_values(key) raise # pragma: no cover if not isinstance(key, (list, np.ndarray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key) if key_type == 'integer': if self.index.is_integer() or self.index.is_floating(): return self.reindex(key) else: return self._get_values(key) elif key_type == 'boolean': return self._get_values(key) else: try: # handle the dup indexing case (GH 4246) if isinstance(key, (list, tuple)): return self.ix[key] return self.reindex(key) except Exception: # [slice(0, 5, None)] will break if you convert to ndarray, # e.g. as requested by np.median # hack if isinstance(key[0], slice): return self._get_values(key) raise def _get_values_tuple(self, key): # mpl hackaround if any(k is None for k in key): return self._get_values(key) if not isinstance(self.index, MultiIndex): raise ValueError('Can only tuple-index with a MultiIndex') # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) return self._constructor(self._values[indexer], index=new_index).__finalize__(self) def _get_values(self, indexer): try: return self._constructor(self._data.get_slice(indexer), fastpath=True).__finalize__(self) except Exception: return self._values[indexer] def __setitem__(self, key, value): def setitem(key, value): try: self._set_with_engine(key, value) return except (SettingWithCopyError): raise except (KeyError, ValueError): values = self._values if (com.is_integer(key) and not self.index.inferred_type == 'integer'): values[key] = value return elif key is Ellipsis: self[:] = value return elif is_bool_indexer(key): pass elif com.is_timedelta64_dtype(self.dtype): # reassign a null value to iNaT if isnull(value): value = tslib.iNaT try: self.index._engine.set_value(self._values, key, value) return except (TypeError): pass self.loc[key] = value return except TypeError as e: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): raise ValueError("Can only tuple-index with a MultiIndex") # python 3 type errors should be raised if 'unorderable' in str(e): # pragma: no cover raise IndexError(key) if is_bool_indexer(key): key = check_bool_indexer(self.index, key) try: self.where(~key, value, inplace=True) return except (InvalidIndexError): pass self._set_with(key, value) # do the setitem cacher_needs_updating = self._check_is_chained_assignment_possible() setitem(key, value) if cacher_needs_updating: self._maybe_update_cacher() def _set_with_engine(self, key, 
value): values = self._values try: self.index._engine.set_value(values, key, value) return except KeyError: values[self.index.get_loc(key)] = value return def _set_with(self, key, value): # other: fancy integer or otherwise if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind='getitem') return self._set_values(indexer, value) else: if isinstance(key, tuple): try: self._set_values(key, value) except Exception: pass if not isinstance(key, (list, Series, np.ndarray, Series)): try: key = list(key) except: key = [ key ] if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key) if key_type == 'integer': if self.index.inferred_type == 'integer': self._set_labels(key, value) else: return self._set_values(key, value) elif key_type == 'boolean': self._set_values(key.astype(np.bool_), value) else: self._set_labels(key, value) def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: key = _asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise ValueError('%s not contained in the index' % str(key[mask])) self._set_values(indexer, value) def _set_values(self, key, value): if isinstance(key, Series): key = key._values self._data = self._data.setitem(indexer=key, value=value) self._maybe_update_cacher() # help out SparseSeries _get_val_at = ndarray.__getitem__ def repeat(self, reps): """ return a new Series with the values repeated reps times See also -------- numpy.ndarray.repeat """ new_index = self.index.repeat(reps) new_values = self._values.repeat(reps) return self._constructor(new_values, index=new_index).__finalize__(self) def reshape(self, *args, **kwargs): """ return an ndarray with the values shape if the specified shape matches exactly the current shape, then return self (for compat) See also -------- numpy.ndarray.take """ if len(args) == 1 and hasattr(args[0], '__iter__'): shape = args[0] else: shape = args if tuple(shape) == self.shape: # XXX ignoring the "order" keyword. return self return self._values.reshape(shape, **kwargs) def iget_value(self, i, axis=0): """ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead """ warnings.warn("iget_value(i) is deprecated. Please use .iloc[i] or .iat[i]", FutureWarning, stacklevel=2) return self._ixs(i) def iget(self, i, axis=0): """ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead """ warnings.warn("iget(i) is deprecated. Please use .iloc[i] or .iat[i]", FutureWarning, stacklevel=2) return self._ixs(i) def irow(self, i, axis=0): """ DEPRECATED. Use ``.iloc[i]`` or ``.iat[i]`` instead """ warnings.warn("irow(i) is deprecated. Please use .iloc[i] or .iat[i]", FutureWarning, stacklevel=2) return self._ixs(i) def get_value(self, label, takeable=False): """ Quickly retrieve single value at passed index label Parameters ---------- index : label takeable : interpret the index as indexers, default False Returns ------- value : scalar value """ if takeable is True: return _maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) def set_value(self, label, value, takeable=False): """ Quickly set single value at passed label. 
If label is not contained, a new object is created with the label placed at the end of the result index Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Returns ------- series : Series If label is contained, will be reference to calling Series, otherwise a new object """ try: if takeable: self._values[label] = value else: self.index._engine.set_value(self._values, label, value) return self except KeyError: # set using a non-recursive method self.loc[label] = value return self def reset_index(self, level=None, drop=False, name=None, inplace=False): """ Analogous to the :meth:`pandas.DataFrame.reset_index` function, see docstring there. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default drop : boolean, default False Do not try to insert index into dataframe columns name : object, default None The name of the column corresponding to the Series values inplace : boolean, default False Modify the Series in place (do not create a new object) Returns ---------- resetted : DataFrame, or Series if drop == True """ if drop: new_index = np.arange(len(self)) if level is not None and isinstance(self.index, MultiIndex): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < len(self.index.levels): new_index = self.index.droplevel(level) if inplace: self.index = new_index # set name if it was passed, otherwise, keep the previous name self.name = name or self.name else: return self._constructor(self._values.copy(), index=new_index).__finalize__(self) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' 'to create a DataFrame') else: df = self.to_frame(name) return df.reset_index(level=level, drop=drop) def __unicode__(self): """ Return a string representation for a particular DataFrame Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. """ buf = StringIO(u("")) width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 else get_option("display.max_rows")) self.to_string(buf=buf, name=self.name, dtype=self.dtype, max_rows=max_rows) result = buf.getvalue() return result def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series Parameters ---------- buf : StringIO-like, optional buffer to write to na_rep : string, optional string representation of NAN to use, default 'NaN' float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats default None header: boolean, default True Add the Series header (index name) length : boolean, default False Add the Series length dtype : boolean, default False Add the Series dtype name : boolean, default False Add the Series name if not None max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. 
Returns ------- formatted : string (if not buffer passed) """ the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, header=header, length=length, dtype=dtype, name=name, max_rows=max_rows) # catch contract violations if not isinstance(the_repr, compat.text_type): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(the_repr.__class__.__name__)) if buf is None: return the_repr else: try: buf.write(the_repr) except AttributeError: with open(buf, 'w') as f: f.write(the_repr) def _get_repr( self, name=False, header=True, length=True, dtype=True, na_rep='NaN', float_format=None, max_rows=None): """ Internal function, should always return unicode string """ formatter = fmt.SeriesFormatter(self, name=name, length=length, header=header, dtype=dtype, na_rep=na_rep, float_format=float_format, max_rows=max_rows) result = formatter.to_string() # TODO: following check prob. not neces. if not isinstance(result, compat.text_type): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(result.__class__.__name__)) return result def __iter__(self): """ provide iteration over the values of the Series box values if necessary """ if needs_i8_conversion(self.dtype): boxer = i8_boxer(self) return (boxer(x) for x in self._values) else: return iter(self._values) def iteritems(self): """ Lazily iterate over (index, value) tuples """ return zip(iter(self.index), iter(self)) if compat.PY3: # pragma: no cover items = iteritems #---------------------------------------------------------------------- # Misc public methods def keys(self): "Alias for index" return self.index def tolist(self): """ Convert Series to a nested list """ return list(self) def to_dict(self): """ Convert Series to {label -> value} dict Returns ------- value_dict : dict """ return dict(compat.iteritems(self)) def to_frame(self, name=None): """ Convert Series to DataFrame Parameters ---------- name : object, default None The passed name should substitute for the series name (if it has one). 
Returns ------- data_frame : DataFrame """ if name is None: df = self._constructor_expanddim(self) else: df = self._constructor_expanddim({name: self}) return df def to_sparse(self, kind='block', fill_value=None): """ Convert Series to SparseSeries Parameters ---------- kind : {'block', 'integer'} fill_value : float, defaults to NaN (missing) Returns ------- sp : SparseSeries """ from pandas.core.sparse import SparseSeries return SparseSeries(self, kind=kind, fill_value=fill_value).__finalize__(self) #---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self, level=None): """ Return number of non-NA/null observations in the Series Parameters ---------- level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a smaller Series Returns ------- nobs : int or Series (if level specified) """ from pandas.core.index import _get_na_value if level is None: return notnull(_values_from_object(self)).sum() if isinstance(level, compat.string_types): level = self.index._get_level_number(level) lev = self.index.levels[level] lab = np.array(self.index.labels[level], subok=False, copy=True) mask = lab == -1 if mask.any(): lab[mask] = cnt = len(lev) lev = lev.insert(cnt, _get_na_value(lev.dtype.type)) out = np.bincount(lab[notnull(self.values)], minlength=len(lev)) return self._constructor(out, index=lev, dtype='int64').__finalize__(self) def mode(self): """Returns the mode(s) of the dataset. Empty if nothing occurs at least 2 times. Always returns Series even if only one value. Parameters ---------- sort : bool, default True If True, will lexicographically sort values, if False skips sorting. Result ordering when ``sort=False`` is not defined. Returns ------- modes : Series (sorted) """ # TODO: Add option for bins like value_counts() return algorithms.mode(self) @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs) def drop_duplicates(self, keep='first', inplace=False): return super(Series, self).drop_duplicates(keep=keep, inplace=inplace) @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs) def duplicated(self, keep='first'): return super(Series, self).duplicated(keep=keep) def idxmin(self, axis=None, out=None, skipna=True): """ Index of first occurrence of minimum of values. Parameters ---------- skipna : boolean, default True Exclude NA/null values Returns ------- idxmin : Index of minimum of values Notes ----- This method is the Series version of ``ndarray.argmin``. See Also -------- DataFrame.idxmin numpy.ndarray.argmin """ i = nanops.nanargmin(_values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] def idxmax(self, axis=None, out=None, skipna=True): """ Index of first occurrence of maximum of values. Parameters ---------- skipna : boolean, default True Exclude NA/null values Returns ------- idxmax : Index of maximum of values Notes ----- This method is the Series version of ``ndarray.argmax``. 
See Also -------- DataFrame.idxmax numpy.ndarray.argmax """ i = nanops.nanargmax(_values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] # ndarray compat argmin = idxmin argmax = idxmax @Appender(np.ndarray.round.__doc__) def round(self, decimals=0, out=None): """ """ result = _values_from_object(self).round(decimals, out=out) if out is None: result = self._constructor(result, index=self.index).__finalize__(self) return result def quantile(self, q=0.5): """ Return value at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute Returns ------- quantile : float or Series if ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles. Examples -------- >>> s = Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ valid = self.dropna() self._check_percentile(q) def multi(values, qs): if com.is_list_like(qs): values = [_quantile(values, x*100) for x in qs] # let empty result to be Float64Index qs = Float64Index(qs) return self._constructor(values, index=qs, name=self.name) else: return _quantile(values, qs*100) return self._maybe_box(lambda values: multi(values, q), dropna=True) def corr(self, other, method='pearson', min_periods=None): """ Compute correlation with `other` Series, excluding missing values Parameters ---------- other : Series method : {'pearson', 'kendall', 'spearman'} * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation min_periods : int, optional Minimum number of observations needed to have a valid result Returns ------- correlation : float """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan return nanops.nancorr(this.values, other.values, method=method, min_periods=min_periods) def cov(self, other, min_periods=None): """ Compute covariance with Series, excluding missing values Parameters ---------- other : Series min_periods : int, optional Minimum number of observations needed to have a valid result Returns ------- covariance : float Normalized by N-1 (unbiased estimator). """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan return nanops.nancov(this.values, other.values, min_periods=min_periods) def diff(self, periods=1): """ 1st discrete difference of object Parameters ---------- periods : int, default 1 Periods to shift for forming difference Returns ------- diffed : Series """ result = com.diff(_values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): """ Lag-N autocorrelation Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. 
Returns ------- autocorr : float """ return self.corr(self.shift(lag)) def dot(self, other): """ Matrix multiplication with DataFrame or inner-product with Series objects Parameters ---------- other : Series or DataFrame Returns ------- dot_product : scalar or Series """ from pandas.core.frame import DataFrame if isinstance(other, (Series, DataFrame)): common = self.index.union(other.index) if (len(common) > len(self.index) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=other.columns).__finalize__(self) elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) def searchsorted(self, v, side='left', sorter=None): """Find indices where elements should be inserted to maintain order. Find the indices into a sorted Series `self` such that, if the corresponding elements in `v` were inserted before the indices, the order of `self` would be preserved. Parameters ---------- v : array_like Values to insert into `a`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `a`). sorter : 1-D array_like, optional Optional array of integer indices that sort `self` into ascending order. They are typically the result of ``np.argsort``. Returns ------- indices : array of ints Array of insertion points with the same shape as `v`. See Also -------- Series.sort_values numpy.searchsorted Notes ----- Binary search is used to find the required insertion points. Examples -------- >>> x = pd.Series([1, 2, 3]) >>> x 0 1 1 2 2 3 dtype: int64 >>> x.searchsorted(4) array([3]) >>> x.searchsorted([0, 4]) array([0, 3]) >>> x.searchsorted([1, 3], side='left') array([0, 2]) >>> x.searchsorted([1, 3], side='right') array([1, 3]) >>> x.searchsorted([1, 2], side='right', sorter=[0, 2, 1]) array([1, 3]) """ if sorter is not None: sorter = com._ensure_platform_int(sorter) return self._values.searchsorted(Series(v)._values, side=side, sorter=sorter) #------------------------------------------------------------------------------ # Combination def append(self, to_append, verify_integrity=False): """ Concatenate two or more Series. Parameters ---------- to_append : Series or list/tuple of Series verify_integrity : boolean, default False If True, raise Exception on creating index with duplicates Returns ------- appended : Series """ from pandas.tools.merge import concat if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append else: to_concat = [self, to_append] return concat(to_concat, ignore_index=False, verify_integrity=verify_integrity) def _binop(self, other, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. 
If both Series are NA in a location, the result will be NA regardless of the passed fill value level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- combined : Series """ if not isinstance(other, Series): raise AssertionError('Other operand must be Series') new_index = self.index this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join='outer', copy=False) new_index = this.index this_vals = this.values other_vals = other.values if fill_value is not None: this_mask = isnull(this_vals) other_mask = isnull(other_vals) this_vals = this_vals.copy() other_vals = other_vals.copy() # one but not both mask = this_mask ^ other_mask this_vals[this_mask & mask] = fill_value other_vals[other_mask & mask] = fill_value result = func(this_vals, other_vals) name = _maybe_match_name(self, other) result = self._constructor(result, index=new_index, name=name) result = result.__finalize__(self) if name is None: # When name is None, __finalize__ overwrites current name result.name = None return result def combine(self, other, func, fill_value=nan): """ Perform elementwise binary operation on two Series using given function with optional fill value when an index is missing from one Series or the other Parameters ---------- other : Series or scalar value func : function fill_value : scalar value Returns ------- result : Series """ if isinstance(other, Series): new_index = self.index.union(other.index) new_name = _maybe_match_name(self, other) new_values = np.empty(len(new_index), dtype=self.dtype) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) new_values[i] = func(lv, rv) else: new_index = self.index new_values = func(self._values, other) new_name = self.name return self._constructor(new_values, index=new_index, name=new_name) def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes Parameters ---------- other : Series Returns ------- y : Series """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) name = _maybe_match_name(self, other) rs_vals = com._where_compat(isnull(this), other._values, this._values) return self._constructor(rs_vals, index=new_index).__finalize__(self) def update(self, other): """ Modify Series in place using non-NA values from passed Series. 
Aligns on index Parameters ---------- other : Series """ other = other.reindex_like(self) mask = notnull(other) self._data = self._data.putmask(mask=mask, new=other, inplace=True) self._maybe_update_cacher() #---------------------------------------------------------------------- # Reindexing, sorting @Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs) def sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): axis = self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError("This Series is a view of some other array, to " "sort in-place you must create a copy") def _try_kind_sort(arr): # easier to ask forgiveness than permission try: # if kind==mergesort, it can fail for object dtype return arr.argsort(kind=kind) except TypeError: # stable sort not available for object dtype # uses the argsort default quicksort return arr.argsort(kind='quicksort') arr = self._values sortedIdx = np.empty(len(self), dtype=np.int32) bad = isnull(arr) good = ~bad idx = np.arange(len(self)) argsorted = _try_kind_sort(arr[good]) if not ascending: argsorted = argsorted[::-1] if na_position == 'last': n = good.sum() sortedIdx[:n] = idx[good][argsorted] sortedIdx[n:] = idx[bad] elif na_position == 'first': n = bad.sum() sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] else: raise ValueError('invalid na_position: {!r}'.format(na_position)) result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx]) if inplace: self._update_inplace(result) else: return result.__finalize__(self) @Appender(generic._shared_docs['sort_index'] % _shared_doc_kwargs) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, sort_remaining=True): axis = self._get_axis_number(axis) index = self.index if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): from pandas.core.groupby import _lexsort_indexer indexer = _lexsort_indexer(index.labels, orders=ascending) indexer = com._ensure_platform_int(indexer) new_index = index.take(indexer) else: new_index, indexer = index.sort_values(return_indexer=True, ascending=ascending) new_values = self._values.take(indexer) result = self._constructor(new_values, index=new_index) if inplace: self._update_inplace(result) else: return result.__finalize__(self) def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True): """ DEPRECATED: use :meth:`Series.sort_values(inplace=True)` for INPLACE sorting Sort values and index labels by value. This is an inplace sort by default. Series.order is the equivalent but returns a new Series. Parameters ---------- axis : int (can only be zero) ascending : boolean, default True Sort ascending. Passing False sorts descending kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end inplace : boolean, default True Do operation in place. 
See Also -------- Series.sort_values """ warnings.warn("sort is deprecated, use sort_values(inplace=True) for INPLACE sorting", FutureWarning, stacklevel=2) return self.sort_values(ascending=ascending, kind=kind, na_position=na_position, inplace=inplace) def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False): """ DEPRECATED: use :meth:`Series.sort_values` Sorts Series object, by value, maintaining index-value link. This will return a new Series by default. Series.sort is the equivalent but as an inplace method. Parameters ---------- na_last : boolean (optional, default=True) (DEPRECATED; use na_position) Put NaN's at beginning or end ascending : boolean, default True Sort ascending. Passing False sorts descending kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end inplace : boolean, default False Do operation in place. Returns ------- y : Series See Also -------- Series.sort_values """ warnings.warn("order is deprecated, use sort_values(...)", FutureWarning, stacklevel=2) return self.sort_values(ascending=ascending, kind=kind, na_position=na_position, inplace=inplace) def argsort(self, axis=0, kind='quicksort', order=None): """ Overrides ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values Parameters ---------- axis : int (can only be zero) kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm order : ignored Returns ------- argsorted : Series, with -1 indicating where nan values are present See also -------- numpy.ndarray.argsort """ values = self._values mask = isnull(values) if mask.any(): result = Series( -1, index=self.index, name=self.name, dtype='int64') notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) return self._constructor(result, index=self.index).__finalize__(self) else: return self._constructor( np.argsort(values, kind=kind), index=self.index, dtype='int64').__finalize__(self) def rank(self, method='average', na_option='keep', ascending=True, pct=False): """ Compute data ranks (1 through n). Equal values are assigned a rank that is the average of the ranks of those values Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups na_option : {'keep'} keep: leave NA values where they are ascending : boolean, default True False for ranks by high (1) to low (N) pct : boolean, default False Computes percentage rank of data Returns ------- ranks : Series """ ranks = algorithms.rank(self._values, method=method, na_option=na_option, ascending=ascending, pct=pct) return self._constructor(ranks, index=self.index).__finalize__(self) @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) def nlargest(self, n=5, keep='first'): """Return the largest `n` elements. 
Parameters ---------- n : int Return this many descending sorted values keep : {'first', 'last', False}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. take_last : deprecated Returns ------- top_n : Series The n largest values in the Series, in sorted order Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. See Also -------- Series.nsmallest Examples -------- >>> import pandas as pd >>> import numpy as np >>> s = pd.Series(np.random.randn(1e6)) >>> s.nlargest(10) # only sorts up to the N requested """ return algorithms.select_n(self, n=n, keep=keep, method='nlargest') @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) def nsmallest(self, n=5, keep='first'): """Return the smallest `n` elements. Parameters ---------- n : int Return this many ascending sorted values keep : {'first', 'last', False}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. take_last : deprecated Returns ------- bottom_n : Series The n smallest values in the Series, in sorted order Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. See Also -------- Series.nlargest Examples -------- >>> import pandas as pd >>> import numpy as np >>> s = pd.Series(np.random.randn(1e6)) >>> s.nsmallest(10) # only sorts up to the N requested """ return algorithms.select_n(self, n=n, keep=keep, method='nsmallest') def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ Sort Series with MultiIndex by chosen level. Data will be lexicographically sorted by the chosen level followed by the other levels (in order) Parameters ---------- level : int or level name, default None ascending : bool, default True Returns ------- sorted : Series See Also -------- Series.sort_index(level=...) """ return self.sort_index(level=level, ascending=ascending, sort_remaining=sort_remaining) def swaplevel(self, i, j, copy=True): """ Swap levels i and j in a MultiIndex Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- swapped : Series """ new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__(self) def reorder_levels(self, order): """ Rearrange index levels using input order. May not drop or duplicate levels Parameters ---------- order: list of int representing new level order. (reference level by number or key) axis: where to reorder levels Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception('Can only reorder levels on a hierarchical axis.') result = self.copy() result.index = result.index.reorder_levels(order) return result def unstack(self, level=-1): """ Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default last level Level(s) to unstack, can pass level name Examples -------- >>> s one a 1. one b 2. two a 3. two b 4. >>> s.unstack(level=-1) a b one 1. 2. two 3. 4. >>> s.unstack(level=0) one two a 1. 2. b 3. 4. 
Returns ------- unstacked : DataFrame """ from pandas.core.reshape import unstack return unstack(self, level) #---------------------------------------------------------------------- # function application def map(self, arg, na_action=None): """ Map values of Series using input correspondence (which can be a dict, Series, or function) Parameters ---------- arg : function, dict, or Series na_action : {None, 'ignore'} If 'ignore', propagate NA values Examples -------- >>> x one 1 two 2 three 3 >>> y 1 foo 2 bar 3 baz >>> x.map(y) one foo two bar three baz Returns ------- y : Series same index as caller """ values = self._values if com.is_datetime64_dtype(values.dtype): values = lib.map_infer(values, lib.Timestamp) if na_action == 'ignore': mask = isnull(values) def map_f(values, f): return lib.map_infer_mask(values, f, mask.view(np.uint8)) else: map_f = lib.map_infer if isinstance(arg, (dict, Series)): if isinstance(arg, dict): arg = self._constructor(arg, index=arg.keys()) indexer = arg.index.get_indexer(values) new_values = com.take_1d(arg._values, indexer) return self._constructor(new_values, index=self.index).__finalize__(self) else: mapped = map_f(values, arg) return self._constructor(mapped, index=self.index).__finalize__(self) def apply(self, func, convert_dtype=True, args=(), **kwds): """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values Parameters ---------- func : function convert_dtype : boolean, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object args : tuple Positional arguments to pass to function in addition to the value Additional keyword arguments will be passed as keywords to the function Returns ------- y : Series or DataFrame if func returns a Series See also -------- Series.map: For element-wise operations Examples -------- Create a series with typical summer temperatures for each city. >>> import pandas as pd >>> import numpy as np >>> series = pd.Series([20, 21, 12], index=['London', ... 'New York','Helsinki']) London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x**2 >>> series.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> series.apply(lambda x: x**2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x-custom_value >>> series.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x+=kwargs[month] ... return x >>> series.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. 
>>> series.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ if len(self) == 0: return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self) if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) else: f = func if isinstance(f, np.ufunc): return f(self) values = _values_from_object(self) if com.is_datetime64_dtype(values.dtype): values = lib.map_infer(values, lib.Timestamp) mapped = lib.map_infer(values, f, convert=convert_dtype) if len(mapped) and isinstance(mapped[0], Series): from pandas.core.frame import DataFrame return DataFrame(mapped.tolist(), index=self.index) else: return self._constructor(mapped, index=self.index).__finalize__(self) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation if we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object """ delegate = self._values if isinstance(delegate, np.ndarray): # Validate that 'axis' is consistent with Series's single axis. self._get_axis_number(axis) if numeric_only: raise NotImplementedError( 'Series.{0} does not implement numeric_only.'.format(name)) return op(delegate, skipna=skipna, **kwds) return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, filter_type=filter_type, **kwds) def _maybe_box(self, func, dropna=False): """ evaluate a function with possible input/output conversion if we are i8 Parameters ---------- dropna : bool, default False whether to drop values if necessary """ if dropna: values = self.dropna()._values else: values = self._values if needs_i8_conversion(self): boxer = i8_boxer(self) if len(values) == 0: return boxer(tslib.iNaT) values = values.view('i8') result = func(values) if com.is_list_like(result): result = result.map(boxer) else: result = boxer(result) else: # let the function return nan if appropriate if dropna: if len(values) == 0: return np.nan result = func(values) return result def _reindex_indexer(self, new_index, indexer, copy): if indexer is None: if copy: return self.copy() return self # be subclass-friendly new_values = com.take_1d(self.get_values(), indexer) return self._constructor(new_values, index=new_index) def _needs_reindex_multi(self, axes, method, level): """ check if we do need a multi reindex; this is for compat with higher dims """ return False @Appender(generic._shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): return super(Series, self).align(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs) def rename(self, index=None, **kwargs): return super(Series, self).rename(index=index, **kwargs) @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs) def reindex(self, index=None, **kwargs): return super(Series, self).reindex(index=index, **kwargs) @Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): return super(Series, self).fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, **kwargs) @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, 
freq=None, axis=0): return super(Series, self).shift(periods=periods, freq=freq, axis=axis) def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") return self.reindex(index=labels, **kwargs) def memory_usage(self, index=False, deep=False): """Memory usage of the Series Parameters ---------- index : bool Specifies whether to include memory usage of Series index deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- scalar bytes of memory consumed Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ v = super(Series, self).memory_usage(deep=deep) if index: v += self.index.memory_usage(deep=deep) return v def take(self, indices, axis=0, convert=True, is_copy=False): """ return Series corresponding to requested indices Parameters ---------- indices : list / array of ints convert : translate negative to positive indices (default) Returns ------- taken : Series See also -------- numpy.ndarray.take """ # check/convert indicies here if convert: indices = maybe_convert_indices( indices, len(self._get_axis(axis))) indices = com._ensure_platform_int(indices) new_index = self.index.take(indices) new_values = self._values.take(indices) return self._constructor(new_values, index=new_index).__finalize__(self) def isin(self, values): """ Return a boolean :class:`~pandas.Series` showing whether each element in the :class:`~pandas.Series` is exactly contained in the passed sequence of ``values``. Parameters ---------- values : list-like The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a ``list`` of one element. Returns ------- isin : Series (bool dtype) Raises ------ TypeError * If ``values`` is a string See Also -------- pandas.DataFrame.isin Examples -------- >>> s = pd.Series(list('abc')) >>> s.isin(['a', 'c', 'e']) 0 True 1 False 2 True dtype: bool Passing a single string as ``s.isin('a')`` will raise an error. Use a list of one element instead: >>> s.isin(['a']) 0 True 1 False 2 False dtype: bool """ result = algorithms.isin(_values_from_object(self), values) return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): """ Return boolean Series equivalent to left <= series <= right. NA values will be treated as False Parameters ---------- left : scalar Left boundary right : scalar Right boundary Returns ------- is_between : Series """ if inclusive: lmask = self >= left rmask = self <= right else: lmask = self > left rmask = self < right return lmask & rmask @classmethod def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): """ Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv` instead). It is preferable to use the more powerful :func:`pandas.read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a time Series. 
This method only differs from :func:`pandas.read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `header` is ``None`` instead of ``0`` (the first row is not used as the column names) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used to return a Series like ``from_csv``. Parameters ---------- path : string file path or file handle / StringIO sep : string, default ',' Field delimiter parse_dates : boolean, default True Parse dates. Different default from read_table header : int, default None Row to use as header (skip prior rows) index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table encoding : string, optional a string representing the encoding to use if the contents are non-ascii, for python versions prior to 3 infer_datetime_format: boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. See also -------- pandas.read_csv Returns ------- y : Series """ from pandas.core.frame import DataFrame df = DataFrame.from_csv(path, header=header, index_col=index_col, sep=sep, parse_dates=parse_dates, encoding=encoding, infer_datetime_format=infer_datetime_format) result = df.iloc[:,0] if header is None: result.index.name = result.name = None return result def to_csv(self, path, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, mode='w', nanRep=None, encoding=None, date_format=None, decimal='.'): """ Write Series to a comma-separated values (csv) file Parameters ---------- path : string file path or file handle / StringIO. If None is provided the result is returned as a string. na_rep : string, default '' Missing data representation float_format : string, default None Format string for floating point numbers header : boolean, default False Write out series name index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. mode : Python write mode, default 'w' sep : character, default "," Field delimiter for the output file. encoding : string, optional a string representing the encoding to use if the contents are non-ascii, for python versions prior to 3 date_format: string, default None Format string for datetime objects. decimal: string, default '.' Character recognized as decimal separator. E.g. use ',' for European data """ from pandas.core.frame import DataFrame df = DataFrame(self) # result is only a string if no path provided, otherwise None result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, float_format=float_format, header=header, index_label=index_label, mode=mode, nanRep=nanRep, encoding=encoding, date_format=date_format, decimal=decimal) if path is None: return result def dropna(self, axis=0, inplace=False, **kwargs): """ Return Series without null values Returns ------- valid : Series inplace : boolean, default False Do operation in place. 
""" kwargs.pop('how', None) if kwargs: raise TypeError('dropna() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) axis = self._get_axis_number(axis or 0) if self._can_hold_na: result = remove_na(self) if inplace: self._update_inplace(result) else: return result else: if inplace: # do nothing pass else: return self.copy() valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace, **kwargs) def first_valid_index(self): """ Return label for first non-NA/null value """ if len(self) == 0: return None mask = isnull(self._values) i = mask.argmin() if mask[i]: return None else: return self.index[i] def last_valid_index(self): """ Return label for last non-NA/null value """ if len(self) == 0: return None mask = isnull(self._values[::-1]) i = mask.argmin() if mask[i]: return None else: return self.index[len(self) - i - 1] #---------------------------------------------------------------------- # Time series-oriented methods def asof(self, where): """ Return last good (non-NaN) value in Series if value is NaN for requested date. If there is no good value, NaN is returned. Parameters ---------- where : date or array of dates Notes ----- Dates are assumed to be sorted Returns ------- value or NaN """ if isinstance(where, compat.string_types): where = datetools.to_datetime(where) values = self._values if not hasattr(where, '__iter__'): start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq).ordinal start = start.ordinal if where < start: return np.nan loc = self.index.searchsorted(where, side='right') if loc > 0: loc -= 1 while isnull(values[loc]) and loc > 0: loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) locs = self.index.asof_locs(where, notnull(values)) new_values = com.take_1d(values, locs) return self._constructor(new_values, index=where).__finalize__(self) def to_timestamp(self, freq=None, how='start', copy=True): """ Cast to datetimeindex of timestamps, at *beginning* of period Parameters ---------- freq : string, default frequency of PeriodIndex Desired frequency how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. 
end Returns ------- ts : Series with DatetimeIndex """ new_values = self._values if copy: new_values = new_values.copy() new_index = self.index.to_timestamp(freq=freq, how=how) return self._constructor(new_values, index=new_index).__finalize__(self) def to_period(self, freq=None, copy=True): """ Convert Series from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed) Parameters ---------- freq : string, default Returns ------- ts : Series with PeriodIndex """ new_values = self._values if copy: new_values = new_values.copy() new_index = self.index.to_period(freq=freq) return self._constructor(new_values, index=new_index).__finalize__(self) #------------------------------------------------------------------------------ # Datetimelike delegation methods def _make_dt_accessor(self): try: return maybe_to_datetimelike(self) except Exception: raise AttributeError("Can only use .dt accessor with datetimelike " "values") dt = base.AccessorProperty(CombinedDatetimelikeProperties, _make_dt_accessor) #------------------------------------------------------------------------------ # Categorical methods def _make_cat_accessor(self): if not is_categorical_dtype(self.dtype): raise AttributeError("Can only use .cat accessor with a " "'category' dtype") return CategoricalAccessor(self.values, self.index) cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor) def _dir_deletions(self): return self._accessors def _dir_additions(self): rv = set() for accessor in self._accessors: try: getattr(self, accessor) rv.add(accessor) except AttributeError: pass return rv Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0}) Series._add_numeric_operations() Series._add_series_only_operations() _INDEX_TYPES = ndarray, Index, list, tuple #------------------------------------------------------------------------------ # Supplementary functions def remove_na(series): """ Return series containing only true/non-NaN values, possibly empty. 
""" return series[notnull(_values_from_object(series))] def _sanitize_index(data, index, copy=False): """ sanitize an index type to return an ndarray of the underlying, pass thru a non-Index """ if index is None: return data if len(data) != len(index): raise ValueError('Length of values does not match length of ' 'index') if isinstance(data, PeriodIndex): data = data.asobject elif isinstance(data, DatetimeIndex): data = data._to_embed(keep_tz=True) if copy: data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types if data.dtype.kind in ['M','m']: data = _sanitize_array(data, index, copy=copy) return data def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): """ sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """ if dtype is not None: dtype = _coerce_to_dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = _maybe_upcast(data, copy=True) data[mask] = fill_value else: data = data.copy() def _try_cast(arr, take_fast_path): # perf shortcut as this is the most common case if take_fast_path: if _possibly_castable(arr) and not copy and dtype is None: return arr try: subarr = _possibly_cast_to_datetime(arr, dtype) if not is_internal_type(subarr): subarr = np.array(subarr, dtype=dtype, copy=copy) except (ValueError, TypeError): if is_categorical_dtype(dtype): subarr = Categorical(arr) elif dtype is not None and raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr # GH #846 if isinstance(data, (np.ndarray, Index, Series)): if dtype is not None: subarr = np.array(data, copy=False) # possibility of nan -> garbage if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype): if not isnull(data).any(): subarr = _try_cast(data, True) elif copy: subarr = data.copy() else: subarr = _try_cast(data, True) elif isinstance(data, Index): # don't coerce Index types # e.g. 
indexes can have different conversions (so don't fast path them) # GH 6140 subarr = _sanitize_index(data, index, copy=True) else: subarr = _try_cast(data, True) if copy: subarr = data.copy() elif isinstance(data, Categorical): subarr = data if copy: subarr = data.copy() return subarr elif isinstance(data, list) and len(data) > 0: if dtype is not None: try: subarr = _try_cast(data, False) except Exception: if raise_cast_failure: # pragma: no cover raise subarr = np.array(data, dtype=object, copy=copy) subarr = lib.maybe_convert_objects(subarr) else: subarr = _possibly_convert_platform(data) subarr = _possibly_cast_to_datetime(subarr, dtype) else: subarr = _try_cast(data, False) def create_from_value(value, index, dtype): # return a new empty value suitable for the dtype if is_datetimetz(dtype): subarr = DatetimeIndex([value]*len(index)) else: if not isinstance(dtype, (np.dtype, type(np.dtype))): dtype = dtype.dtype subarr = np.empty(len(index), dtype=dtype) subarr.fill(value) return subarr # scalar like if subarr.ndim == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = _infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = _possibly_cast_to_datetime(value, dtype) subarr = create_from_value(value, index, dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = create_from_value(subarr[0], index, subarr) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: subarr = _asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. if issubclass(subarr.dtype.type, compat.string_types): subarr = np.array(data, dtype=object, copy=copy) return subarr # backwards compatiblity class TimeSeries(Series): def __init__(self, *args, **kwargs): # deprecation TimeSeries, #10890 warnings.warn("TimeSeries is deprecated. Please use Series", FutureWarning, stacklevel=2) super(TimeSeries, self).__init__(*args, **kwargs) #---------------------------------------------------------------------- # Add plotting methods to Series import pandas.tools.plotting as _gfx Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods, _gfx.SeriesPlotMethods) Series.hist = _gfx.hist_series # Add arithmetic! ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs) ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
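# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original pandas module): a small,
# self-contained example of a few Series methods defined above, based only on
# the signatures and docstrings shown here. It assumes pandas and numpy are
# importable; the sample data and variable names are invented for this demo.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    s1 = pd.Series([1.0, np.nan, 3.0], index=['a', 'b', 'c'])
    s2 = pd.Series([10.0, 20.0], index=['b', 'd'])

    # combine_first: prefer s1's values, fall back to s2 where s1 is NA;
    # the result index is the union of both indexes (a, b, c, d).
    combined = s1.combine_first(s2)

    # nlargest: partial sort, cheaper than sort_values().head(n) for small n.
    top2 = combined.nlargest(2)

    # between/isin: boolean masks over values and index labels respectively.
    mask = combined.between(1.0, 10.0) & combined.index.isin(['a', 'b'])

    print(combined, top2, mask, sep='\n\n')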
gpl-2.0
Limags/MissionPlanner
Lib/site-packages/numpy/lib/twodim_base.py
70
23431
""" Basic functions for manipulating 2d arrays """ __all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu', 'tril','vander','histogram2d','mask_indices', 'tril_indices','tril_indices_from','triu_indices','triu_indices_from', ] from numpy.core.numeric import asanyarray, equal, subtract, arange, \ zeros, greater_equal, multiply, ones, asarray, alltrue, where, \ empty def fliplr(m): """ Flip array in the left/right direction. Flip the entries in each row in the left/right direction. Columns are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array. Returns ------- f : ndarray A view of `m` with the columns reversed. Since a view is returned, this operation is :math:`\\mathcal O(1)`. See Also -------- flipud : Flip array in the up/down direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to A[:,::-1]. Requires the array to be at least 2-D. Examples -------- >>> A = np.diag([1.,2.,3.]) >>> A array([[ 1., 0., 0.], [ 0., 2., 0.], [ 0., 0., 3.]]) >>> np.fliplr(A) array([[ 0., 0., 1.], [ 0., 2., 0.], [ 3., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.fliplr(A)==A[:,::-1,...]) True """ m = asanyarray(m) if m.ndim < 2: raise ValueError("Input must be >= 2-d.") return m[:, ::-1] def flipud(m): """ Flip array in the up/down direction. Flip the entries in each column in the up/down direction. Rows are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array. Returns ------- out : array_like A view of `m` with the rows reversed. Since a view is returned, this operation is :math:`\\mathcal O(1)`. See Also -------- fliplr : Flip array in the left/right direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to ``A[::-1,...]``. Does not require the array to be two-dimensional. Examples -------- >>> A = np.diag([1.0, 2, 3]) >>> A array([[ 1., 0., 0.], [ 0., 2., 0.], [ 0., 0., 3.]]) >>> np.flipud(A) array([[ 0., 0., 3.], [ 0., 2., 0.], [ 1., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.flipud(A)==A[::-1,...]) True >>> np.flipud([1,2]) array([2, 1]) """ m = asanyarray(m) if m.ndim < 1: raise ValueError("Input must be >= 1-d.") return m[::-1,...] def rot90(m, k=1): """ Rotate an array by 90 degrees in the counter-clockwise direction. The first two dimensions are rotated; therefore, the array must be at least 2-D. Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. Returns ------- y : ndarray Rotated array. See Also -------- fliplr : Flip an array horizontally. flipud : Flip an array vertically. Examples -------- >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) """ m = asanyarray(m) if m.ndim < 2: raise ValueError("Input must be >= 2-d.") k = k % 4 if k == 0: return m elif k == 1: return fliplr(m).swapaxes(0,1) elif k == 2: return fliplr(flipud(m)) else: # k == 3 return fliplr(m.swapaxes(0,1)) def eye(N, M=None, k=0, dtype=float): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. 
dtype : data-type, optional Data-type of the returned array. Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[ 0., 1., 0.], [ 0., 0., 1.], [ 0., 0., 0.]]) """ if M is None: M = N m = zeros((N, M), dtype=dtype) if k >= M: return m if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m def diag(v, k=0): """ Extract a diagonal or construct a diagonal array. Parameters ---------- v : array_like If `v` is a 2-D array, return a copy of its `k`-th diagonal. If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th diagonal. k : int, optional Diagonal in question. The default is 0. Use `k>0` for diagonals above the main diagonal, and `k<0` for diagonals below the main diagonal. Returns ------- out : ndarray The extracted diagonal or constructed diagonal array. See Also -------- diagonal : Return specified diagonals. diagflat : Create a 2-D array with the flattened input as a diagonal. trace : Sum along diagonals. triu : Upper triangle of an array. tril : Lower triange of an array. Examples -------- >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.diag(x) array([0, 4, 8]) >>> np.diag(x, k=1) array([1, 5]) >>> np.diag(x, k=-1) array([3, 7]) >>> np.diag(np.diag(x)) array([[0, 0, 0], [0, 4, 0], [0, 0, 8]]) """ v = asarray(v) s = v.shape if len(s) == 1: n = s[0]+abs(k) res = zeros((n,n), v.dtype) if k >= 0: i = k else: i = (-k) * n res[:n-k].flat[i::n+1] = v return res elif len(s) == 2: if k >= s[1]: return empty(0, dtype=v.dtype) if v.flags.f_contiguous: # faster slicing v, k, s = v.T, -k, s[::-1] if k >= 0: i = k else: i = (-k) * s[1] return v[:s[1]-k].flat[i::s[1]+1] else: raise ValueError("Input must be 1- or 2-d.") def diagflat(v, k=0): """ Create a two-dimensional array with the flattened input as a diagonal. Parameters ---------- v : array_like Input data, which is flattened and set as the `k`-th diagonal of the output. k : int, optional Diagonal to set; 0, the default, corresponds to the "main" diagonal, a positive (negative) `k` giving the number of the diagonal above (below) the main. Returns ------- out : ndarray The 2-D output array. See Also -------- diag : MATLAB work-alike for 1-D and 2-D arrays. diagonal : Return specified diagonals. trace : Sum along diagonals. Examples -------- >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) >>> np.diagflat([1,2], 1) array([[0, 1, 0], [0, 0, 2], [0, 0, 0]]) """ try: wrap = v.__array_wrap__ except AttributeError: wrap = None v = asarray(v).ravel() s = len(v) n = s + abs(k) res = zeros((n,n), v.dtype) if (k >= 0): i = arange(0,n-k) fi = i+k+i*n else: i = arange(0,n+k) fi = i+(i-k)*n res.flat[fi] = v if not wrap: return res return wrap(res) def tri(N, M=None, k=0, dtype=float): """ An array with ones at and below the given diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the array. M : int, optional Number of columns in the array. By default, `M` is taken equal to `N`. k : int, optional The sub-diagonal at and below which the array is filled. `k` = 0 is the main diagonal, while `k` < 0 is below it, and `k` > 0 is above. The default is 0. dtype : dtype, optional Data type of the returned array. 
The default is float. Returns ------- T : ndarray of shape (N, M) Array with its lower triangle filled with ones and zero elsewhere; in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. Examples -------- >>> np.tri(3, 5, 2, dtype=int) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) >>> np.tri(3, 5, -1) array([[ 0., 0., 0., 0., 0.], [ 1., 0., 0., 0., 0.], [ 1., 1., 0., 0., 0.]]) """ if M is None: M = N m = greater_equal(subtract.outer(arange(N), arange(M)),-k) return m.astype(dtype) def tril(m, k=0): """ Lower triangle of an array. Return a copy of an array with elements above the `k`-th diagonal zeroed. Parameters ---------- m : array_like, shape (M, N) Input array. k : int, optional Diagonal above which to zero elements. `k = 0` (the default) is the main diagonal, `k < 0` is below it and `k > 0` is above. Returns ------- L : ndarray, shape (M, N) Lower triangle of `m`, of same shape and data-type as `m`. See Also -------- triu : same thing, only for the upper triangle Examples -------- >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 0, 0, 0], [ 4, 0, 0], [ 7, 8, 0], [10, 11, 12]]) """ m = asanyarray(m) out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m) return out def triu(m, k=0): """ Upper triangle of an array. Return a copy of a matrix with the elements below the `k`-th diagonal zeroed. Please refer to the documentation for `tril` for further details. See Also -------- tril : lower triangle of an array Examples -------- >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 1, 2, 3], [ 4, 5, 6], [ 0, 8, 9], [ 0, 0, 12]]) """ m = asanyarray(m) out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, int)), m) return out # borrowed from John Hunter and matplotlib def vander(x, N=None): """ Generate a Van der Monde matrix. The columns of the output matrix are decreasing powers of the input vector. Specifically, the `i`-th output column is the input vector raised element-wise to the power of ``N - i - 1``. Such a matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. Parameters ---------- x : array_like 1-D input array. N : int, optional Order of (number of columns in) the output. If `N` is not specified, a square array is returned (``N = len(x)``). Returns ------- out : ndarray Van der Monde matrix of order `N`. The first column is ``x^(N-1)``, the second ``x^(N-2)`` and so forth. Examples -------- >>> x = np.array([1, 2, 3, 5]) >>> N = 3 >>> np.vander(x, N) array([[ 1, 1, 1], [ 4, 2, 1], [ 9, 3, 1], [25, 5, 1]]) >>> np.column_stack([x**(N-1-i) for i in range(N)]) array([[ 1, 1, 1], [ 4, 2, 1], [ 9, 3, 1], [25, 5, 1]]) >>> x = np.array([1, 2, 3, 5]) >>> np.vander(x) array([[ 1, 1, 1, 1], [ 8, 4, 2, 1], [ 27, 9, 3, 1], [125, 25, 5, 1]]) The determinant of a square Vandermonde matrix is the product of the differences between the values of the input vector: >>> np.linalg.det(np.vander(x)) 48.000000000000043 >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) 48 """ x = asarray(x) if N is None: N=len(x) X = ones( (len(x),N), x.dtype) for i in range(N - 1): X[:,i] = x**(N - i - 1) return X def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): """ Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape(N,) A sequence of values to be histogrammed along the first dimension. y : array_like, shape(M,) A sequence of values to be histogrammed along the second dimension. 
bins : int or [int, int] or array_like or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density, i.e. the bin count divided by the bin area. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `normed` is True. If `normed` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx,) The bin edges along the first dimension. yedges : ndarray, shape(ny,) The bin edges along the second dimension. See Also -------- histogram: 1D histogram histogramdd: Multidimensional histogram Notes ----- When `normed` is True, then the returned histogram is the sample density, defined such that: .. math:: \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1 where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i` the area of bin `{i,j}`. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abcissa and `y` values on the ordinate axis. Rather, `x` is histogrammed along the first dimension of the array (vertical), and `y` along the second dimension of the array (horizontal). This ensures compatibility with `histogramdd`. Examples -------- >>> x, y = np.random.randn(2, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8)) >>> H.shape, xedges.shape, yedges.shape ((5, 8), (6,), (9,)) We can now use the Matplotlib to visualize this 2-dimensional histogram: >>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]] >>> import matplotlib.pyplot as plt >>> plt.imshow(H, extent=extent, interpolation='nearest') <matplotlib.image.AxesImage object at ...> >>> plt.colorbar() <matplotlib.colorbar.Colorbar instance at ...> >>> plt.show() """ from numpy import histogramdd try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = asarray(bins, float) bins = [xedges, yedges] hist, edges = histogramdd([x,y], bins, range, normed, weights) return hist, edges[0], edges[1] def mask_indices(n, mask_func, k=0): """ Return the indices to access (n, n) arrays, given a masking function. Assume `mask_func` is a function that, for a square array a of size ``(n, n)`` with a possible offset argument `k`, when called as ``mask_func(a, k)`` returns a new array with zeros in certain locations (functions like `triu` or `tril` do precisely this). Then this function returns the indices where the non-zero values would be located. Parameters ---------- n : int The returned indices will be valid to access arrays of shape (n, n). 
mask_func : callable A function whose call signature is similar to that of `triu`, `tril`. That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. `k` is an optional argument to the function. k : scalar An optional argument which is passed through to `mask_func`. Functions like `triu`, `tril` take a second argument that is interpreted as an offset. Returns ------- indices : tuple of arrays. The `n` arrays of indices corresponding to the locations where ``mask_func(np.ones((n, n)), k)`` is True. See Also -------- triu, tril, triu_indices, tril_indices Notes ----- .. versionadded:: 1.4.0 Examples -------- These are the indices that would allow you to access the upper triangular part of any 3x3 array: >>> iu = np.mask_indices(3, np.triu) For example, if `a` is a 3x3 array: >>> a = np.arange(9).reshape(3, 3) >>> a array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> a[iu] array([0, 1, 2, 4, 5, 8]) An offset can be passed also to the masking function. This gets us the indices starting on the first diagonal right of the main one: >>> iu1 = np.mask_indices(3, np.triu, 1) with which we now extract only three elements: >>> a[iu1] array([1, 2, 5]) """ m = ones((n,n), int) a = mask_func(m, k) return where(a != 0) def tril_indices(n, k=0): """ Return the indices for the lower-triangle of an (n, n) array. Parameters ---------- n : int The row dimension of the square arrays for which the returned indices will be valid. k : int, optional Diagonal offset (see `tril` for details). Returns ------- inds : tuple of arrays The indices for the triangle. The returned tuple contains two arrays, each with the indices along one dimension of the array. See also -------- triu_indices : similar function, for upper-triangular. mask_indices : generic function accepting an arbitrary mask function. tril, triu Notes ----- .. versionadded:: 1.4.0 Examples -------- Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: >>> il1 = np.tril_indices(4) >>> il2 = np.tril_indices(4, 2) Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Both for indexing: >>> a[il1] array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) And for assigning values: >>> a[il1] = -1 >>> a array([[-1, 1, 2, 3], [-1, -1, 6, 7], [-1, -1, -1, 11], [-1, -1, -1, -1]]) These cover almost the whole array (two diagonals right of the main one): >>> a[il2] = -10 >>> a array([[-10, -10, -10, 3], [-10, -10, -10, -10], [-10, -10, -10, -10], [-10, -10, -10, -10]]) """ return mask_indices(n, tril, k) def tril_indices_from(arr, k=0): """ Return the indices for the lower-triangle of arr. See `tril_indices` for full details. Parameters ---------- arr : array_like The indices will be valid for square arrays whose dimensions are the same as arr. k : int, optional Diagonal offset (see `tril` for details). See Also -------- tril_indices, tril Notes ----- .. versionadded:: 1.4.0 """ if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): raise ValueError("input array must be 2-d and square") return tril_indices(arr.shape[0], k) def triu_indices(n, k=0): """ Return the indices for the upper-triangle of an (n, n) array. Parameters ---------- n : int The size of the arrays for which the returned indices will be valid. k : int, optional Diagonal offset (see `triu` for details). Returns ------- inds : tuple of arrays The indices for the triangle. 
The returned tuple contains two arrays, each with the indices along one dimension of the array. See also -------- tril_indices : similar function, for lower-triangular. mask_indices : generic function accepting an arbitrary mask function. triu, tril Notes ----- .. versionadded:: 1.4.0 Examples -------- Compute two different sets of indices to access 4x4 arrays, one for the upper triangular part starting at the main diagonal, and one starting two diagonals further right: >>> iu1 = np.triu_indices(4) >>> iu2 = np.triu_indices(4, 2) Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Both for indexing: >>> a[iu1] array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) And for assigning values: >>> a[iu1] = -1 >>> a array([[-1, -1, -1, -1], [ 4, -1, -1, -1], [ 8, 9, -1, -1], [12, 13, 14, -1]]) These cover only a small part of the whole array (two diagonals right of the main one): >>> a[iu2] = -10 >>> a array([[ -1, -1, -10, -10], [ 4, -1, -1, -10], [ 8, 9, -1, -1], [ 12, 13, 14, -1]]) """ return mask_indices(n, triu, k) def triu_indices_from(arr, k=0): """ Return the indices for the upper-triangle of an (n, n) array. See `triu_indices` for full details. Parameters ---------- arr : array_like The indices will be valid for square arrays whose dimensions are the same as arr. k : int, optional Diagonal offset (see `triu` for details). See Also -------- triu_indices, triu Notes ----- .. versionadded:: 1.4.0 """ if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): raise ValueError("input array must be 2-d and square") return triu_indices(arr.shape[0],k)
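# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original numpy module): a short,
# self-contained example of the triangle helpers defined above, assuming
# numpy is installed. The array contents are invented for demonstration only.
if __name__ == "__main__":
    import numpy as np

    a = np.arange(16).reshape(4, 4)

    # triu/tril return copies with the other triangle zeroed out.
    upper = np.triu(a, k=1)   # strictly upper triangle (diagonal excluded)
    lower = np.tril(a)        # lower triangle including the main diagonal

    # triu_indices gives an index tuple usable for fancy indexing/assignment.
    iu = np.triu_indices(4, k=1)
    b = a.copy()
    b[iu] = 0                 # zero the strictly upper triangle of the copy

    print(upper, lower, b, sep="\n\n")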
gpl-3.0
dsm054/pandas
pandas/tests/arrays/categorical/test_repr.py
2
25897
# -*- coding: utf-8 -*- import numpy as np from pandas.compat import PY3, u from pandas import ( Categorical, CategoricalIndex, Series, date_range, period_range, timedelta_range) from pandas.core.config import option_context from pandas.tests.arrays.categorical.common import TestCategorical class TestCategoricalReprWithFactor(TestCategorical): def test_print(self): expected = ["[a, b, b, a, a, c, c, c]", "Categories (3, object): [a < b < c]"] expected = "\n".join(expected) actual = repr(self.factor) assert actual == expected class TestCategoricalRepr(object): def test_big_print(self): factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'], fastpath=True) expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600", "Categories (3, object): [a, b, c]"] expected = "\n".join(expected) actual = repr(factor) assert actual == expected def test_empty_print(self): factor = Categorical([], ["a", "b", "c"]) expected = ("[], Categories (3, object): [a, b, c]") # hack because array_repr changed in numpy > 1.6.x actual = repr(factor) assert actual == expected assert expected == actual factor = Categorical([], ["a", "b", "c"], ordered=True) expected = ("[], Categories (3, object): [a < b < c]") actual = repr(factor) assert expected == actual factor = Categorical([], []) expected = ("[], Categories (0, object): []") assert expected == repr(factor) def test_print_none_width(self): # GH10087 a = Series(Categorical([1, 2, 3, 4])) exp = u("0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]") with option_context("display.width", None): assert exp == repr(a) def test_unicode_print(self): if PY3: _rep = repr else: _rep = unicode # noqa c = Categorical(['aaaaa', 'bb', 'cccc'] * 20) expected = u"""\ [aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc] Length: 60 Categories (3, object): [aaaaa, bb, cccc]""" assert _rep(c) == expected c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20) expected = u"""\ [ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう] Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa assert _rep(c) == expected # unicode option should not affect to Categorical, as it doesn't care # the repr width with option_context('display.unicode.east_asian_width', True): c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20) expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう] Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa assert _rep(c) == expected def test_categorical_repr(self): c = Categorical([1, 2, 3]) exp = """[1, 2, 3] Categories (3, int64): [1, 2, 3]""" assert repr(c) == exp c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3]) exp = """[1, 2, 3, 1, 2, 3] Categories (3, int64): [1, 2, 3]""" assert repr(c) == exp c = Categorical([1, 2, 3, 4, 5] * 10) exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] Length: 50 Categories (5, int64): [1, 2, 3, 4, 5]""" assert repr(c) == exp c = Categorical(np.arange(20)) exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] Length: 20 Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]""" assert repr(c) == exp def test_categorical_repr_ordered(self): c = Categorical([1, 2, 3], ordered=True) exp = """[1, 2, 3] Categories (3, int64): [1 < 2 < 3]""" assert repr(c) == exp c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True) exp = """[1, 2, 3, 1, 2, 3] Categories (3, int64): [1 < 2 < 3]""" assert repr(c) == exp c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True) exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 
5] Length: 50 Categories (5, int64): [1 < 2 < 3 < 4 < 5]""" assert repr(c) == exp c = Categorical(np.arange(20), ordered=True) exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] Length: 20 Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]""" assert repr(c) == exp def test_categorical_repr_datetime(self): idx = date_range('2011-01-01 09:00', freq='H', periods=5) c = Categorical(idx) # TODO(wesm): exceeding 80 characters in the console is not good # behavior exp = ( "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n" "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" " 2011-01-01 12:00:00, " "2011-01-01 13:00:00]""") assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = ( "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, " "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, " "2011-01-01 13:00:00]\n" "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" " 2011-01-01 12:00:00, " "2011-01-01 13:00:00]") assert repr(c) == exp idx = date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') c = Categorical(idx) exp = ( "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " "2011-01-01 13:00:00-05:00]\n" "Categories (5, datetime64[ns, US/Eastern]): " "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" " " "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" " " "2011-01-01 13:00:00-05:00]") assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = ( "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, " "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, " "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n" "Categories (5, datetime64[ns, US/Eastern]): " "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" " " "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" " " "2011-01-01 13:00:00-05:00]") assert repr(c) == exp def test_categorical_repr_datetime_ordered(self): idx = date_range('2011-01-01 09:00', freq='H', periods=5) c = Categorical(idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa assert repr(c) == exp idx = date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') c = Categorical(idx, ordered=True) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 
12:00:00-05:00 < 2011-01-01 13:00:00-05:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < 2011-01-01 13:00:00-05:00]""" # noqa assert repr(c) == exp def test_categorical_repr_period(self): idx = period_range('2011-01-01 09:00', freq='H', periods=5) c = Categorical(idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]""" # noqa assert repr(c) == exp idx = period_range('2011-01', freq='M', periods=5) c = Categorical(idx) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa assert repr(c) == exp def test_categorical_repr_period_ordered(self): idx = period_range('2011-01-01 09:00', freq='H', periods=5) c = Categorical(idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < 2011-01-01 13:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < 2011-01-01 13:00]""" # noqa assert repr(c) == exp idx = period_range('2011-01', freq='M', periods=5) c = Categorical(idx, ordered=True) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa assert repr(c) == exp def test_categorical_repr_timedelta(self): idx = timedelta_range('1 days', periods=5) c = Categorical(idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 
days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa assert repr(c) == exp idx = timedelta_range('1 hours', periods=20) c = Categorical(idx) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]""" # noqa assert repr(c) == exp def test_categorical_repr_timedelta_ordered(self): idx = timedelta_range('1 days', periods=5) c = Categorical(idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa assert repr(c) == exp idx = timedelta_range('1 hours', periods=20) c = Categorical(idx, ordered=True) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < 18 days 01:00:00 < 19 days 01:00:00]""" # noqa assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 < 18 days 01:00:00 < 19 days 01:00:00]""" # noqa assert repr(c) == exp def test_categorical_index_repr(self): idx = CategoricalIndex(Categorical([1, 2, 3])) exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa assert repr(idx) == exp i = CategoricalIndex(Categorical(np.arange(10))) exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_ordered(self): i = CategoricalIndex(Categorical([1, 2, 3], ordered=True)) exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa assert repr(i) == exp i = CategoricalIndex(Categorical(np.arange(10), ordered=True)) exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_datetime(self): idx = date_range('2011-01-01 09:00', freq='H', periods=5) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_datetime_ordered(self): idx = date_range('2011-01-01 09:00', freq='H', periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa assert repr(i) == exp idx = date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa assert repr(i) == exp i = CategoricalIndex(Categorical(idx.append(idx), ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa assert repr(i) 
== exp def test_categorical_index_repr_period(self): # test all length idx = period_range('2011-01-01 09:00', freq='H', periods=1) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = period_range('2011-01-01 09:00', freq='H', periods=2) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = period_range('2011-01-01 09:00', freq='H', periods=3) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = period_range('2011-01-01 09:00', freq='H', periods=5) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp i = CategoricalIndex(Categorical(idx.append(idx))) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = period_range('2011-01', freq='M', periods=5) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_period_ordered(self): idx = period_range('2011-01-01 09:00', freq='H', periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa assert repr(i) == exp idx = period_range('2011-01', freq='M', periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_timedelta(self): idx = timedelta_range('1 days', periods=5) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa assert repr(i) == exp idx = timedelta_range('1 hours', periods=10) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 
01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa assert repr(i) == exp def test_categorical_index_repr_timedelta_ordered(self): idx = timedelta_range('1 days', periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa assert repr(i) == exp idx = timedelta_range('1 hours', periods=10) i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa assert repr(i) == exp
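For readers skimming the long expected strings above, the following standalone sketch (not part of the test suite) shows how the truncated repr, the "Length: N" footer, and the CategoricalIndex form can be inspected interactively. The exact strings depend on the pandas version, so treat the printed output as illustrative rather than authoritative.

# Minimal sketch: inspecting Categorical reprs interactively (pandas assumed installed).
import numpy as np
import pandas as pd

# Short ordered categorical: every value and every category is shown.
c_short = pd.Categorical([1, 2, 3], ordered=True)
print(repr(c_short))

# Long categorical: the repr truncates the values with "..." and adds a
# "Length: N" line, which is what the tests above assert against.
c_long = pd.Categorical(np.arange(20), ordered=True)
print(repr(c_long))

# CategoricalIndex wraps the same information in an Index-style repr.
idx = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
print(repr(idx))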
bsd-3-clause
dopplershift/MetPy
examples/gridding/Point_Interpolation.py
6
5187
# Copyright (c) 2016 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ Point Interpolation =================== Compares different point interpolation approaches. """ import cartopy.crs as ccrs import cartopy.feature as cfeature from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt import numpy as np from metpy.cbook import get_test_data from metpy.interpolate import (interpolate_to_grid, remove_nan_observations, remove_repeat_coordinates) from metpy.plots import add_metpy_logo ########################################### def basic_map(proj, title): """Make our basic default map for plotting""" fig = plt.figure(figsize=(15, 10)) add_metpy_logo(fig, 0, 80, size='large') view = fig.add_axes([0, 0, 1, 1], projection=proj) view.set_title(title) view.set_extent([-120, -70, 20, 50]) view.add_feature(cfeature.STATES.with_scale('50m')) view.add_feature(cfeature.OCEAN) view.add_feature(cfeature.COASTLINE) view.add_feature(cfeature.BORDERS, linestyle=':') return fig, view def station_test_data(variable_names, proj_from=None, proj_to=None): with get_test_data('station_data.txt') as f: all_data = np.loadtxt(f, skiprows=1, delimiter=',', usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19), dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'), ('slp', 'f'), ('air_temperature', 'f'), ('cloud_fraction', 'f'), ('dewpoint', 'f'), ('weather', '16S'), ('wind_dir', 'f'), ('wind_speed', 'f')])) all_stids = [s.decode('ascii') for s in all_data['stid']] data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids]) value = data[variable_names] lon = data['lon'] lat = data['lat'] if proj_from is not None and proj_to is not None: try: proj_points = proj_to.transform_points(proj_from, lon, lat) return proj_points[:, 0], proj_points[:, 1], value except Exception as e: print(e) return None return lon, lat, value from_proj = ccrs.Geodetic() to_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000) levels = list(range(-20, 20, 1)) cmap = plt.get_cmap('magma') norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True) x, y, temp = station_test_data('air_temperature', from_proj, to_proj) x, y, temp = remove_nan_observations(x, y, temp) x, y, temp = remove_repeat_coordinates(x, y, temp) ########################################### # Scipy.interpolate linear # ------------------------ gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='linear', hres=75000) img = np.ma.masked_where(np.isnan(img), img) fig, view = basic_map(to_proj, 'Linear') mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels) ########################################### # Natural neighbor interpolation (MetPy implementation) # ----------------------------------------------------- # `Reference <https://github.com/Unidata/MetPy/files/138653/cwp-657.pdf>`_ gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='natural_neighbor', hres=75000) img = np.ma.masked_where(np.isnan(img), img) fig, view = basic_map(to_proj, 'Natural Neighbor') mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels) ########################################### # Cressman interpolation # ---------------------- # search_radius = 100 km # # grid resolution = 25 km # # min_neighbors = 1 gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='cressman', minimum_neighbors=1, hres=75000, search_radius=100000) img = 
np.ma.masked_where(np.isnan(img), img) fig, view = basic_map(to_proj, 'Cressman') mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels) ########################################### # Barnes Interpolation # -------------------- # search_radius = 100km # # min_neighbors = 3 gx, gy, img1 = interpolate_to_grid(x, y, temp, interp_type='barnes', hres=75000, search_radius=100000) img1 = np.ma.masked_where(np.isnan(img1), img1) fig, view = basic_map(to_proj, 'Barnes') mmb = view.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels) ########################################### # Radial basis function interpolation # ------------------------------------ # linear gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='rbf', hres=75000, rbf_func='linear', rbf_smooth=0) img = np.ma.masked_where(np.isnan(img), img) fig, view = basic_map(to_proj, 'Radial Basis Function') mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels) plt.show()
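As a quick way to exercise interpolate_to_grid outside of the station data used above, the sketch below runs a few of the same interp_type options on synthetic points. The coordinates, the smooth temperature field, the 50 km grid spacing, and the 150 km search radius are arbitrary illustrative choices, not values taken from this example.

# Sanity-check sketch with synthetic observations in projected meters.
import numpy as np
from metpy.interpolate import interpolate_to_grid

rng = np.random.RandomState(42)
x = rng.uniform(-500000, 500000, 200)   # hypothetical projected x coordinates (m)
y = rng.uniform(-500000, 500000, 200)   # hypothetical projected y coordinates (m)
temp = 15 + 10 * np.sin(x / 2e5) * np.cos(y / 2e5)  # smooth synthetic field

for interp_type in ('linear', 'cressman', 'barnes'):
    gx, gy, img = interpolate_to_grid(x, y, temp, interp_type=interp_type,
                                      hres=50000, search_radius=150000,
                                      minimum_neighbors=1)
    # gx, gy are 2-D grid coordinates; img holds the interpolated field (NaN
    # where no estimate is possible, e.g. outside the search radius).
    print(interp_type, gx.shape, np.nanmin(img), np.nanmax(img))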
bsd-3-clause
lenovor/scikit-learn
sklearn/calibration.py
137
18876
"""Calibration of predicted probabilities.""" # Author: Alexandre Gramfort <[email protected]> # Balazs Kegl <[email protected]> # Jan Hendrik Metzen <[email protected]> # Mathieu Blondel <[email protected]> # # License: BSD 3 clause from __future__ import division import inspect import warnings from math import log import numpy as np from scipy.optimize import fmin_bfgs from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone from .preprocessing import LabelBinarizer from .utils import check_X_y, check_array, indexable, column_or_1d from .utils.validation import check_is_fitted from .isotonic import IsotonicRegression from .svm import LinearSVC from .cross_validation import check_cv from .metrics.classification import _check_binary_probabilistic_predictions class CalibratedClassifierCV(BaseEstimator, ClassifierMixin): """Probability calibration with isotonic regression or sigmoid. With this class, the base_estimator is fit on the train set of the cross-validation generator and the test set is used for calibration. The probabilities for each of the folds are then averaged for prediction. In case that cv="prefit" is passed to __init__, it is it is assumed that base_estimator has been fitted already and all data is used for calibration. Note that data for fitting the classifier and for calibrating it must be disjpint. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. If cv=prefit, the classifier must have been fit already on data. method : 'sigmoid' | 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parameteric approach. It is not advised to use isotonic calibration with too few calibration samples (<<1000) since it tends to overfit. Use sigmoids (Platt's calibration) in this case. cv : integer or cross-validation generator or "prefit", optional If an integer is passed, it is the number of folds (default 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. If "prefit" is passed, it is assumed that base_estimator has been fitted already and all data is used for calibration. Attributes ---------- classes_ : array, shape (n_classes) The class labels. calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit") The list of calibrated classifiers, one for each crossvalidation fold, which has been fitted on all but the validation fold and calibrated on the validation fold. References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. [4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator=None, method='sigmoid', cv=3): self.base_estimator = base_estimator self.method = method self.cv = cv def fit(self, X, y, sample_weight=None): """Fit the calibrated model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. 
sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) X, y = indexable(X, y) lb = LabelBinarizer().fit(y) self.classes_ = lb.classes_ # Check that we each cross-validation fold can have at least one # example per class n_folds = self.cv if isinstance(self.cv, int) \ else self.cv.n_folds if hasattr(self.cv, "n_folds") else None if n_folds and \ np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]): raise ValueError("Requesting %d-fold cross-validation but provided" " less than %d examples for at least one class." % (n_folds, n_folds)) self.calibrated_classifiers_ = [] if self.base_estimator is None: # we want all classifiers that don't expose a random_state # to be deterministic (and we don't want to expose this one). base_estimator = LinearSVC(random_state=0) else: base_estimator = self.base_estimator if self.cv == "prefit": calibrated_classifier = _CalibratedClassifier( base_estimator, method=self.method) if sample_weight is not None: calibrated_classifier.fit(X, y, sample_weight) else: calibrated_classifier.fit(X, y) self.calibrated_classifiers_.append(calibrated_classifier) else: cv = check_cv(self.cv, X, y, classifier=True) arg_names = inspect.getargspec(base_estimator.fit)[0] estimator_name = type(base_estimator).__name__ if (sample_weight is not None and "sample_weight" not in arg_names): warnings.warn("%s does not support sample_weight. Samples" " weights are only used for the calibration" " itself." % estimator_name) base_estimator_sample_weight = None else: base_estimator_sample_weight = sample_weight for train, test in cv: this_estimator = clone(base_estimator) if base_estimator_sample_weight is not None: this_estimator.fit( X[train], y[train], sample_weight=base_estimator_sample_weight[train]) else: this_estimator.fit(X[train], y[train]) calibrated_classifier = _CalibratedClassifier( this_estimator, method=self.method) if sample_weight is not None: calibrated_classifier.fit(X[test], y[test], sample_weight[test]) else: calibrated_classifier.fit(X[test], y[test]) self.calibrated_classifiers_.append(calibrated_classifier) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. """ check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) # Compute the arithmetic mean of the predictions of the calibrated # classfiers mean_proba = np.zeros((X.shape[0], len(self.classes_))) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba += proba mean_proba /= len(self.calibrated_classifiers_) return mean_proba def predict(self, X): """Predict the target of new samples. Can be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples,) The predicted class. 
""" check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) return self.classes_[np.argmax(self.predict_proba(X), axis=1)] class _CalibratedClassifier(object): """Probability calibration with isotonic regression or sigmoid. It assumes that base_estimator has already been fit, and trains the calibration on the input set of the fit function. Note that this class should not be used as an estimator directly. Use CalibratedClassifierCV with cv="prefit" instead. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. No default value since it has to be an already fitted estimator. method : 'sigmoid' | 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parameteric approach based on isotonic regression. References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. [4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator, method='sigmoid'): self.base_estimator = base_estimator self.method = method def _preproc(self, X): n_classes = len(self.classes_) if hasattr(self.base_estimator, "decision_function"): df = self.base_estimator.decision_function(X) if df.ndim == 1: df = df[:, np.newaxis] elif hasattr(self.base_estimator, "predict_proba"): df = self.base_estimator.predict_proba(X) if n_classes == 2: df = df[:, 1:] else: raise RuntimeError('classifier has no decision_function or ' 'predict_proba method.') idx_pos_class = np.arange(df.shape[1]) return df, idx_pos_class def fit(self, X, y, sample_weight=None): """Calibrate the fitted model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ lb = LabelBinarizer() Y = lb.fit_transform(y) self.classes_ = lb.classes_ df, idx_pos_class = self._preproc(X) self.calibrators_ = [] for k, this_df in zip(idx_pos_class, df.T): if self.method == 'isotonic': calibrator = IsotonicRegression(out_of_bounds='clip') elif self.method == 'sigmoid': calibrator = _SigmoidCalibration() else: raise ValueError('method should be "sigmoid" or ' '"isotonic". Got %s.' % self.method) calibrator.fit(this_df, Y[:, k], sample_weight) self.calibrators_.append(calibrator) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. Can be exact zeros. 
""" n_classes = len(self.classes_) proba = np.zeros((X.shape[0], n_classes)) df, idx_pos_class = self._preproc(X) for k, this_df, calibrator in \ zip(idx_pos_class, df.T, self.calibrators_): if n_classes == 2: k += 1 proba[:, k] = calibrator.predict(this_df) # Normalize the probabilities if n_classes == 2: proba[:, 0] = 1. - proba[:, 1] else: proba /= np.sum(proba, axis=1)[:, np.newaxis] # XXX : for some reason all probas can be 0 proba[np.isnan(proba)] = 1. / n_classes # Deal with cases where the predicted probability minimally exceeds 1.0 proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 return proba def _sigmoid_calibration(df, y, sample_weight=None): """Probability Calibration with sigmoid method (Platt 2000) Parameters ---------- df : ndarray, shape (n_samples,) The decision function or predict proba for the samples. y : ndarray, shape (n_samples,) The targets. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- a : float The slope. b : float The intercept. References ---------- Platt, "Probabilistic Outputs for Support Vector Machines" """ df = column_or_1d(df) y = column_or_1d(y) F = df # F follows Platt's notations tiny = np.finfo(np.float).tiny # to avoid division by 0 warning # Bayesian priors (see Platt end of section 2.2) prior0 = float(np.sum(y <= 0)) prior1 = y.shape[0] - prior0 T = np.zeros(y.shape) T[y > 0] = (prior1 + 1.) / (prior1 + 2.) T[y <= 0] = 1. / (prior0 + 2.) T1 = 1. - T def objective(AB): # From Platt (beginning of Section 2.2) E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny)) if sample_weight is not None: return (sample_weight * l).sum() else: return l.sum() def grad(AB): # gradient of the objective function E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) TEP_minus_T1P = P * (T * E - T1) if sample_weight is not None: TEP_minus_T1P *= sample_weight dA = np.dot(TEP_minus_T1P, F) dB = np.sum(TEP_minus_T1P) return np.array([dA, dB]) AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))]) AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False) return AB_[0], AB_[1] class _SigmoidCalibration(BaseEstimator, RegressorMixin): """Sigmoid regression model. Attributes ---------- a_ : float The slope. b_ : float The intercept. """ def fit(self, X, y, sample_weight=None): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples,) Training data. y : array-like, shape (n_samples,) Training target. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X = column_or_1d(X) y = column_or_1d(y) X, y = indexable(X, y) self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight) return self def predict(self, T): """Predict new data by linear interpolation. Parameters ---------- T : array-like, shape (n_samples,) Data to predict from. Returns ------- T_ : array, shape (n_samples,) The predicted data. """ T = column_or_1d(T) return 1. / (1. + np.exp(self.a_ * T + self.b_)) def calibration_curve(y_true, y_prob, normalize=False, n_bins=5): """Compute true and predicted probabilities for a calibration curve. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. 
normalize : bool, optional, default=False Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not a proper probability. If True, the smallest value in y_prob is mapped onto 0 and the largest one onto 1. n_bins : int Number of bins. A bigger number requires more data. Returns ------- prob_true : array, shape (n_bins,) The true probability in each bin (fraction of positives). prob_pred : array, shape (n_bins,) The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) elif y_prob.min() < 0 or y_prob.max() > 1: raise ValueError("y_prob has values outside [0, 1] and normalize is " "set to False.") y_true = _check_binary_probabilistic_predictions(y_true, y_prob) bins = np.linspace(0., 1. + 1e-8, n_bins + 1) binids = np.digitize(y_prob, bins) - 1 bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = (bin_true[nonzero] / bin_total[nonzero]) prob_pred = (bin_sums[nonzero] / bin_total[nonzero]) return prob_true, prob_pred
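A minimal end-to-end usage sketch for the classes defined above: fit CalibratedClassifierCV around a LinearSVC (which has no predict_proba of its own), then summarize the calibration of the held-out predictions with calibration_curve. The dataset and parameter values are arbitrary, and the import paths assume a scikit-learn version contemporary with this module.

# Usage sketch, not a prescribed workflow.
import numpy as np
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_train, y_train = X[:1000], y[:1000]
X_test, y_test = X[1000:], y[1000:]

# Platt scaling ('sigmoid') turns LinearSVC decision_function scores into
# probabilities, using 3-fold cross-validation internally.
clf = CalibratedClassifierCV(LinearSVC(random_state=0), method='sigmoid', cv=3)
clf.fit(X_train, y_train)
proba = clf.predict_proba(X_test)[:, 1]

# Reliability-diagram data: fraction of positives vs. mean predicted
# probability in each bin.
prob_true, prob_pred = calibration_curve(y_test, proba, n_bins=5)
print(np.column_stack([prob_pred, prob_true]))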
bsd-3-clause
MJJoyce/climate
mccsearch/code/mainProgTemplate.py
5
4713
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' # running the program ''' import sys import networkx as nx import mccSearch import numpy as np import numpy.ma as ma import files import matplotlib.pyplot as plt import subprocess def main(): CEGraph = nx.DiGraph() prunedGraph = nx.DiGraph() MCCList =[] MCSList=[] MCSMCCNodesList =[] allMCSsList =[] allCETRMMList =[] #for GrADs subprocess.call('export DISPLAY=:0.0', shell=True) mainDirStr= "/directory/to/where/to/store/outputs" TRMMdirName = "/directory/to/the/TRMM/netCDF/files" CEoriDirName = "/directory/to/the/MERG/netCDF/files" #for first time working with the raw MERG zipped files # mccSearch.preprocessingMERG("/Users/kimwhitehall/Documents/HU/research/DATA") # --------------------------------------------------------------------------------- #create main directory and file structure for storing intel mccSearch.createMainDirectory(mainDirStr) TRMMCEdirName = mainDirStr+'/TRMMnetcdfCEs' CEdirName = mainDirStr+'/MERGnetcdfCEs' # for doing some postprocessing with the clipped datasets instead of running the full program, e.g. # mccSearch.postProcessingNetCDF(3,CEoriDirName) # mccSearch.postProcessingNetCDF(2) # ------------------------------------------------------------------------------------------------- #let's go! 
print "\n -------------- Read MERG Data ----------" mergImgs, timeList = mccSearch.readMergData(CEoriDirName) print ("-"*80) print 'in main', len(mergImgs) #print 'timeList', timeList print 'TRMMdirName ', TRMMdirName print "\n -------------- TESTING findCloudElements ----------" CEGraph = mccSearch.findCloudElements(mergImgs,timeList,TRMMdirName) #if the TRMMdirName wasnt entered for whatever reason, you can still get the TRMM data this way # CEGraph = mccSearch.findCloudElements(mergImgs,timeList) # allCETRMMList=mccSearch.findPrecipRate(TRMMdirName,timeList) # ---------------------------------------------------------------------------------------------- print ("-"*80) print "number of nodes in CEGraph is: ", CEGraph.number_of_nodes() print ("-"*80) print "\n -------------- TESTING findCloudClusters ----------" prunedGraph = mccSearch.findCloudClusters(CEGraph) print ("-"*80) print "number of nodes in prunedGraph is: ", prunedGraph.number_of_nodes() print ("-"*80) #sys.exit() print "\n -------------- TESTING findMCCs ----------" MCCList,MCSList = mccSearch.findMCC(prunedGraph) print ("-"*80) print "MCC List has been acquired ", len(MCCList) print "MCS List has been acquired ", len(MCSList) print ("-"*80) #now ready to perform various calculations/metrics print "\n -------------- TESTING METRICS ----------" #some calculations/metrics that work that work # print "creating the MCC userfile ", mccSearch.createTextFile(MCCList,1) # print "creating the MCS userfile ", mccSearch.createTextFile(MCSList,2) # MCCTimes, tdelta = mccSearch.temporalAndAreaInfoMetric(MCCList) # print "number of MCCs is: ", mccSearch.numberOfFeatures(MCCList) # print "longest duration is: ", mccSearch.longestDuration(MCCTimes), "hrs" # print "shortest duration is: ", mccSearch.shortestDuration(MCCTimes), "hrs" # #print "Average duration is: ", mccSearch.convert_timedelta(mccSearch.averageMCCLength(MCCTimes)) # print "Average duration is: ", mccSearch.averageDuration(MCCTimes), "hrs" # print "Average size is: ", mccSearch.averageFeatureSize(MCCList), "km^2" #some plots that work # mccSearch.plotAccTRMM(MCCList) # mccSearch.displayPrecip(MCCList) # mccSearch.plotAccuInTimeRange('2009-09-01_00:00:00', '2009-09-01_09:00:00') # mccSearch.displaySize(MCCList) # mccSearch.displayPrecip(MCCList) # mccSearch.plotHistogram(MCCList) # print ("-"*80) main()
apache-2.0
samehuman/fast-rcnn
lib/roi_data_layer/minibatch.py
44
7337
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Compute minibatch blobs for training a Fast R-CNN network.""" import numpy as np import numpy.random as npr import cv2 from fast_rcnn.config import cfg from utils.blob import prep_im_for_blob, im_list_to_blob def get_minibatch(roidb, num_classes): """Given a roidb, construct a minibatch sampled from it.""" num_images = len(roidb) # Sample random scales to use for each image in this batch random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images) assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \ 'num_images ({}) must divide BATCH_SIZE ({})'. \ format(num_images, cfg.TRAIN.BATCH_SIZE) rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image) # Get the input image blob, formatted for caffe im_blob, im_scales = _get_image_blob(roidb, random_scale_inds) # Now, build the region of interest and label blobs rois_blob = np.zeros((0, 5), dtype=np.float32) labels_blob = np.zeros((0), dtype=np.float32) bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32) bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32) # all_overlaps = [] for im_i in xrange(num_images): labels, overlaps, im_rois, bbox_targets, bbox_loss \ = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes) # Add to RoIs blob rois = _project_im_rois(im_rois, im_scales[im_i]) batch_ind = im_i * np.ones((rois.shape[0], 1)) rois_blob_this_image = np.hstack((batch_ind, rois)) rois_blob = np.vstack((rois_blob, rois_blob_this_image)) # Add to labels, bbox targets, and bbox loss blobs labels_blob = np.hstack((labels_blob, labels)) bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets)) bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss)) # all_overlaps = np.hstack((all_overlaps, overlaps)) # For debug visualizations # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps) blobs = {'data': im_blob, 'rois': rois_blob, 'labels': labels_blob} if cfg.TRAIN.BBOX_REG: blobs['bbox_targets'] = bbox_targets_blob blobs['bbox_loss_weights'] = bbox_loss_blob return blobs def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes): """Generate a random sample of RoIs comprising foreground and background examples. 
""" # label = class RoI has max overlap with labels = roidb['max_classes'] overlaps = roidb['max_overlaps'] rois = roidb['boxes'] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size) # Sample foreground regions without replacement if fg_inds.size > 0: fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False) # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size) # Sample foreground regions without replacement if bg_inds.size > 0: bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False) # The indices that we're selecting (both fg and bg) keep_inds = np.append(fg_inds, bg_inds) # Select sampled values from various arrays: labels = labels[keep_inds] # Clamp labels for the background RoIs to 0 labels[fg_rois_per_this_image:] = 0 overlaps = overlaps[keep_inds] rois = rois[keep_inds] bbox_targets, bbox_loss_weights = \ _get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :], num_classes) return labels, overlaps, rois, bbox_targets, bbox_loss_weights def _get_image_blob(roidb, scale_inds): """Builds an input blob from the images in the roidb at the specified scales. """ num_images = len(roidb) processed_ims = [] im_scales = [] for i in xrange(num_images): im = cv2.imread(roidb[i]['image']) if roidb[i]['flipped']: im = im[:, ::-1, :] target_size = cfg.TRAIN.SCALES[scale_inds[i]] im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE) im_scales.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) return blob, im_scales def _project_im_rois(im_rois, im_scale_factor): """Project image RoIs into the rescaled training image.""" rois = im_rois * im_scale_factor return rois def _get_bbox_regression_labels(bbox_target_data, num_classes): """Bounding-box regression targets are stored in a compact form in the roidb. This function expands those targets into the 4-of-4*K representation used by the network (i.e. only one class has non-zero targets). The loss weights are similarly expanded. Returns: bbox_target_data (ndarray): N x 4K blob of regression targets bbox_loss_weights (ndarray): N x 4K blob of loss weights """ clss = bbox_target_data[:, 0] bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32) inds = np.where(clss > 0)[0] for ind in inds: cls = clss[ind] start = 4 * cls end = start + 4 bbox_targets[ind, start:end] = bbox_target_data[ind, 1:] bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.] 
return bbox_targets, bbox_loss_weights def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps): """Visualize a mini-batch for debugging.""" import matplotlib.pyplot as plt for i in xrange(rois_blob.shape[0]): rois = rois_blob[i, :] im_ind = rois[0] roi = rois[1:] im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy() im += cfg.PIXEL_MEANS im = im[:, :, (2, 1, 0)] im = im.astype(np.uint8) cls = labels_blob[i] plt.imshow(im) print 'class: ', cls, ' overlap: ', overlaps[i] plt.gca().add_patch( plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0], roi[3] - roi[1], fill=False, edgecolor='r', linewidth=3) ) plt.show()
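The 4-of-4*K expansion performed by _get_bbox_regression_labels is easy to misread, so here is a toy, numpy-only illustration with made-up targets for num_classes = 3. It mirrors the loop above rather than calling the module itself.

# Toy illustration of the 4-of-4*K bbox-target expansion (made-up numbers).
import numpy as np

num_classes = 3          # e.g. background + 2 object classes
# Compact form: column 0 is the class, columns 1:5 are (dx, dy, dw, dh).
bbox_target_data = np.array([[1, 0.1, -0.2, 0.3, 0.0],   # class-1 RoI
                             [0, 0.0,  0.0, 0.0, 0.0],   # background RoI
                             [2, -0.5, 0.4, 0.1, 0.2]],  # class-2 RoI
                            dtype=np.float32)

clss = bbox_target_data[:, 0].astype(int)
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros_like(bbox_targets)
for ind in np.where(clss > 0)[0]:
    start = 4 * clss[ind]
    bbox_targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
    bbox_loss_weights[ind, start:start + 4] = 1.0

print(bbox_targets)       # only the 4 columns of each RoI's own class are non-zero
print(bbox_loss_weights)  # matching loss weights; background rows stay all-zero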
mit
rhyolight/nupic.research
projects/lateral_pooler/src/run_experiment.py
4
11174
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2017, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import inspect, os import numpy as np import pickle import json import sys from optparse import OptionParser from tabulate import tabulate import itertools import matplotlib.pyplot as plt from pprint import PrettyPrinter pprint = PrettyPrinter(indent=4).pprint from htmresearch.support.lateral_pooler.datasets import load_data from htmresearch.support.lateral_pooler.utils import random_id, add_noise from htmresearch.support.lateral_pooler.metrics import mean_mutual_info_from_model, reconstruction_error # from htmresearch.frameworks.sp_paper.sp_metrics import reconstructionError from nupic.algorithms.spatial_pooler import SpatialPooler as SpatialPooler from sp_wrapper import SpatialPoolerWrapper from htmresearch.algorithms.lateral_pooler import LateralPooler from htmresearch.support.lateral_pooler.callbacks import (ModelCheckpoint, ModelOutputEvaluator, Reconstructor, ModelInspector, OutputCollector, Logger) def load_pooler(path): filename = "{}/pooler.p".format(path) with open(filename) as f: pooler = pickle.load(f) return pooler def dump_pooler(path, t, pooler): filename = "{}/pooler/pooler_{:04}.p".format(path, t) with open(filename, 'wb') as f: pickle.dump(pooler, f) def dump_entity(path, label, t, X): filename = "{}/{}/{}_{:04}.p".format(path, label,label, t) with open(filename, 'wb') as f: pickle.dump(X, f) def dump_json(path_to_file, my_dict): with open(path_to_file, 'wb') as f: json.dump(my_dict, f, indent=4) def dump_data(path_to_file, data): with open(path_to_file, 'wb') as f: pickle.dump(data, f) def dump_results(path, results): for key in results: os.makedirs(os.path.dirname("{}/{}/".format(path, key))) for i, data in enumerate(results[key]): filename ='{}/{}/{}_{:04}.p'.format(path, key, key, i + 1) with open(filename, 'wb') as file: pickle.dump(data, file) def get_shape(params): if "inputDimensions" in params: return params["columnDimensions"][0], params["inputDimensions"][0] else: return params["output_size"], params["input_size"] def get_permanence_vals(sp): m = sp.getNumInputs() n = np.prod(sp.getColumnDimensions()) W = np.zeros((n, m)) for i in range(sp._numColumns): sp.getPermanence(i, W[i, :]) return W def parse_argv(): parser = OptionParser(usage = "<yourscript> [options]\n\n"\ "Example:\n"\ "python {} --sp lateral --data mnist --params mnist --name example -e 2 -b 1 -d 100" .format(sys.argv[0])) parser.add_option("--data", type=str, default='', dest="data_set", help="") parser.add_option("-d", "--num_data", type=int, default=30000, dest="num_data_points", help="") parser.add_option("-e", "--num_epochs", type=int, 
default=6, dest="num_epochs", help="number of epochs") parser.add_option("-b", "--batch_size", type=int, default=1, dest="batch_size", help="Mini batch size") parser.add_option("--sp", type=str, default="nupic", dest="pooler_type", help="spatial pooler implementations: nupic, lateral") parser.add_option("--params", type=str, dest="sp_params", help="json file with spatial pooler parameters") parser.add_option("--name", type=str, default=None, dest="experiment_id", help="") parser.add_option("--seed", type=str, default=None, dest="seed", help="random seed for SP and dataset") parser.add_option("--cont", type=str, default=None, dest="cont", help="...") (options, remainder) = parser.parse_args() return options, remainder #################################################### #################################################### #################################################### ### ### Main() ### #################################################### #################################################### #################################################### def main(argv): args, _ = parse_argv() data_set = args.data_set d = args.num_data_points sp_type = args.pooler_type num_epochs = args.num_epochs batch_size = args.batch_size experiment_id = args.experiment_id seed = args.seed cont = args.cont #################################################### # # Create folder for the experiment # #################################################### if experiment_id is None: experiment_id = random_id(5) else: experiment_id += "_" + random_id(5) the_scripts_path = os.path.dirname(os.path.realpath(__file__)) # script directory relative_path = "../results/{}_pooler_{}_{}".format(sp_type, data_set, experiment_id) path = the_scripts_path + "/" + relative_path os.makedirs(os.path.dirname("{}/".format(path))) os.makedirs(os.path.dirname("{}/pooler/".format(path))) os.makedirs(os.path.dirname("{}/inputs/".format(path))) os.makedirs(os.path.dirname("{}/outputs/".format(path))) os.makedirs(os.path.dirname("{}/targets/".format(path))) os.makedirs(os.path.dirname("{}/test_inputs/".format(path))) os.makedirs(os.path.dirname("{}/test_targets/".format(path))) print( "\nExperiment directory:\n\n\t\"{}\"\n" .format(relative_path)) #################################################### # # Load the sp parameters # #################################################### sp_params_dict = json.load(open(the_scripts_path + "/params.json")) if args.sp_params is not None: sp_params = sp_params_dict["nupic"][args.sp_params] else: sp_params = sp_params_dict["nupic"][data_set] if seed is not None: sp_params["seed"] = seed pprint(sp_params) #################################################### # # Load the SP # #################################################### if sp_type == "nupic": if cont == None: pooler = SpatialPoolerWrapper(**sp_params) else: pooler = load_pooler(cont) elif sp_type == "lateral": if cont == None: pooler = LateralPooler(**sp_params) else: pooler = load_pooler(cont) else: raise "I don't know an SP of that type:{}".format(sp_type) print( "\nUsing {} pooler.\n\n"\ "\tdesired sparsity: {}\n"\ "\tdesired code weight: {}\n" .format(sp_type, pooler.sparsity, pooler.code_weight)) #################################################### # # Load the data # #################################################### X, T, X_test, T_test = load_data(data_set) X = X[:,:d] dump_entity(path, "inputs", 0, X) dump_entity(path, "targets", 0, T) dump_entity(path, "test_inputs", 0, X_test) dump_entity(path, "test_targets", 0, T_test) 
#################################################### # # Training # #################################################### if batch_size != 1: raise Exception( "Let's stick with online learning for now..."\ "set the batch size -b to 1.") checkpoints = set(range(0, d, 100)) results = { "outputs_test": [], "inputs_test": [], "feedforward" : [], "avg_activity_units" : [], "avg_activity_pairs" : []} metrics = { "code_weight" : [], "rec_error" : [], "mutual_info" : []} config = { "num_checkpoints" : len(checkpoints), "path": relative_path, "sp_type": sp_type, "data_set": data_set, "sparsity": pooler.sparsity, "code_weight": pooler.code_weight } print( "Training (online, i.e. batch size = 1)...\n" .format(sp_type)) # # "Fit" the model to the training data # n, m = pooler.shape for epoch in range(num_epochs): print( "\te:{}/{}" .format(epoch + 1, num_epochs,)) Targets = np.zeros((T.shape[0],d)) Y = np.zeros((n,d)) perm = np.random.permutation(d) check_count = 0 for t in range(d): if t%500 == 0: print( "\t\tb:{}/{}" .format(t, d)) x = X[:,perm[t]] y = Y[:, t] Targets[:, t] = T[:,perm[t]] pooler.compute(x, True, y) if t in checkpoints: check_count += 1 # Y_test = pooler.encode(X_test) # results["avg_activity_units"].append(pooler._activeDutyCycles.copy()) # results["avg_activity_pairs"].append(pooler.avgActivityPairs.copy()) # results["inputs_test"].append(X_test) # dump_entity(path, "pooler", check_count, pooler) # dump_entity(path, "inputs", check_count, X) # dump_entity(path, "outputs", check_count, Y) # dump_entity(path, "targets", check_count, Targets) # results["outputs_test"].append(Y_test) # results["feedforward"].append(pooler.feedforward.copy()) # metrics["code_weight"].append(np.mean(np.sum(Y_test, axis=0))) # metrics["mutual_info"].append(mean_mutual_info_from_model(pooler)) # metrics["rec_error"].append(reconstruction_error(pooler, X_test)) print("") print(tabulate(metrics, headers="keys")) print( "\nSaving results to file...") # dump_json(path + "/metrics.json", metrics) dump_json(path + "/config.json", config) # dump_results(path, results) dump_pooler(path, check_count, pooler) print( "Done.") if __name__ == "__main__": main(sys.argv[1:])
gpl-3.0
msaffarm/DeepRetina
util/readImages.py
1
9874
# A helper function to read and manipulate retianl images # import tensorflow as tf import numpy as np import random import os import sys from scipy import misc from scipy import ndimage import matplotlib.pyplot as plt # wrap_counting from wrap_counting import sampler UNET_PATH = os.getcwd() + "/../" sys.path.append(UNET_PATH) DATA_PATH = os.getcwd() + "/../../RetinalDataJohn" # DATA_PATH = os.getcwd() + "/../sampleData" SEED = 1234 class DataProvider(object): def __init__(self, validationSize = 20, batchSize = 1): # super(DataProvider, self).__init__(a_min, a_max) # metaData dict self.trainData = None self.testData = None self.validData = None self.n_class = 2 self.a_min = 0 self.a_max = 255 self.validationSize = validationSize self.trainSize = None self.testSize = None self.batchSize = batchSize self.sampler = None self.channels = 1 self.n_class = 2 def getBatchSize(self): return self.batchSize def getValidationSize(self): return self.validationSize def createMetaDataDict(self, path): files = os.listdir(path) # print(files) # metaData = {image:GT} metaData = {} images = [] GTs = [] # create a list of images and GTs for file in files: # print(file.split("-")[0]) if file.split("-")[0] != "GT": images.append((file,file.split("_")[-1])) else: GTs.append((file,file.split("_")[-1])) # metaData = {image:GT} for img in images: for g in GTs: if g[1] == img[1]: metaData[img[0]] = g[0] return metaData def createAugmentedData(self, metaDataDict, dataPath): augDataImg = [] augDataGt = [] for aImg, aGt in metaDataDict.items(): img = misc.imread(dataPath +"/" + aImg) # read image gt = misc.imread(dataPath +"/" + aGt) # read its GT augDataImg.append(img) augDataGt.append(gt) augImg, augGt = self.augmentData(img, gt) for i in range(len(augImg)): augDataImg.append(augImg[i]) augDataGt.append(augGt[i]) del img, gt, augImg, augGt return augDataImg, augDataGt def augmentData(self, img, gt): augImg = [] augGt = [] # flip up-down augImg.append(np.flipud(img)) augGt.append(np.flipud(gt)) # flip right-left augImg.append(np.fliplr(img)) augGt.append(np.fliplr(gt)) # rotate 90, 180 and 270 clockwise for i in range(1,4): augImg.append(ndimage.rotate(img, i*90)) augGt.append(ndimage.rotate(gt, i*90)) return augImg,augGt def readTrainData(self): # read train Data train_path = DATA_PATH + "/train" trainMetaData = self.createMetaDataDict(train_path) self.trainData = self.createAugmentedData(trainMetaData, train_path) print("done reading data") # extract validation data self.validData = self.createValidationData() # get train size self.trainSize = len(self.trainData[0]) # create sampler to get samples from train data self.sampler = sampler(self.batchSize, self.trainSize, seed = SEED) def readTestData(self): # read test Data test_path = DATA_PATH + "/test" testMetaData = self.createMetaDataDict(test_path) self.testData = self.createAugmentedData(testMetaData, test_path) # get train size self.testSize = len(self.testData[0]) # print(self.testSize) def readData(self): self.readTrainData() self.readTestData() def createValidationData(self): trainSize = len(self.trainData[0]) # randInt = random.sample(range(trainSize), self.validationSize) randInt = range(self.validationSize) validImg = [] validGT = [] for r in randInt: validImg.append(self.trainData[0][r]) validGT.append(self.trainData[1][r]) # pop validation data from train data tempImg = [] tempGT = [] for i in range(trainSize): if i not in randInt: tempImg.append(self.trainData[0][i]) tempGT.append(self.trainData[1][i]) self.trainData = (tempImg, tempGT) return validImg, 
validGT def processLabels(self, label): nx = label.shape[1] ny = label.shape[0] # label = self.normalize(label) # print(label.dtype) labels = np.zeros((ny, nx, self.n_class), dtype=np.float32) labels[..., 1] = self.normalize(label) labels[..., 0] = self.normalize(~label) return labels def processData(self, data): # normalization data = self.normalize(data) return np.reshape(data, (data.shape[0], data.shape[1], self.channels)) def normalize(self,data): # check if all zeros or ones if np.count_nonzero(data)==0: # print("zeros") return data if np.count_nonzero(data -1 )==0: # print("ones") return data data = np.clip(np.fabs(data), self.a_min, self.a_max) data -= np.amin(data) data /= (np.amax(data) + 1e-6) return data def cropImage(self,data): m, n = data.shape data = data[m/4:-m/4,n/4:-n/4] return data def getValidationData(self, batchSize,crop=True): if batchSize == -1: batchSize = self.validationSize if crop: nx = self.validData[0][0].shape[0]/2 ny = self.validData[0][0].shape[1]/2 X = np.zeros((batchSize, nx, ny, self.channels)) Y = np.zeros((batchSize, nx, ny, self.n_class)) else: nx = self.validData[0][0].shape[0] ny = self.validData[0][0].shape[1] X = np.zeros((batchSize, nx, ny, self.channels)) Y = np.zeros((batchSize, nx, ny, self.n_class)) selected = random.sample(range(self.validationSize), batchSize) for idx, val in enumerate(selected): if crop: # crop before processing d = self.cropImage(self.validData[0][val]) l = self.cropImage(self.validData[1][val]) else: d = self.validData[0][val] l = self.validData[1][val] X[idx] = self.processData(d) Y[idx] = self.processLabels(l) return X, Y def getTestData(self, batchSize, crop = True): if batchSize == -1: batchSize = self.testSize if crop: nx = self.testData[0][0].shape[0]/2 ny = self.testData[0][0].shape[1]/2 X = np.zeros((batchSize, nx, ny, self.channels)) Y = np.zeros((batchSize, nx, ny, self.n_class)) else: nx = self.testData[0][0].shape[0] ny = self.testData[0][0].shape[1] X = np.zeros((batchSize, nx, ny, self.channels)) Y = np.zeros((batchSize, nx, ny, self.n_class)) selected = random.sample(range(self.testSize), batchSize) for idx, val in enumerate(selected): if crop: # crop before processing d = self.cropImage(self.testData[0][val]) l = self.cropImage(self.testData[1][val]) else: d = self.testData[0][val] l = self.testData[1][val] print(d.shape) print(l.shape) X[idx] = self.processData(d) Y[idx] = self.processLabels(l) return X, Y def __call__(self,crop = True): # print(self.sampler.getOrder()) nextIdx = self.sampler.next_inds() # print(nextIdx) # train_data, labels = self._load_data_and_label() if crop: nx = self.trainData[0][0].shape[0]/2 ny = self.trainData[0][0].shape[1]/2 X = np.zeros((self.batchSize, nx, ny, self.channels)) Y = np.zeros((self.batchSize, nx, ny, self.n_class)) else: nx = self.trainData[0][0].shape[0] ny = self.trainData[0][0].shape[1] X = np.zeros((self.batchSize, nx, ny, self.channels)) Y = np.zeros((self.batchSize, nx, ny, self.n_class)) for idx, val in enumerate(nextIdx): if crop: # crop before processing d = self.cropImage(self.trainData[0][val]) l = self.cropImage(self.trainData[1][val]) else: d = self.trainData[0][val] l = self.trainData[1][val] X[idx] = self.processData(d) Y[idx] = self.processLabels(l) # print(type(X)) return X, Y def getTrainSize(self): return self.trainSize class DataProviderTiled(DataProvider): def __init__(self,validationSize = 20, batchSize = 1, splits = 8): super(DataProviderTiled, self).__init__(validationSize, batchSize) self.splits = splits # def createAugmentedData(self, 
metaDataDict, dataPath): # augDataImg = [] # augDataGt = [] # for aImg, aGt in metaDataDict.items(): # img = misc.imread(dataPath +"/" + aImg) # read image # gt = misc.imread(dataPath +"/" + aGt) # read its GT # # tile img data # # print(self.splits) # imgTiles = self.split(img,self.splits) # for x in imgTiles: # augDataImg.append(x) # # tile gt data # gtTiles = self.split(gt,self.splits) # for x in gtTiles: # augDataGt.append(gt) # augImg, augGt = self.augmentData(img, gt) # for i in range(len(augImg)): # for x in self.split(augImg[i],self.splits): # augDataImg.append(x) # for x in self.split(augGt[i],self.splits): # augDataGt.append(x) # del img, gt, augImg, augGt # return augDataImg, augDataGt def createAugmentedData(self, metaDataDict, dataPath): augDataImg = [] augDataGt = [] for aImg, aGt in metaDataDict.items(): img = misc.imread(dataPath +"/" + aImg) # read image gt = misc.imread(dataPath +"/" + aGt) # read its GT # tile img data # print(self.splits) imgTiles = self.split(img,self.splits) for x in imgTiles: augDataImg.append(x) # tile gt data gtTiles = self.split(gt,self.splits) for x in gtTiles: augDataGt.append(x) del img, gt return augDataImg, augDataGt def split(self,data, splits): tiles = [] m = data.shape[0]/splits n = data.shape[1]/splits # print("m" , m) # print("n" , n) for i in range(splits): for j in range(splits): tiles.append(data[i*m:(i+1)*m,j*n:(j+1)*n]) return tiles def main(): dp = DataProviderTiled(splits = 12 , batchSize = 10) dp.readTrainData() # x ,y = dp.getTestData(4, crop= False) x ,y = dp(crop = False) print(dp.getTrainSize()) # # print(np.max(x)) # # print(np.max(y)) # # sanity check # print(y.shape) # # g = np.reshape(y[...,1],[-1,1]) # # print(g.shape) fig, ax = plt.subplots(2, 2) ax[0][0].imshow(x[2,:,:,0],cmap=plt.cm.gray) ax[1][0].imshow(y[2,:,:,1],cmap=plt.cm.gray) ax[0][1].imshow(x[0,:,:,0],cmap=plt.cm.gray) ax[1][1].imshow(y[0,:,:,1],cmap=plt.cm.gray) plt.show() if __name__ == '__main__': main()
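The provider above augments each image/ground-truth pair with flips and 90-degree rotations and, in the tiled variant, cuts arrays into a grid of equally sized tiles. Below is a minimal, numpy-only sketch of those two steps; the toy array shapes, the use of np.rot90 in place of scipy.ndimage.rotate, and the explicit integer division are illustrative assumptions rather than the module's exact behaviour.

import numpy as np


def augment(img, gt):
    """Return flipped and rotated copies of an (image, ground-truth) pair."""
    pairs = [(np.flipud(img), np.flipud(gt)),   # up-down flip
             (np.fliplr(img), np.fliplr(gt))]   # left-right flip
    for k in (1, 2, 3):                         # 90, 180 and 270 degree rotations
        pairs.append((np.rot90(img, k), np.rot90(gt, k)))
    return pairs


def split_into_tiles(data, splits):
    """Cut a 2-D array into splits x splits equally sized tiles."""
    m = data.shape[0] // splits   # integer division, as in the tiled provider
    n = data.shape[1] // splits
    return [data[i * m:(i + 1) * m, j * n:(j + 1) * n]
            for i in range(splits) for j in range(splits)]


if __name__ == '__main__':
    image = np.arange(64, dtype=np.float32).reshape(8, 8)
    mask = (image > 31).astype(np.uint8)
    print(len(augment(image, mask)))            # 5 extra pairs per original image
    print(split_into_tiles(image, 2)[0].shape)  # (4, 4)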
gpl-3.0
h2oai/h2o-3
h2o-py/tests/testdir_algos/kmeans/pyunit_iris_h2o_vs_sciKmeans.py
8
1469
from __future__ import print_function
from builtins import zip
from builtins import range
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from h2o.estimators.kmeans import H2OKMeansEstimator


def iris_h2o_vs_sciKmeans():
    # Connect to a pre-existing cluster
    # connect to localhost:54321

    iris_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
    iris_sci = np.genfromtxt(pyunit_utils.locate("smalldata/iris/iris.csv"), delimiter=',')
    iris_sci = iris_sci[:, 0:4]

    s = [[4.9, 3.0, 1.4, 0.2],
         [5.6, 2.5, 3.9, 1.1],
         [6.5, 3.0, 5.2, 2.0]]
    start = h2o.H2OFrame(s)

    h2o_km = H2OKMeansEstimator(k=3, user_points=start, standardize=False)
    h2o_km.train(x=list(range(4)), training_frame=iris_h2o)

    sci_km = KMeans(n_clusters=3, init=np.asarray(s), n_init=1)
    sci_km.fit(iris_sci)

    # Log.info("Cluster centers from H2O:")
    print("Cluster centers from H2O:")
    h2o_centers = h2o_km.centers()
    print(h2o_centers)

    # Log.info("Cluster centers from scikit:")
    print("Cluster centers from scikit:")
    sci_centers = sci_km.cluster_centers_.tolist()

    for hcenter, scenter in zip(h2o_centers, sci_centers):
        for hpoint, spoint in zip(hcenter, scenter):
            assert abs(hpoint - spoint) < 1e-10, "expected centers to be the same"


if __name__ == "__main__":
    pyunit_utils.standalone_test(iris_h2o_vs_sciKmeans)
else:
    iris_h2o_vs_sciKmeans()
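The element-wise comparison above works because both libraries are seeded with the same user-supplied starting points, so the clusters come back in the same order. A slightly more defensive check sorts the centres before comparing; the helper below is an illustrative numpy-only sketch and not part of the test suite.

import numpy as np


def centers_match(a, b, tol=1e-10):
    """True if two lists of cluster centres agree within tol, ignoring order."""
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    if a.shape != b.shape:
        return False
    # Sort rows lexicographically so the comparison is label-order independent.
    a = a[np.lexsort(a.T[::-1])]
    b = b[np.lexsort(b.T[::-1])]
    return np.allclose(a, b, rtol=0.0, atol=tol)


if __name__ == '__main__':
    c1 = [[6.5, 3.0, 5.2, 2.0], [4.9, 3.0, 1.4, 0.2]]
    c2 = [[4.9, 3.0, 1.4, 0.2], [6.5, 3.0, 5.2, 2.0]]
    print(centers_match(c1, c2))  # True: same centres listed in a different order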
apache-2.0
victorbergelin/scikit-learn
sklearn/kernel_approximation.py
258
17973
""" The :mod:`sklearn.kernel_approximation` module implements several approximate kernel feature maps base on Fourier transforms. """ # Author: Andreas Mueller <[email protected]> # # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import svd from .base import BaseEstimator from .base import TransformerMixin from .utils import check_array, check_random_state, as_float_array from .utils.extmath import safe_sparse_dot from .utils.validation import check_is_fitted from .metrics.pairwise import pairwise_kernels class RBFSampler(BaseEstimator, TransformerMixin): """Approximates feature map of an RBF kernel by Monte Carlo approximation of its Fourier transform. It implements a variant of Random Kitchen Sinks.[1] Read more in the :ref:`User Guide <rbf_kernel_approx>`. Parameters ---------- gamma : float Parameter of RBF kernel: exp(-gamma * x^2) n_components : int Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. Notes ----- See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and Benjamin Recht. [1] "Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning" by A. Rahimi and Benjamin Recht. (http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) """ def __init__(self, gamma=1., n_components=100, random_state=None): self.gamma = gamma self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X, accept_sparse='csr') random_state = check_random_state(self.random_state) n_features = X.shape[1] self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal( size=(n_features, self.n_components))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X, y=None): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = check_array(X, accept_sparse='csr') projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class SkewedChi2Sampler(BaseEstimator, TransformerMixin): """Approximates feature map of the "skewed chi-squared" kernel by Monte Carlo approximation of its Fourier transform. Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`. Parameters ---------- skewedness : float "skewedness" parameter of the kernel. Needs to be cross-validated. n_components : int number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. 
random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. References ---------- See "Random Fourier Approximations for Skewed Multiplicative Histogram Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. See also -------- AdditiveChi2Sampler : A different approach for approximating an additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. """ def __init__(self, skewedness=1., n_components=100, random_state=None): self.skewedness = skewedness self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = (1. / np.pi * np.log(np.tan(np.pi / 2. * uniform))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X, y=None): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = as_float_array(X, copy=True) X = check_array(X, copy=False) if (X < 0).any(): raise ValueError("X may not contain entries smaller than zero.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class AdditiveChi2Sampler(BaseEstimator, TransformerMixin): """Approximate feature map for additive chi2 kernel. Uses sampling the fourier transform of the kernel characteristic at regular intervals. Since the kernel that is to be approximated is additive, the components of the input vectors can be treated separately. Each entry in the original space is transformed into 2*sample_steps+1 features, where sample_steps is a parameter of the method. Typical values of sample_steps include 1, 2 and 3. Optimal choices for the sampling interval for certain data ranges can be computed (see the reference). The default values should be reasonable. Read more in the :ref:`User Guide <additive_chi_kernel_approx>`. Parameters ---------- sample_steps : int, optional Gives the number of (complex) sampling points. sample_interval : float, optional Sampling interval. Must be specified when sample_steps not in {1,2,3}. Notes ----- This estimator approximates a slightly different version of the additive chi squared kernel then ``metric.additive_chi2`` computes. See also -------- SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi squared kernel. 
References ---------- See `"Efficient additive kernels via explicit feature maps" <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_ A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, 2011 """ def __init__(self, sample_steps=2, sample_interval=None): self.sample_steps = sample_steps self.sample_interval = sample_interval def fit(self, X, y=None): """Set parameters.""" X = check_array(X, accept_sparse='csr') if self.sample_interval is None: # See reference, figure 2 c) if self.sample_steps == 1: self.sample_interval_ = 0.8 elif self.sample_steps == 2: self.sample_interval_ = 0.5 elif self.sample_steps == 3: self.sample_interval_ = 0.4 else: raise ValueError("If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval") else: self.sample_interval_ = self.sample_interval return self def transform(self, X, y=None): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Returns ------- X_new : {array, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps + 1)) Whether the return value is an array of sparse matrix depends on the type of the input X. """ msg = ("%(name)s is not fitted. Call fit to set the parameters before" " calling transform") check_is_fitted(self, "sample_interval_", msg=msg) X = check_array(X, accept_sparse='csr') sparse = sp.issparse(X) # check if X has negative values. Doesn't play well with np.log. if ((X.data if sparse else X) < 0).any(): raise ValueError("Entries of X must be non-negative.") # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) def _transform_dense(self, X): non_zero = (X != 0.0) X_nz = X[non_zero] X_step = np.zeros_like(X) X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X_nz) step_nz = 2 * X_nz * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) X_new.append(X_step) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) X_new.append(X_step) return np.hstack(X_new) def _transform_sparse(self, X): indices = X.indices.copy() indptr = X.indptr.copy() data_step = np.sqrt(X.data * self.sample_interval_) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X.data) step_nz = 2 * X.data * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) data_step = factor_nz * np.cos(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) data_step = factor_nz * np.sin(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) return sp.hstack(X_new) class Nystroem(BaseEstimator, TransformerMixin): """Approximate a kernel map using a subset of the training data. Constructs an approximate feature map for an arbitrary kernel using a subset of the data as basis. Read more in the :ref:`User Guide <nystroem_kernel_approx>`. Parameters ---------- kernel : string or callable, default="rbf" Kernel map to be approximated. 
A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. n_components : int Number of features to construct. How many data points will be used to construct the mapping. gamma : float, default=None Gamma parameter for the RBF, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. Attributes ---------- components_ : array, shape (n_components, n_features) Subset of training points used to construct the feature map. component_indices_ : array, shape (n_components) Indices of ``components_`` in the training set. normalization_ : array, shape (n_components, n_components) Normalization matrix needed for embedding. Square root of the kernel matrix on ``components_``. References ---------- * Williams, C.K.I. and Seeger, M. "Using the Nystroem method to speed up kernel machines", Advances in neural information processing systems 2001 * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical Comparison", Advances in Neural Information Processing Systems 2012 See also -------- RBFSampler : An approximation to the RBF kernel using random Fourier features. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. """ def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3, kernel_params=None, n_components=100, random_state=None): self.kernel = kernel self.gamma = gamma self.coef0 = coef0 self.degree = degree self.kernel_params = kernel_params self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Training data. """ X = check_array(X, accept_sparse='csr') rnd = check_random_state(self.random_state) n_samples = X.shape[0] # get basis vectors if self.n_components > n_samples: # XXX should we just bail? n_components = n_samples warnings.warn("n_components > n_samples. This is not possible.\n" "n_components was set to n_samples, which results" " in inefficient evaluation of the full kernel.") else: n_components = self.n_components n_components = min(n_samples, n_components) inds = rnd.permutation(n_samples) basis_inds = inds[:n_components] basis = X[basis_inds] basis_kernel = pairwise_kernels(basis, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # sqrt of kernel matrix on basis vectors U, S, V = svd(basis_kernel) S = np.maximum(S, 1e-12) self.normalization_ = np.dot(U * 1. / np.sqrt(S), V) self.components_ = basis self.component_indices_ = inds return self def transform(self, X): """Apply feature map to X. Computes an approximate feature map using the kernel between some training points and X. 
Parameters ---------- X : array-like, shape=(n_samples, n_features) Data to transform. Returns ------- X_transformed : array, shape=(n_samples, n_components) Transformed data. """ check_is_fitted(self, 'components_') X = check_array(X, accept_sparse='csr') kernel_params = self._get_kernel_params() embedded = pairwise_kernels(X, self.components_, metric=self.kernel, filter_params=True, **kernel_params) return np.dot(embedded, self.normalization_.T) def _get_kernel_params(self): params = self.kernel_params if params is None: params = {} if not callable(self.kernel): params['gamma'] = self.gamma params['degree'] = self.degree params['coef0'] = self.coef0 return params
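A short usage sketch for the two main approximators defined in this module, on synthetic data with made-up parameters: both produce an explicit feature map Z whose Gram matrix Z Z^T approximates the exact RBF kernel matrix.

import numpy as np
from sklearn.kernel_approximation import Nystroem, RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
gamma = 0.5

exact = rbf_kernel(X, gamma=gamma)  # the kernel matrix being approximated

# Monte Carlo approximation via random Fourier features.
Z_rff = RBFSampler(gamma=gamma, n_components=500, random_state=0).fit_transform(X)

# Data-dependent Nystroem approximation built from 25 basis points.
Z_nys = Nystroem(kernel='rbf', gamma=gamma, n_components=25,
                 random_state=0).fit_transform(X)

print('RFF mean abs error:     ', np.abs(Z_rff.dot(Z_rff.T) - exact).mean())
print('Nystroem mean abs error:', np.abs(Z_nys.dot(Z_nys.T) - exact).mean())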
bsd-3-clause
ZENGXH/scikit-learn
sklearn/utils/tests/test_linear_assignment.py
421
1349
# Author: Brian M. Clapper, G Varoquaux # License: BSD import numpy as np # XXX we should be testing the public API here from sklearn.utils.linear_assignment_ import _hungarian def test_hungarian(): matrices = [ # Square ([[400, 150, 400], [400, 450, 600], [300, 225, 300]], 850 # expected cost ), # Rectangular variant ([[400, 150, 400, 1], [400, 450, 600, 2], [300, 225, 300, 3]], 452 # expected cost ), # Square ([[10, 10, 8], [9, 8, 1], [9, 7, 4]], 18 ), # Rectangular variant ([[10, 10, 8, 11], [9, 8, 1, 1], [9, 7, 4, 10]], 15 ), # n == 2, m == 0 matrix ([[], []], 0 ), ] for cost_matrix, expected_total in matrices: cost_matrix = np.array(cost_matrix) indexes = _hungarian(cost_matrix) total_cost = 0 for r, c in indexes: x = cost_matrix[r, c] total_cost += x assert expected_total == total_cost indexes = _hungarian(cost_matrix.T) total_cost = 0 for c, r in indexes: x = cost_matrix[r, c] total_cost += x assert expected_total == total_cost
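As the XXX comment in the test notes, the same checks can be written against a public API. Below is a hedged sketch using scipy.optimize.linear_sum_assignment (available since SciPy 0.17) that reproduces the first square-matrix case and the transposition invariance; it is an alternative formulation, not a change to the test itself.

import numpy as np
from scipy.optimize import linear_sum_assignment


def assignment_cost(cost_matrix):
    """Minimum total cost of a one-to-one row/column assignment."""
    cost_matrix = np.asarray(cost_matrix)
    row_ind, col_ind = linear_sum_assignment(cost_matrix)
    return cost_matrix[row_ind, col_ind].sum()


if __name__ == '__main__':
    square = np.array([[400, 150, 400],
                       [400, 450, 600],
                       [300, 225, 300]])
    print(assignment_cost(square))    # 850, as expected for the first matrix above
    print(assignment_cost(square.T))  # 850: the optimum is unchanged by transposition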
bsd-3-clause
nkhuyu/FTRLp
FTRLp.py
1
31158
from __future__ import division from __future__ import print_function """ ------------ Follow The Regularized Leader - Proximal ------------ FTRL-P is an online classification algorithm that combines both L1 and L2 norms, particularly suited for large data sets with extremely high dimensionality. This implementation follow the algorithm by H. B. McMahan et. al. It minimizes the LogLoss function iteratively with a combination of L2 and L1 (centralized at the current point) norms and adaptive, per coordinate learning rates. This algorithm is efficient at obtaining sparsity and has proven to perform very well in massive Click-Through-Rate prediction tasks. This module contains two objects... References: * Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems and L1 Regularization, H. Brendan McMahan * Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et. al. """ from math import log, exp, fabs, sqrt from csv import DictReader from datetime import datetime from random import random def log_loss(y, p): """ --- Log_loss computing function A function to compute the log loss of a predicted probability p given a true target y. :param y: True target value :param p: Predicted probability :return: Log loss. """ p = max(min(p, 1. - 10e-15), 10e-15) return -log(p) if y == 1 else -log(1. - p) class DataGen(object): """ DataGen is an object to generate the data that is fed to the classifier. It reads the data file one row at a time, hashes it and returns it. The names and types of columns must be passed to it, so that categorical, target, numerical and identification columns can be treated differently. It also keeps track of the name and position of all features to allow the classifier to keep track of the coefficients by feature. """ def __init__(self, max_features, target, descriptive=(), categorical=(), numerical=None, transformation=None): """ The object initialized with the maximum number of features to be generated and the names of the appropriate columns. Categorical columns are hashed while numerical columns are kept as is, therefore care must be taken with normalization and pre processing. :param max_features: The maximum number of features to generate. It includes all numerical and categorical features. Must be greater than the number of numerical features. :param target: The name of the target variable. It must be a binary variable taking values in {0, 1}. :param descriptive: Descriptive features that are used to identify the samples but are not to be used for modelling, such as IDs, public identifiers, etc. :param categorical: Categorical variable to be hashed. :param numerical: Numerical variable. These will not be hashed but will be used in the modelling phase. """ # --- Instance variables. # Instance variables are created for columns names and the number of numerical # columns in addition to all of the object's parameters. # Stores the maximum number of features to generate while hashing self.mf = max_features # Stores the name of the target variable. self.y = target # Stores a list with the names of all descriptive variables. self.ids = descriptive # Stores a list with the names of all categorical variables. self.cat = categorical # Stores a list with the names of all numerical variables. self.num = numerical # Stores a dictionary with the names of numerical variable to apply a given function to. 
self.tra = transformation if transformation is not None else {} # Dictionary to store names self.names = {} # --- Numerical features # Numerical features are indexed in sorted order. The number # of features is determined by the variable size. The value # of each feature is just the value read from the file. Start # by defining what is numeric. If the user does not pass the # names of all numerical features, the code will assume # every columns that is not id, target or categorical is # numeric and find their name when the training process begin. if self.num is not None: self.num_cols = sorted(self.num) # Store the names in our names dictionary self.names.update(dict(zip(self.num_cols, range(len(self.num_cols))))) else: self.num_cols = [] # --- Something to build model on # Make sure the user passed some information on the columns to # be used to build the model upon assert len(self.cat) + len(self.num_cols) > 0, 'At least one categorical or numerical feature must ' \ 'be provided.' def _fetch(self, path): """ This method is the core reason this object exists. It is a python generator that hashes categorical variables, combines them to numerical variables and yields all the relevant information, row by row. :param path: Path of the data file to be read. :return: YIELDS the current row, ID information, feature values and the target value. even if the file does not contain a target field it returns a target value of zero anyway. """ for t, row in enumerate(DictReader(open(path))): # --- Variables # t: The current line being read # row: All the values in this line # --- Ids and other descriptive fields # Process any descriptive fields and put it all in a list. ids = [] for ID in self.ids: ids.append(row[ID]) del row[ID] # --- Target # Process target and delete its entry from row if it exists # otherwise just ignore and move along y = 0. if self.y in row: if row[self.y] == '1': y = 1. del row[self.y] # --- Features # Initialize an empty dictionary to hold feature # indexes and their corresponding values. # x = {} # --- Enough features? # For the very first row make sure we have enough features (max features # is large enough) by computing the number of numerical columns and # asserting that the maximum number of features is larger than it. if t == 0: # --- Hash size # Computes a constant to add to hash index, it dictates the # number of features that will not be hashed num_size = len(self.num_cols) size = num_size + len(self.tra) # Make sure there is enough space for hashing assert self.mf > size, 'Not enough dimensions to fit all features.' # --- Numerical Variables # Now we loop over numerical variables for i, key in enumerate(self.num_cols): # --- No transformation # If no transformation is necessary, just store the actual value # of the variable. x[i] = float(row[key]) # --- Transformations # Create on the fly transformed variables. The user passes a map of the # name of the new variable to a tuple containing the name of the original # variable to be transformed and the function to be applied to it. # Once completed the new name is appended to the names dictionary with its # corresponding index.# for i, key in enumerate(self.tra): # Start by addition to the data array x the new transformed values # by looping over new_features and applying the transformation to the # desired old feature. x[num_size + i] = self.tra[key][1](row[self.tra[key][0]]) # Create a key in names dictionary with the new name and its # corresponding index. 
self.names[key] = num_size + i # --- Categorical features # Categorical features are hashed. For each different kind a # hashed index is created and a value of 1 is 'stored' in that # position. for key in self.cat: # --- Category # Get the categorial variable from row value = row[key] # --- Hash # One-hot encode everything with hash trick index = (abs(hash(key + '_' + value)) % (self.mf - size)) + size x[index] = 1. # --- Save Name # Save the name and index to the names dictionary if its a new feature # AND if there's still enough space. if key + '_' + value not in self.names and len(self.names) < self.mf: self.names[key + '_' + value] = index # Yield everything. yield t, ids, x, y def train(self, path): """ The train method is just a wrapper around the _fetch generator to comply with sklearn's API. :param path: The path for the training file. :return: YIELDS row, features, target value """ # --- Generates train data # This is just a generator on top of the basic _fetch. If this was python 3 I # could use 'yield from', but I don't think this syntax exists in python 2.7, # so I opted to use the explicit, less pythonic way. for t, ids, x, y in self._fetch(path): # --- Variables # t: Current row # ids: List of ID information # x: Feature values # y: Target values yield t, x, y def test(self, path): """ The test method is just a wrapper around the _fetch generator to comply with sklearn's API. :param path: The path for the test file. :return: YIELDS row, features """ # --- Generates test data # This is just a generator on top of the basic _fetch. If this was python 3 I # could use 'yield from', but I don't think this syntax exists in python 2.7, # so I opted to use the explicit, less pythonic way. for t, ids, x, y in self._fetch(path): # --- Variables # t: Current row # ids: List of ID information # x: Feature values # y: Target values yield t, x class FTRLP(object): """ --- Follow The Regularized Leader - Proximal --- FTRL-P is an online classification algorithm that combines both L1 and L2 norms, particularly suited for large data sets with extremely high dimensionality. This implementation follow the algorithm by H. B. McMahan et. al. It minimizes the LogLoss function iteratively with a combination of L2 and L1 (centralized at the current point) norms and adaptive, per coordinate learning rates. This algorithm is efficient at obtaining sparsity and has proven to perform very well in massive Click-Through-Rate prediction tasks. References: * Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems and L1 Regularization, H. Brendan McMahan * Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et. al. """ def __init__(self, alpha=1, beta=1, l1=1, l2=1, subsample=1, epochs=1, rate=0): """ Initializes the classifier's learning rate constants alpha and beta, the regularization constants L1 and L2, and the maximum number of features (limiting factor of the hash function). The per feature learning rate is given by: eta = alpha / ( beta + sqrt( sum g**g ) ) :param alpha: Learning rate's proportionality constant. :param beta: Learning rate's parameter. :param l1: l1 regularization constant. :param l2: l2 regularization constant. :return: """ # --- Classifier Parameters # The FTRLP algorithm has four free parameters that can be tuned as pleased. # Learning rate's proportionality constant. self.alpha = alpha # Learning rate's parameter. self.beta = beta # L1 regularization constant. self.l1 = l1 # L2 regularization constant. 
self.l2 = l2 # --- Log likelihood # Stores the log likelihood during the whole # fitting process. self.log_likelihood_ = 0 self.loss = [] # --- Weight parameters. # Lists and dictionaries to hold the weights. Initiate # the weight vector z and learning rate n as None so that # when self.train is called multiple times it will not # overwrite the stored values. This essentially allows epoch # training to take place, albeit a little bit ugly. self.z = None self.n = None # The weight vector used for prediction is constructed on the fly # and, in order to keep the memory cost low, it is a dictionary # that receives values and keys as needed. # --- Coefficients # Lists to store the coefficients and their corresponding names. # Initialized to None and constructed once the training method is # completed. In case of multiple epochs, these quantities will be # computed multiple times. self.coef_ = {} self.cname = None # --- Target Ratio # Store the ratio of each class of a binnary target variable to use # it to make weighted discrete label predictions. self.target_ratio = 0. # --- Printing Rate # Number of samples to train and predict on before printing # current status self.rate = rate # --- Subsample # While online methods can't be shuffle, combining subsampling of # the training set with multiple epoch training gives similar results. self.subsample = subsample # --- Epochs # something... self.epochs = epochs # --- Flag for partial fit # Keeps a flag to allow the user to train multiple times # without overwriting the object. self.fit_flag = False def _build_p(self, data_gen, path): # Maybe is worth migrating the weight construction algorithm # to here, I think it could clean up the code a little a bit # in both train and predict methods. pass def _clear_params(self): """ If the fit method is called multiple times, all trained parameters must be cleared allowing for a fresh start. This function simply resets everything back to square one. :return: Nothing """ # All models parameters are set to their original value (see # __init__ description self.log_likelihood_ = 0 self.loss = [] self.z = None self.n = None self.coef_ = {} self.cname = None def get_params(self, deep=True): """ A function to return a map of parameters names and values. :param deep: Not sure yet, gotta check sklearn usage. :return: Dictionary mapping parameters names to their values """ ps = {'alpha': self.alpha, 'beta': self.beta, 'l1': self.l1, 'l2': self.l2, 'subsample': self.subsample, 'epochs': self.epochs, 'rate': self.rate} return ps def set_params(self, **params): """ :param params: :return: """ for key, value in params.iteritems(): setattr(self, key, value) def _update(self, y, p, x, w): """ # --- Update weight vector and learning rate. # With the prediction round completed we can proceed to # updating the weight vector z and the learning rate eta # based on the last observed label. # To do so we will use the computed probability and target # value to find the gradient loss and continue from there. # The gradient for the log likelihood for round t can easily # be shown to be: # g_i = (p - y) * x_i, (round t) # The remaining quantities are updated according to the # minimization procedure outlined in [2]. :param y: True target variable :param p: Predicted probability for the current sample :param x: Non zero feature values :param w: Weights :return: Nothing """ # --- Update loop # Loop over all relevant indexes and update all values # accordingly. 
for i in x.keys(): # --- Compute Gradient of LogLoss g = (p - y) * x[i] # --- Update constant sigma # Note that this upgrade is equivalent to # (eta_(t, i))^-1 - (eta_(t - 1, i))^-1 # as discussed in [2]. s = (sqrt(self.n[i] + g * g) - sqrt(self.n[i])) / self.alpha # --- Increment changes # Finally, increment the appropriate changes to weights and # learning rate vectors. self.z[i] += g - s * w[i] self.n[i] += g * g def _train(self, data_gen, path): """ --- Fitting method --- Online fitting method. It takes one sample at a time, builds the weight vector on the fly and computes the dot product of weight vector and values and a prediction is made. Then the true label of the target variable is observed and the loss is added. Once this is completed the weights are updated based on the previously observed values. :param data_gen: An instance of the DataGen class :param path: The path to the training set :return: """ # Best way? Proper coding means no access to protected members... if self.z is None and self.n is None: self.z = [0.] * data_gen.mf self.n = [0.] * data_gen.mf # --- Start the clock! start_time = datetime.now() for t, x, y in data_gen.train(path): # --- Variables # t: Current row # x: Feature values # y: Target values # --- Target Ratio Update # Rolling calculation of the target average self.target_ratio = (1.0 * (t * self.target_ratio + y)) / (t + 1) # --- Stochastic sample selection # Chose whether or not to use a sample in # training time. Since online methods can't # really be shuffle we can use this combined # with multiple epochs to create heterogeneity. #if random() > self.subsample and ((t + 1) % self.rate != 0): if random() > self.subsample and (t + 1) % self.rate != 0: continue # --- Dot product init. # The dot product is computed as the weights are calculated, # here it is initiated at zero. wtx = 0 # --- Real time weights # Initialize an empty dictionary to hold the weights w = {} # --- Weights and prediction # Computes the weights for numerical features using the # indexes and values present in the x dictionary. And make # a prediction. # This first loop build the weight vector on the fly. Since # we expect most weights to be zero, the weight vector can # be constructed in real time. Furthermore, there is no # reason to store it, neither to clear it, since at each # iteration only the relevant indexes are populated and used. for indx in x.keys(): # --- Loop over indicator I # x.keys() carries all the indexes of the feature # vector with non-zero entries. Therefore, we can # simply loop over it since anything else will not # contribute to the dot product w.x, and, consequently # to the prediction. if fabs(self.z[indx]) <= self.l1: # --- L1 regularization # If the condition on the absolute value of the # vector Z is not met, the weight coefficient is # set exactly to zero. w[indx] = 0 else: # --- Non zero weight # Provided abs(z_i) is large enough, the weight w_i # is computed. First, the sign of z_i is determined. sign = 1. if self.z[indx] >= 0 else -1. # Then the value of w_i if computed and stored. Note # that any previous value w_i may have had will be # overwritten here. Which is fine since it will not # be used anywhere outside this (t) loop. w[indx] = - (self.z[indx] - sign * self.l1) / \ (self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha) # --- Update dot product # Once the value of w_i is computed we can use to compute # the i-th contribution to the dot product w.x. 
Which, here # is being done inside the index loop, compute only coordinates # that could possible be non-zero. wtx += w[indx] * x[indx] # --- Make a prediction # With the w.x dot product in hand we can compute the output # probability by putting wtx through the sigmoid function. # We limit wtx value to lie in the [-35, 35] interval to # avoid round off errors. p = 1. / (1. + exp(-max(min(wtx, 35.), -35.))) # --- Update the loss function # Now we look at the target value and use it, together with the # output probability that was just computed to find the loss we # suffer this round. self.log_likelihood_ += log_loss(y, p) # --- Verbose section if (self.rate > 0) and (t + 1) % self.rate == 0: # Append to the loss list. self.loss.append(self.log_likelihood_) # Print all the current information print('Training Samples: {0:9} | ' 'Loss: {1:11.2f} | ' 'Time taken: {2:4} seconds'.format(t + 1, self.log_likelihood_, (datetime.now() - start_time).seconds)) # --- Update weights # Finally, we now how well we did this round and move on to # updating the weights based on the current status of our # knowledge. self._update(y, p, x, w) # --- Coefficient names and indexes # Bind the feature names to their corresponding coefficient obtained from # the regression. self.coef_.update(dict([[key, self.z[data_gen.names[key]]] for key in data_gen.names.keys()])) def fit(self, data_gen, path): """ Epoch wrapper around the main fitting method _train :param data_gen: An instance of the DataGen class :param path: The path to the training set :return: """ # --- Check fit flag # Make sure the fit methods is starting from a clean slate by # checking the fit_flag variable and calling the _clear_params # function if necessary. # While always calling _clear_params would do the job, by setting # this flag we are also able to call fit multiple times WITHOUT # clearing all parameters --- See partial_fit. if self.fit_flag: self._clear_params() # --- Start the clock! total_time = datetime.now() # Train epochs for epoch in range(self.epochs): # --- Start the clock! epoch_time = datetime.now() # --- Verbose # Print epoch if verbose is turned on if self.rate > 0: print('TRAINING EPOCH: {0:2}'.format(epoch + 1)) print('-' * 18) self._train(data_gen, path) # --- Verbose # Print time taken if verbose is turned on if self.rate > 0: print('EPOCH {0:2} FINISHED IN {1} seconds'.format(epoch + 1, (datetime.now() - epoch_time).seconds)) print() # --- Verbose # Print fit information if verbose is on if self.rate > 0: print(' --- TRAINING FINISHED IN ' '{0} SECONDS WITH LOSS {1:.2f} ---'.format((datetime.now() - total_time).seconds, self.log_likelihood_)) print() # --- Fit Flag # Set fit_flag to true. If fit is called again this is will trigger # the call of _clean_params. See partial_fit for different usage. self.fit_flag = True def partial_fit(self, data_gen, path): """ Simple solution to allow multiple fit calls without overwriting previously calculated weights, losses and etc. :param data_gen: An instance of the DataGen class :param path: The path to the training set :return: """ # --- Fit Flag # Start by reseting fit_flag to false to "trick" # the fit method into keep training without overwriting # previously calculated quantities. self.fit_flag = False # --- Fit # Call the fit method and proceed as normal self.fit(data_gen, path) def predict_proba(self, data_gen, path): """ --- Predicting Probabilities method --- Predictions... 
:param data_gen: An instance of the DataGen class :param path: The path to the test set :return: A list with predicted probabilities """ # --- Results # Initialize an empty list to hold predicted values. result = [] # --- Start the clock! start_time = datetime.now() for t, x in data_gen.test(path): # --- Variables # t: Current row # x: Feature values # --- Dot product init. # The dot product is computed as the weights are calculated, # here it is initiated at zero. wtx = 0 # --- Real time weights # Initialize an empty dictionary to hold the weights w = {} # --- Weights and prediction # Computes the weights for numerical features using the # indexes and values present in the x dictionary. And make # a prediction. # This first loop build the weight vector on the fly. Since # we expect most weights to be zero, the weight vector can # be constructed in real time. Furthermore, there is no # reason to store it, neither to clear it, since at each # iteration only the relevant indexes are populated and used. for indx in x.keys(): # --- Loop over indicator I # x.keys() carries all the indexes of the feature # vector with non-zero entries. Therefore, we can # simply loop over it since anything else will not # contribute to the dot product w.x, and, consequently # to the prediction. if fabs(self.z[indx]) <= self.l1: # --- L1 regularization # If the condition on the absolute value of the # vector Z is not met, the weight coefficient is # set exactly to zero. w[indx] = 0 else: # --- Non zero weight # Provided abs(z_i) is large enough, the weight w_i # is computed. First, the sign of z_i is determined. sign = 1. if self.z[indx] >= 0 else -1. # Then the value of w_i if computed and stored. Note # that any previous value w_i may have had will be # overwritten here. Which is fine since it will not # be used anywhere outside this (t) loop. w[indx] = - (self.z[indx] - sign * self.l1) / \ (self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha) # --- Update dot product # Once the value of w_i is computed we can use to compute # the i-th contribution to the dot product w.x. Which, here # is being done inside the index loop, compute only coordinates # that could possible be non-zero. wtx += w[indx] * x[indx] # --- Make a prediction # With the w.x dot product in hand we can compute the output # probability by putting wTx through the sigmoid function. # We limit wTx value to lie in the [-35, 35] interval to # avoid round off errors. result.append(1. / (1. + exp(-max(min(wtx, 35.), -35.)))) # Verbose section - Still needs work... if (t + 1) % self.rate == 0: # print some stuff print('Test Samples: {0:8} | ' 'Time taken: {1:3} seconds'.format(t + 1, (datetime.now() - start_time).seconds)) # All done, return the predictions! return result def predict(self, data_gen, path): """ --- Predicting method --- Predictions... :param data_gen: An instance of the DataGen class :param path: The path to the test set :return: A list with predicted probabilities """ # --- Probabilities # Compute probabilities by invoking the predict_proba method probs = self.predict_proba(data_gen, path) # --- Return # Return binary labels. The threshold is set using the mean value of the # target variable. return map(lambda x: 0 if x <= self.target_ratio else 1, probs)
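The essence of the update described in _update and the lazy weight construction used in _train and predict_proba fits in a few lines. The class below is a compact, dictionary-based sketch of FTRL-Proximal for hashed sparse features; the names and default hyper-parameters are illustrative and it is not a drop-in replacement for FTRLP.

from math import exp, sqrt


class TinyFTRL(object):
    """Per-coordinate FTRL-Proximal for logistic loss on sparse features."""

    def __init__(self, alpha=0.1, beta=1.0, l1=1.0, l2=1.0):
        self.alpha, self.beta, self.l1, self.l2 = alpha, beta, l1, l2
        self.z = {}  # lazily stored per-coordinate state
        self.n = {}  # per-coordinate sum of squared gradients

    def _weight(self, i):
        z = self.z.get(i, 0.0)
        if abs(z) <= self.l1:
            return 0.0  # the L1 term produces an exact zero
        sign = 1.0 if z >= 0 else -1.0
        return -(z - sign * self.l1) / (
            self.l2 + (self.beta + sqrt(self.n.get(i, 0.0))) / self.alpha)

    def predict(self, x):
        """Probability of the positive class for a {index: value} sample."""
        wtx = sum(self._weight(i) * v for i, v in x.items())
        return 1.0 / (1.0 + exp(-max(min(wtx, 35.0), -35.0)))

    def update(self, x, y):
        """One online step: predict, then adjust z and n per coordinate."""
        p = self.predict(x)
        for i, v in x.items():
            g = (p - y) * v                                    # gradient of the log loss
            n_i = self.n.get(i, 0.0)
            sigma = (sqrt(n_i + g * g) - sqrt(n_i)) / self.alpha
            self.z[i] = self.z.get(i, 0.0) + g - sigma * self._weight(i)
            self.n[i] = n_i + g * g
        return p


if __name__ == '__main__':
    model = TinyFTRL()
    sample = {0: 1.0, 3: 1.0, 7: 1.0}  # hashed feature indexes with value 1
    for _ in range(100):
        model.update(sample, 1.0)
    print(model.predict(sample))  # probability has moved above 0.5 toward the label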
mit
Garrett-R/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
40
7535
import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = 
MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10*len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
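A small usage sketch on synthetic data, mirroring what the reconstruction test above asserts: the sparse codes times the learned dictionary should be close to the input. The data shapes and parameters follow the test's own setup.

import numpy as np
from sklearn.decomposition import DictionaryLearning

rng = np.random.RandomState(0)
X = rng.randn(10, 8)

dico = DictionaryLearning(n_components=12, transform_algorithm='omp',
                          transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)

print(code.shape)              # (10, 12): one sparse code per sample
print(dico.components_.shape)  # (12, 8): overcomplete dictionary atoms
# Reconstruction error; the test above asserts this is close to zero.
print(np.abs(np.dot(code, dico.components_) - X).max())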
bsd-3-clause
dingocuster/scikit-learn
sklearn/neural_network/rbm.py
206
12292
"""Restricted Boltzmann Machine """ # Authors: Yann N. Dauphin <[email protected]> # Vlad Niculae # Gabriel Synnaeve # Lars Buitinck # License: BSD 3 clause import time import numpy as np import scipy.sparse as sp from ..base import BaseEstimator from ..base import TransformerMixin from ..externals.six.moves import xrange from ..utils import check_array from ..utils import check_random_state from ..utils import gen_even_slices from ..utils import issparse from ..utils.extmath import safe_sparse_dot from ..utils.extmath import log_logistic from ..utils.fixes import expit # logistic function from ..utils.validation import check_is_fitted class BernoulliRBM(BaseEstimator, TransformerMixin): """Bernoulli Restricted Boltzmann Machine (RBM). A Restricted Boltzmann Machine with binary visible units and binary hiddens. Parameters are estimated using Stochastic Maximum Likelihood (SML), also known as Persistent Contrastive Divergence (PCD) [2]. The time complexity of this implementation is ``O(d ** 2)`` assuming d ~ n_features ~ n_components. Read more in the :ref:`User Guide <rbm>`. Parameters ---------- n_components : int, optional Number of binary hidden units. learning_rate : float, optional The learning rate for weight updates. It is *highly* recommended to tune this hyper-parameter. Reasonable values are in the 10**[0., -3.] range. batch_size : int, optional Number of examples per minibatch. n_iter : int, optional Number of iterations/sweeps over the training dataset to perform during training. verbose : int, optional The verbosity level. The default, zero, means silent mode. random_state : integer or numpy.RandomState, optional A random number generator instance to define the state of the random permutations generator. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- intercept_hidden_ : array-like, shape (n_components,) Biases of the hidden units. intercept_visible_ : array-like, shape (n_features,) Biases of the visible units. components_ : array-like, shape (n_components, n_features) Weight matrix, where n_features in the number of visible units and n_components is the number of hidden units. Examples -------- >>> import numpy as np >>> from sklearn.neural_network import BernoulliRBM >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = BernoulliRBM(n_components=2) >>> model.fit(X) BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10, random_state=None, verbose=0) References ---------- [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for deep belief nets. Neural Computation 18, pp 1527-1554. http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf [2] Tieleman, T. Training Restricted Boltzmann Machines using Approximations to the Likelihood Gradient. International Conference on Machine Learning (ICML) 2008 """ def __init__(self, n_components=256, learning_rate=0.1, batch_size=10, n_iter=10, verbose=0, random_state=None): self.n_components = n_components self.learning_rate = learning_rate self.batch_size = batch_size self.n_iter = n_iter self.verbose = verbose self.random_state = random_state def transform(self, X): """Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) The data to be transformed. Returns ------- h : array, shape (n_samples, n_components) Latent representations of the data. 
""" check_is_fitted(self, "components_") X = check_array(X, accept_sparse='csr', dtype=np.float) return self._mean_hiddens(X) def _mean_hiddens(self, v): """Computes the probabilities P(h=1|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- h : array-like, shape (n_samples, n_components) Corresponding mean field values for the hidden layer. """ p = safe_sparse_dot(v, self.components_.T) p += self.intercept_hidden_ return expit(p, out=p) def _sample_hiddens(self, v, rng): """Sample from the distribution P(h|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState Random number generator to use. Returns ------- h : array-like, shape (n_samples, n_components) Values of the hidden layer. """ p = self._mean_hiddens(v) return (rng.random_sample(size=p.shape) < p) def _sample_visibles(self, h, rng): """Sample from the distribution P(v|h). Parameters ---------- h : array-like, shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState Random number generator to use. Returns ------- v : array-like, shape (n_samples, n_features) Values of the visible layer. """ p = np.dot(h, self.components_) p += self.intercept_visible_ expit(p, out=p) return (rng.random_sample(size=p.shape) < p) def _free_energy(self, v): """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : array-like, shape (n_samples,) The value of the free energy. """ return (- safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)) def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : array-like, shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self, "components_") if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_ def partial_fit(self, X, y=None): """Fit the model to the data X which should contain a partial segment of the data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) if not hasattr(self, 'components_'): self.components_ = np.asarray( self.random_state_.normal( 0, 0.01, (self.n_components, X.shape[1]) ), order='fortran') if not hasattr(self, 'intercept_hidden_'): self.intercept_hidden_ = np.zeros(self.n_components, ) if not hasattr(self, 'intercept_visible_'): self.intercept_visible_ = np.zeros(X.shape[1], ) if not hasattr(self, 'h_samples_'): self.h_samples_ = np.zeros((self.batch_size, self.n_components)) self._fit(X, self.random_state_) def _fit(self, v_pos, rng): """Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : array-like, shape (n_samples, n_features) The data to use for training. rng : RandomState Random number generator to use for sampling. 
""" h_pos = self._mean_hiddens(v_pos) v_neg = self._sample_visibles(self.h_samples_, rng) h_neg = self._mean_hiddens(v_neg) lr = float(self.learning_rate) / v_pos.shape[0] update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T update -= np.dot(h_neg.T, v_neg) self.components_ += lr * update self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) self.intercept_visible_ += lr * (np.asarray( v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)) h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial self.h_samples_ = np.floor(h_neg, h_neg) def score_samples(self, X): """Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : array-like, shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference. """ check_is_fitted(self, "components_") v = check_array(X, accept_sparse='csr') rng = check_random_state(self.random_state) # Randomly corrupt one feature in each sample in v. ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) if issparse(v): data = -2 * v[ind] + 1 v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) else: v_ = v.copy() v_[ind] = 1 - v_[ind] fe = self._free_energy(v) fe_ = self._free_energy(v_) return v.shape[1] * log_logistic(fe_ - fe) def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float) n_samples = X.shape[0] rng = check_random_state(self.random_state) self.components_ = np.asarray( rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='fortran') self.intercept_hidden_ = np.zeros(self.n_components, ) self.intercept_visible_ = np.zeros(X.shape[1], ) self.h_samples_ = np.zeros((self.batch_size, self.n_components)) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples)) verbose = self.verbose begin = time.time() for iteration in xrange(1, self.n_iter + 1): for batch_slice in batch_slices: self._fit(X[batch_slice], rng) if verbose: end = time.time() print("[%s] Iteration %d, pseudo-likelihood = %.2f," " time = %.2fs" % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin)) begin = end return self
bsd-3-clause
nutils/nutils
tests/test_docs.py
2
3723
import doctest as _doctest, unittest, importlib, os, tempfile, pathlib, functools, warnings, subprocess, sys, treelog import nutils.testing _doctestlog = treelog.FilterLog(treelog.StdoutLog(), minlevel=1) class DocTestCase(nutils.testing.ContextTestCase, _doctest.DocTestCase): def __init__(self, test, *, requires=None, **kwargs): self.__test = test self.__requires = tuple(requires) if requires else () super().__init__(test, **kwargs) def setUpContext(self, stack): lines = self.__test.docstring.splitlines() indent = min((len(line) - len(line.lstrip()) for line in lines[1:] if line.strip()), default=0) blank = True requires = list(self.__requires) for line in lines: if blank and line[indent:].startswith('.. requires:: '): requires.extend(name.strip() for name in line[indent+13:].split(',')) blank = not line.strip() missing = tuple(filter(nutils.testing._not_has_module, requires)) if missing: self.skipTest('missing module{}: {}'.format('s' if len(missing) > 1 else '', ','.join(missing))) if 'matplotlib' in requires: import matplotlib.testing matplotlib.testing.setup() super().setUpContext(stack) stack.enter_context(warnings.catch_warnings()) warnings.simplefilter('ignore') stack.enter_context(treelog.set(_doctestlog)) import numpy printoptions = numpy.get_printoptions() if 'legacy' in printoptions: stack.callback(numpy.set_printoptions, **printoptions) numpy.set_printoptions(legacy='1.13') def shortDescription(self): return None def __repr__(self): return '{} ({}.doctest)'.format(self.id(), __name__) __str__ = __repr__ doctest = unittest.TestSuite() parser = _doctest.DocTestParser() finder = _doctest.DocTestFinder(parser=parser) checker = nutils.testing.FloatNeighborhoodOutputChecker() root = pathlib.Path(__file__).parent.parent for path in sorted((root/'nutils').glob('**/*.py')): name = '.'.join(path.relative_to(root).parts)[:-3] if name.endswith('.__init__'): name = name[:-9] module = importlib.import_module(name) for test in sorted(finder.find(module)): if len(test.examples) == 0: continue if not test.filename: test.filename = module.__file__ doctest.addTest(DocTestCase(test, optionflags=_doctest.ELLIPSIS, checker=checker)) for path in sorted((root/'docs').glob('**/*.rst')): name = str(path.relative_to(root)) with path.open(encoding='utf-8') as f: doc = f.read() test = parser.get_doctest(doc, globs={}, name=name, filename=str(path), lineno=0) if test.examples: doctest.addTest(DocTestCase(test, optionflags=_doctest.ELLIPSIS, checker=checker, requires=['matplotlib'])) class sphinx(nutils.testing.TestCase): def setUpContext(self, stack): super().setUpContext(stack) self.tmpdir = pathlib.Path(stack.enter_context(tempfile.TemporaryDirectory(prefix='nutils'))) @nutils.testing.requires('sphinx', 'matplotlib', 'scipy') def test(self): from sphinx.application import Sphinx app = Sphinx(srcdir=str(root/'docs'), confdir=str(root/'docs'), outdir=str(self.tmpdir/'html'), doctreedir=str(self.tmpdir/'doctree'), buildername='html', freshenv=True, warningiserror=True, confoverrides=dict(nitpicky=True)) app.build() if app.statuscode: self.fail('sphinx build failed with code {}'.format(app.statuscode)) def load_tests(loader, suite, pattern): # Ignore default suite (containing `DocTestCase`). suite = unittest.TestSuite() suite.addTest(doctest) suite.addTests(loader.loadTestsFromTestCase(sphinx)) return suite
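# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original nutils test module).  Because
# the load_tests hook above replaces the default suite, running this file
# through the standard unittest machinery executes the collected doctests and
# the sphinx build test.  Invoking it as a script is an assumption about the
# repository layout (nutils must be importable from the working directory).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # equivalent to: python -m unittest tests.test_docs -v
    unittest.main(verbosity=2)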
mit
HesselTjeerdsma/Cyber-Physical-Pacman-Game
Algor/flask/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py
7
119504
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from __future__ import division, print_function, absolute_import from scipy._lib.six import string_types, exec_, PY3 from scipy._lib._util import getargspec_no_self as _getargspec import sys import keyword import re import types import warnings from scipy.misc import doccer from ._distr_params import distcont, distdiscrete from scipy._lib._util import check_random_state, _lazywhere, _lazyselect from scipy._lib._util import _valarray as valarray from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive) # for root finding for discrete distribution ppf, and max likelihood estimation from scipy import optimize # for functions of continuous distributions (e.g. moments, entropy, cdf) from scipy import integrate # to approximate the pdf of a continuous distribution given its cdf from scipy.misc import derivative from numpy import (arange, putmask, ravel, take, ones, shape, ndarray, product, reshape, zeros, floor, logical_and, log, sqrt, exp) from numpy import (place, argsort, argmax, vectorize, asarray, nan, inf, isinf, NINF, empty) import numpy as np from ._constants import _XMAX if PY3: def instancemethod(func, obj, cls): return types.MethodType(func, obj) else: instancemethod = types.MethodType # These are the docstring parts used for substitution in specific # distribution docstrings docheaders = {'methods': """\nMethods\n-------\n""", 'notes': """\nNotes\n-----\n""", 'examples': """\nExamples\n--------\n"""} _doc_rvs = """\ ``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)`` Random variates. """ _doc_pdf = """\ ``pdf(x, %(shapes)s, loc=0, scale=1)`` Probability density function. """ _doc_logpdf = """\ ``logpdf(x, %(shapes)s, loc=0, scale=1)`` Log of the probability density function. """ _doc_pmf = """\ ``pmf(k, %(shapes)s, loc=0, scale=1)`` Probability mass function. """ _doc_logpmf = """\ ``logpmf(k, %(shapes)s, loc=0, scale=1)`` Log of the probability mass function. """ _doc_cdf = """\ ``cdf(x, %(shapes)s, loc=0, scale=1)`` Cumulative distribution function. """ _doc_logcdf = """\ ``logcdf(x, %(shapes)s, loc=0, scale=1)`` Log of the cumulative distribution function. """ _doc_sf = """\ ``sf(x, %(shapes)s, loc=0, scale=1)`` Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). """ _doc_logsf = """\ ``logsf(x, %(shapes)s, loc=0, scale=1)`` Log of the survival function. """ _doc_ppf = """\ ``ppf(q, %(shapes)s, loc=0, scale=1)`` Percent point function (inverse of ``cdf`` --- percentiles). """ _doc_isf = """\ ``isf(q, %(shapes)s, loc=0, scale=1)`` Inverse survival function (inverse of ``sf``). """ _doc_moment = """\ ``moment(n, %(shapes)s, loc=0, scale=1)`` Non-central moment of order n """ _doc_stats = """\ ``stats(%(shapes)s, loc=0, scale=1, moments='mv')`` Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). """ _doc_entropy = """\ ``entropy(%(shapes)s, loc=0, scale=1)`` (Differential) entropy of the RV. """ _doc_fit = """\ ``fit(data, %(shapes)s, loc=0, scale=1)`` Parameter estimates for generic data. """ _doc_expect = """\ ``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)`` Expected value of a function (of one argument) with respect to the distribution. """ _doc_expect_discrete = """\ ``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)`` Expected value of a function (of one argument) with respect to the distribution. 
""" _doc_median = """\ ``median(%(shapes)s, loc=0, scale=1)`` Median of the distribution. """ _doc_mean = """\ ``mean(%(shapes)s, loc=0, scale=1)`` Mean of the distribution. """ _doc_var = """\ ``var(%(shapes)s, loc=0, scale=1)`` Variance of the distribution. """ _doc_std = """\ ``std(%(shapes)s, loc=0, scale=1)`` Standard deviation of the distribution. """ _doc_interval = """\ ``interval(alpha, %(shapes)s, loc=0, scale=1)`` Endpoints of the range that contains alpha percent of the distribution """ _doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, _doc_stats, _doc_entropy, _doc_fit, _doc_expect, _doc_median, _doc_mean, _doc_var, _doc_std, _doc_interval]) _doc_default_longsummary = """\ As an instance of the `rv_continuous` class, `%(name)s` object inherits from it a collection of generic methods (see below for the full list), and completes them with details specific for this particular distribution. """ _doc_default_frozen_note = """ Alternatively, the object may be called (as a function) to fix the shape, location, and scale parameters returning a "frozen" continuous RV object: rv = %(name)s(%(shapes)s, loc=0, scale=1) - Frozen RV object with the same methods but holding the given shape, location, and scale fixed. """ _doc_default_example = """\ Examples -------- >>> from scipy.stats import %(name)s >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) Calculate a few first moments: %(set_vals_stmt)s >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') Display the probability density function (``pdf``): >>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), ... %(name)s.ppf(0.99, %(shapes)s), 100) >>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), ... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') Alternatively, the distribution object can be called (as a function) to fix the shape, location and scale parameters. This returns a "frozen" RV object holding the given parameters fixed. Freeze the distribution and display the frozen ``pdf``: >>> rv = %(name)s(%(shapes)s) >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') Check accuracy of ``cdf`` and ``ppf``: >>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) >>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) True Generate random numbers: >>> r = %(name)s.rvs(%(shapes)s, size=1000) And compare the histogram: >>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2) >>> ax.legend(loc='best', frameon=False) >>> plt.show() """ _doc_default_locscale = """\ The probability density above is defined in the "standardized" form. To shift and/or scale the distribution use the ``loc`` and ``scale`` parameters. Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with ``y = (x - loc) / scale``. 
""" _doc_default = ''.join([_doc_default_longsummary, _doc_allmethods, '\n', _doc_default_example]) _doc_default_before_notes = ''.join([_doc_default_longsummary, _doc_allmethods]) docdict = { 'rvs': _doc_rvs, 'pdf': _doc_pdf, 'logpdf': _doc_logpdf, 'cdf': _doc_cdf, 'logcdf': _doc_logcdf, 'sf': _doc_sf, 'logsf': _doc_logsf, 'ppf': _doc_ppf, 'isf': _doc_isf, 'stats': _doc_stats, 'entropy': _doc_entropy, 'fit': _doc_fit, 'moment': _doc_moment, 'expect': _doc_expect, 'interval': _doc_interval, 'mean': _doc_mean, 'std': _doc_std, 'var': _doc_var, 'median': _doc_median, 'allmethods': _doc_allmethods, 'longsummary': _doc_default_longsummary, 'frozennote': _doc_default_frozen_note, 'example': _doc_default_example, 'default': _doc_default, 'before_notes': _doc_default_before_notes, 'after_notes': _doc_default_locscale } # Reuse common content between continuous and discrete docs, change some # minor bits. docdict_discrete = docdict.copy() docdict_discrete['pmf'] = _doc_pmf docdict_discrete['logpmf'] = _doc_logpmf docdict_discrete['expect'] = _doc_expect_discrete _doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', 'mean', 'var', 'std', 'interval'] for obj in _doc_disc_methods: docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') _doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] for obj in _doc_disc_methods_err_varname: docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') docdict_discrete.pop('pdf') docdict_discrete.pop('logpdf') _doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods docdict_discrete['longsummary'] = _doc_default_longsummary.replace( 'rv_continuous', 'rv_discrete') _doc_default_frozen_note = """ Alternatively, the object may be called (as a function) to fix the shape and location parameters returning a "frozen" discrete RV object: rv = %(name)s(%(shapes)s, loc=0) - Frozen RV object with the same methods but holding the given shape and location fixed. """ docdict_discrete['frozennote'] = _doc_default_frozen_note _doc_default_discrete_example = """\ Examples -------- >>> from scipy.stats import %(name)s >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) Calculate a few first moments: %(set_vals_stmt)s >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') Display the probability mass function (``pmf``): >>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), ... %(name)s.ppf(0.99, %(shapes)s)) >>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') >>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) Alternatively, the distribution object can be called (as a function) to fix the shape and location. This returns a "frozen" RV object holding the given parameters fixed. Freeze the distribution and display the frozen ``pmf``: >>> rv = %(name)s(%(shapes)s) >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, ... label='frozen pmf') >>> ax.legend(loc='best', frameon=False) >>> plt.show() Check accuracy of ``cdf`` and ``ppf``: >>> prob = %(name)s.cdf(x, %(shapes)s) >>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) True Generate random numbers: >>> r = %(name)s.rvs(%(shapes)s, size=1000) """ _doc_default_discrete_locscale = """\ The probability mass function above is defined in the "standardized" form. To shift distribution use the ``loc`` parameter. 
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. """ docdict_discrete['example'] = _doc_default_discrete_example docdict_discrete['after_notes'] = _doc_default_discrete_locscale _doc_default_before_notes = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods']]) docdict_discrete['before_notes'] = _doc_default_before_notes _doc_default_disc = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods'], docdict_discrete['frozennote'], docdict_discrete['example']]) docdict_discrete['default'] = _doc_default_disc # clean up all the separate docstring elements, we do not need them anymore for obj in [s for s in dir() if s.startswith('_doc_')]: exec('del ' + obj) del obj try: del s except NameError: # in Python 3, loop variables are not visible after the loop pass def _moment(data, n, mu=None): if mu is None: mu = data.mean() return ((data - mu)**n).mean() def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): if (n == 0): return 1.0 elif (n == 1): if mu is None: val = moment_func(1, *args) else: val = mu elif (n == 2): if mu2 is None or mu is None: val = moment_func(2, *args) else: val = mu2 + mu*mu elif (n == 3): if g1 is None or mu2 is None or mu is None: val = moment_func(3, *args) else: mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment elif (n == 4): if g1 is None or g2 is None or mu2 is None or mu is None: val = moment_func(4, *args) else: mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment mu3 = g1*np.power(mu2, 1.5) # 3rd central moment val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu else: val = moment_func(n, *args) return val def _skew(data): """ skew is third central moment / variance**(1.5) """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m3 = ((data - mu)**3).mean() return m3 / np.power(m2, 1.5) def _kurtosis(data): """ kurtosis is fourth central moment / variance**2 - 3 """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m4 = ((data - mu)**4).mean() return m4 / m2**2 - 3 # Frozen RV class class rv_frozen(object): def __init__(self, dist, *args, **kwds): self.args = args self.kwds = kwds # create a new instance self.dist = dist.__class__(**dist._updated_ctor_param()) # a, b may be set in _argcheck, depending on *args, **kwds. Ouch. 
shapes, _, _ = self.dist._parse_args(*args, **kwds) self.dist._argcheck(*shapes) self.a, self.b = self.dist.a, self.dist.b @property def random_state(self): return self.dist._random_state @random_state.setter def random_state(self, seed): self.dist._random_state = check_random_state(seed) def pdf(self, x): # raises AttributeError in frozen discrete distribution return self.dist.pdf(x, *self.args, **self.kwds) def logpdf(self, x): return self.dist.logpdf(x, *self.args, **self.kwds) def cdf(self, x): return self.dist.cdf(x, *self.args, **self.kwds) def logcdf(self, x): return self.dist.logcdf(x, *self.args, **self.kwds) def ppf(self, q): return self.dist.ppf(q, *self.args, **self.kwds) def isf(self, q): return self.dist.isf(q, *self.args, **self.kwds) def rvs(self, size=None, random_state=None): kwds = self.kwds.copy() kwds.update({'size': size, 'random_state': random_state}) return self.dist.rvs(*self.args, **kwds) def sf(self, x): return self.dist.sf(x, *self.args, **self.kwds) def logsf(self, x): return self.dist.logsf(x, *self.args, **self.kwds) def stats(self, moments='mv'): kwds = self.kwds.copy() kwds.update({'moments': moments}) return self.dist.stats(*self.args, **kwds) def median(self): return self.dist.median(*self.args, **self.kwds) def mean(self): return self.dist.mean(*self.args, **self.kwds) def var(self): return self.dist.var(*self.args, **self.kwds) def std(self): return self.dist.std(*self.args, **self.kwds) def moment(self, n): return self.dist.moment(n, *self.args, **self.kwds) def entropy(self): return self.dist.entropy(*self.args, **self.kwds) def pmf(self, k): return self.dist.pmf(k, *self.args, **self.kwds) def logpmf(self, k): return self.dist.logpmf(k, *self.args, **self.kwds) def interval(self, alpha): return self.dist.interval(alpha, *self.args, **self.kwds) def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): # expect method only accepts shape parameters as positional args # hence convert self.args, self.kwds, also loc/scale # See the .expect method docstrings for the meaning of # other parameters. a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) if isinstance(self.dist, rv_discrete): return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) else: return self.dist.expect(func, a, loc, scale, lb, ub, conditional, **kwds) # This should be rewritten def argsreduce(cond, *args): """Return the sequence of ravel(args[i]) where ravel(condition) is True in 1D. Examples -------- >>> import numpy as np >>> rand = np.random.random_sample >>> A = rand((4, 5)) >>> B = 2 >>> C = rand((1, 5)) >>> cond = np.ones(A.shape) >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> B1.shape (20,) >>> cond[2,:] = 0 >>> [A2, B2, C2] = argsreduce(cond, A, B, C) >>> B2.shape (15,) """ newargs = np.atleast_1d(*args) if not isinstance(newargs, list): newargs = [newargs, ] expand_arr = (cond == cond) return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs] parse_arg_template = """ def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): return (%(shape_arg_str)s), %(locscale_out)s def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): return (%(shape_arg_str)s), %(locscale_out)s, moments """ # Both the continuous and discrete distributions depend on ncx2. # I think the function name ncx2 is an abbreviation for noncentral chi squared. 
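# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original SciPy module).  It shows the
# rv_frozen machinery defined above from the caller's side: calling a
# distribution object returns a frozen instance whose methods forward the
# stored shape, loc and scale arguments to the parent distribution.  The
# helper name and the concrete parameter values are arbitrary examples.
# ---------------------------------------------------------------------------
def _frozen_usage_example():
    from scipy import stats

    rv = stats.gamma(a=2.0, loc=0.0, scale=3.0)  # returns an rv_frozen instance
    x = rv.rvs(size=5, random_state=0)           # forwards to gamma.rvs(2.0, ...)
    m, v = rv.stats(moments='mv')                # mean and variance of the frozen RV
    p = rv.cdf(x)                                # same shape/loc/scale reused
    return x, m, v, p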
def _ncx2_log_pdf(x, df, nc): # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor # of exp(-xs*ns) into the ive function to improve numerical stability # at large values of xs. See also `rice.pdf`. df2 = df/2.0 - 1.0 xs, ns = np.sqrt(x), np.sqrt(nc) res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2 res += np.log(ive(df2, xs*ns) / 2.0) return res def _ncx2_pdf(x, df, nc): return np.exp(_ncx2_log_pdf(x, df, nc)) def _ncx2_cdf(x, df, nc): return chndtr(x, df, nc) class rv_generic(object): """Class which encapsulates common functionality between rv_discrete and rv_continuous. """ def __init__(self, seed=None): super(rv_generic, self).__init__() # figure out if _stats signature has 'moments' keyword sign = _getargspec(self._stats) self._stats_has_moments = ((sign[2] is not None) or ('moments' in sign[0])) self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def __getstate__(self): return self._updated_ctor_param(), self._random_state def __setstate__(self, state): ctor_param, r = state self.__init__(**ctor_param) self._random_state = r return self def _construct_argparser( self, meths_to_inspect, locscale_in, locscale_out): """Construct the parser for the shape arguments. Generates the argument-parsing functions dynamically and attaches them to the instance. Is supposed to be called in __init__ of a class for each distribution. If self.shapes is a non-empty string, interprets it as a comma-separated list of shape parameters. Otherwise inspects the call signatures of `meths_to_inspect` and constructs the argument-parsing functions from these. In this case also sets `shapes` and `numargs`. """ if self.shapes: # sanitize the user-supplied shapes if not isinstance(self.shapes, string_types): raise TypeError('shapes must be a string.') shapes = self.shapes.replace(',', ' ').split() for field in shapes: if keyword.iskeyword(field): raise SyntaxError('keywords cannot be used as shapes.') if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): raise SyntaxError( 'shapes must be valid python identifiers') else: # find out the call signatures (_pdf, _cdf etc), deduce shape # arguments. Generic methods only have 'self, x', any further args # are shapes. 
shapes_list = [] for meth in meths_to_inspect: shapes_args = _getargspec(meth) # NB: does not contain self args = shapes_args.args[1:] # peel off 'x', too if args: shapes_list.append(args) # *args or **kwargs are not allowed w/automatic shapes if shapes_args.varargs is not None: raise TypeError( '*args are not allowed w/out explicit shapes') if shapes_args.keywords is not None: raise TypeError( '**kwds are not allowed w/out explicit shapes') if shapes_args.defaults is not None: raise TypeError('defaults are not allowed for shapes') if shapes_list: shapes = shapes_list[0] # make sure the signatures are consistent for item in shapes_list: if item != shapes: raise TypeError('Shape arguments are inconsistent.') else: shapes = [] # have the arguments, construct the method from template shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None dct = dict(shape_arg_str=shapes_str, locscale_in=locscale_in, locscale_out=locscale_out, ) ns = {} exec_(parse_arg_template % dct, ns) # NB: attach to the instance, not class for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: setattr(self, name, instancemethod(ns[name], self, self.__class__) ) self.shapes = ', '.join(shapes) if shapes else None if not hasattr(self, 'numargs'): # allows more general subclassing with *args self.numargs = len(shapes) def _construct_doc(self, docdict, shapes_vals=None): """Construct the instance docstring with string substitutions.""" tempdict = docdict.copy() tempdict['name'] = self.name or 'distname' tempdict['shapes'] = self.shapes or '' if shapes_vals is None: shapes_vals = () vals = ', '.join('%.3g' % val for val in shapes_vals) tempdict['vals'] = vals tempdict['shapes_'] = self.shapes or '' if self.shapes and self.numargs == 1: tempdict['shapes_'] += ',' if self.shapes: tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals) else: tempdict['set_vals_stmt'] = '' if self.shapes is None: # remove shapes from call parameters if there are none for item in ['default', 'before_notes']: tempdict[item] = tempdict[item].replace( "\n%(shapes)s : array_like\n shape parameters", "") for i in range(2): if self.shapes is None: # necessary because we use %(shapes)s in two forms (w w/o ", ") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = doccer.docformat(self.__doc__, tempdict) # correct for empty shapes self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') def _construct_default_doc(self, longname=None, extradoc=None, docdict=None, discrete='continuous'): """Construct instance docstring from the default template.""" if longname is None: longname = 'A' if extradoc is None: extradoc = '' if extradoc.startswith('\n\n'): extradoc = extradoc[2:] self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete), '\n\n%(before_notes)s\n', docheaders['notes'], extradoc, '\n%(example)s']) self._construct_doc(docdict) def freeze(self, *args, **kwds): """Freeze the distribution for the given arguments. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution. Should include all the non-optional arguments, may include ``loc`` and ``scale``. Returns ------- rv_frozen : rv_frozen instance The frozen distribution. """ return rv_frozen(self, *args, **kwds) def __call__(self, *args, **kwds): return self.freeze(*args, **kwds) __call__.__doc__ = freeze.__doc__ # The actual calculation functions (no basic checking need be done) # If these are defined, the others won't be looked at. # Otherwise, the other set can be defined. 
def _stats(self, *args, **kwds): return None, None, None, None # Central moments def _munp(self, n, *args): # Silence floating point warnings from integration. olderr = np.seterr(all='ignore') vals = self.generic_moment(n, *args) np.seterr(**olderr) return vals def _argcheck_rvs(self, *args, **kwargs): # Handle broadcasting and size validation of the rvs method. # Subclasses should not have to override this method. # The rule is that if `size` is not None, then `size` gives the # shape of the result (integer values of `size` are treated as # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) # # `args` is expected to contain the shape parameters (if any), the # location and the scale in a flat tuple (e.g. if there are two # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). # The only keyword argument expected is 'size'. size = kwargs.get('size', None) all_bcast = np.broadcast_arrays(*args) def squeeze_left(a): while a.ndim > 0 and a.shape[0] == 1: a = a[0] return a # Eliminate trivial leading dimensions. In the convention # used by numpy's random variate generators, trivial leading # dimensions are effectively ignored. In other words, when `size` # is given, trivial leading dimensions of the broadcast parameters # in excess of the number of dimensions in size are ignored, e.g. # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) # array([ 1.00104267, 3.00422496, 4.99799278]) # If `size` is not given, the exact broadcast shape is preserved: # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) # all_bcast = [squeeze_left(a) for a in all_bcast] bcast_shape = all_bcast[0].shape bcast_ndim = all_bcast[0].ndim if size is None: size_ = bcast_shape else: size_ = tuple(np.atleast_1d(size)) # Check compatibility of size_ with the broadcast shape of all # the parameters. This check is intended to be consistent with # how the numpy random variate generators (e.g. np.random.normal, # np.random.beta) handle their arguments. The rule is that, if size # is given, it determines the shape of the output. Broadcasting # can't change the output size. # This is the standard broadcasting convention of extending the # shape with fewer dimensions with enough dimensions of length 1 # so that the two shapes have the same number of dimensions. ndiff = bcast_ndim - len(size_) if ndiff < 0: bcast_shape = (1,)*(-ndiff) + bcast_shape elif ndiff > 0: size_ = (1,)*ndiff + size_ # This compatibility test is not standard. In "regular" broadcasting, # two shapes are compatible if for each dimension, the lengths are the # same or one of the lengths is 1. Here, the length of a dimension in # size_ must not be less than the corresponding length in bcast_shape. ok = all([bcdim == 1 or bcdim == szdim for (bcdim, szdim) in zip(bcast_shape, size_)]) if not ok: raise ValueError("size does not match the broadcast shape of " "the parameters.") param_bcast = all_bcast[:-2] loc_bcast = all_bcast[-2] scale_bcast = all_bcast[-1] return param_bcast, loc_bcast, scale_bcast, size_ ## These are the methods you must define (standard form functions) ## NB: generic _pdf, _logpdf, _cdf are different for ## rv_continuous and rv_discrete hence are defined in there def _argcheck(self, *args): """Default check for correct values on args and keywords. Returns condition array of 1's where arguments are correct and 0's where they are not. 
""" cond = 1 for arg in args: cond = logical_and(cond, (asarray(arg) > 0)) return cond def _support_mask(self, x): return (self.a <= x) & (x <= self.b) def _open_support_mask(self, x): return (self.a < x) & (x < self.b) def _rvs(self, *args): # This method must handle self._size being a tuple, and it must # properly broadcast *args and self._size. self._size might be # an empty tuple, which means a scalar random variate is to be # generated. ## Use basic inverse cdf algorithm for RV generation as default. U = self._random_state.random_sample(self._size) Y = self._ppf(U, *args) return Y def _logcdf(self, x, *args): return log(self._cdf(x, *args)) def _sf(self, x, *args): return 1.0-self._cdf(x, *args) def _logsf(self, x, *args): return log(self._sf(x, *args)) def _ppf(self, q, *args): return self._ppfvec(q, *args) def _isf(self, q, *args): return self._ppf(1.0-q, *args) # use correct _ppf for subclasses # These are actually called, and should not be overwritten if you # want to keep error checking. def rvs(self, *args, **kwds): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). size : int or tuple of ints, optional Defining number of random variates (default is 1). random_state : None or int or ``np.random.RandomState`` instance, optional If int or RandomState, use it for drawing the random variates. If None, rely on ``self.random_state``. Default is None. Returns ------- rvs : ndarray or scalar Random variates of given `size`. """ discrete = kwds.pop('discrete', None) rndm = kwds.pop('random_state', None) args, loc, scale, size = self._parse_args_rvs(*args, **kwds) cond = logical_and(self._argcheck(*args), (scale >= 0)) if not np.all(cond): raise ValueError("Domain error in arguments.") if np.all(scale == 0): return loc*ones(size, 'd') # extra gymnastics needed for a custom random_state if rndm is not None: random_state_saved = self._random_state self._random_state = check_random_state(rndm) # `size` should just be an argument to _rvs(), but for, um, # historical reasons, it is made an attribute that is read # by _rvs(). self._size = size vals = self._rvs(*args) vals = vals * scale + loc # do not forget to restore the _random_state if rndm is not None: self._random_state = random_state_saved # Cast to int if discrete if discrete: if size == (): vals = int(vals) else: vals = vals.astype(int) return vals def stats(self, *args, **kwds): """ Some statistics of the given RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional (continuous RVs only) scale parameter (default=1) moments : str, optional composed of letters ['mvsk'] defining which moments to compute: 'm' = mean, 'v' = variance, 's' = (Fisher's) skew, 'k' = (Fisher's) kurtosis. (default is 'mv') Returns ------- stats : sequence of requested moments. 
""" args, loc, scale, moments = self._parse_args_stats(*args, **kwds) # scale = 1 by construction for discrete RVs loc, scale = map(asarray, (loc, scale)) args = tuple(map(asarray, args)) cond = self._argcheck(*args) & (scale > 0) & (loc == loc) output = [] default = valarray(shape(cond), self.badvalue) # Use only entries that are valid in calculation if np.any(cond): goodargs = argsreduce(cond, *(args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] if self._stats_has_moments: mu, mu2, g1, g2 = self._stats(*goodargs, **{'moments': moments}) else: mu, mu2, g1, g2 = self._stats(*goodargs) if g1 is None: mu3 = None else: if mu2 is None: mu2 = self._munp(2, *goodargs) if g2 is None: # (mu2**1.5) breaks down for nan and inf mu3 = g1 * np.power(mu2, 1.5) if 'm' in moments: if mu is None: mu = self._munp(1, *goodargs) out0 = default.copy() place(out0, cond, mu * scale + loc) output.append(out0) if 'v' in moments: if mu2 is None: mu2p = self._munp(2, *goodargs) if mu is None: mu = self._munp(1, *goodargs) mu2 = mu2p - mu * mu if np.isinf(mu): # if mean is inf then var is also inf mu2 = np.inf out0 = default.copy() place(out0, cond, mu2 * scale * scale) output.append(out0) if 's' in moments: if g1 is None: mu3p = self._munp(3, *goodargs) if mu is None: mu = self._munp(1, *goodargs) if mu2 is None: mu2p = self._munp(2, *goodargs) mu2 = mu2p - mu * mu mu3 = mu3p - 3 * mu * mu2 - mu**3 g1 = mu3 / np.power(mu2, 1.5) out0 = default.copy() place(out0, cond, g1) output.append(out0) if 'k' in moments: if g2 is None: mu4p = self._munp(4, *goodargs) if mu is None: mu = self._munp(1, *goodargs) if mu2 is None: mu2p = self._munp(2, *goodargs) mu2 = mu2p - mu * mu if mu3 is None: mu3p = self._munp(3, *goodargs) mu3 = mu3p - 3 * mu * mu2 - mu**3 mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4 g2 = mu4 / mu2**2.0 - 3.0 out0 = default.copy() place(out0, cond, g2) output.append(out0) else: # no valid args output = [] for _ in moments: out0 = default.copy() output.append(out0) if len(output) == 1: return output[0] else: return tuple(output) def entropy(self, *args, **kwds): """ Differential entropy of the RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional (continuous distributions only). Scale parameter (default=1). Notes ----- Entropy is defined base `e`: >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) >>> np.allclose(drv.entropy(), np.log(2.0)) True """ args, loc, scale = self._parse_args(*args, **kwds) # NB: for discrete distributions scale=1 by construction in _parse_args args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) output = zeros(shape(cond0), 'd') place(output, (1-cond0), self.badvalue) goodargs = argsreduce(cond0, *args) place(output, cond0, self.vecentropy(*goodargs) + log(scale)) return output def moment(self, n, *args, **kwds): """ n-th order non-central moment of distribution. Parameters ---------- n : int, n >= 1 Order of moment. arg1, arg2, arg3,... : float The shape parameter(s) for the distribution (see docstring of the instance object for more information). 
loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) """ args, loc, scale = self._parse_args(*args, **kwds) if not (self._argcheck(*args) and (scale > 0)): return nan if (floor(n) != n): raise ValueError("Moment must be an integer.") if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): if self._stats_has_moments: mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]} else: mdict = {} mu, mu2, g1, g2 = self._stats(*args, **mdict) val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) # Convert to transformed X = L + S*Y # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) if loc == 0: return scale**n * val else: result = 0 fac = float(scale) / float(loc) for k in range(n): valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) result += comb(n, k, exact=True)*(fac**k) * valk result += fac**n * val return result * loc**n def median(self, *args, **kwds): """ Median of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter, Default is 0. scale : array_like, optional Scale parameter, Default is 1. Returns ------- median : float The median of the distribution. See Also -------- stats.distributions.rv_discrete.ppf Inverse of the CDF """ return self.ppf(0.5, *args, **kwds) def mean(self, *args, **kwds): """ Mean of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- mean : float the mean of the distribution """ kwds['moments'] = 'm' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def var(self, *args, **kwds): """ Variance of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- var : float the variance of the distribution """ kwds['moments'] = 'v' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def std(self, *args, **kwds): """ Standard deviation of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- std : float standard deviation of the distribution """ kwds['moments'] = 'v' res = sqrt(self.stats(*args, **kwds)) return res def interval(self, alpha, *args, **kwds): """ Confidence interval with equal areas around the median. Parameters ---------- alpha : array_like of float Probability that an rv will be drawn from the returned range. Each value should be in the range [0, 1]. arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. 
scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : ndarray of float end-points of range that contain ``100 * alpha %`` of the rv's possible values. """ alpha = asarray(alpha) if np.any((alpha > 1) | (alpha < 0)): raise ValueError("alpha must be between 0 and 1 inclusive") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b ## continuous random variables: implement maybe later ## ## hf --- Hazard Function (PDF / SF) ## chf --- Cumulative hazard function (-log(SF)) ## psf --- Probability sparsity function (reciprocal of the pdf) in ## units of percent-point-function (as a function of q). ## Also, the derivative of the percent-point function. class rv_continuous(rv_generic): """ A generic continuous random variable class meant for subclassing. `rv_continuous` is a base class to construct specific distribution classes and instances for continuous random variables. It cannot be used directly as a distribution. Parameters ---------- momtype : int, optional The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf. a : float, optional Lower bound of the support of the distribution, default is minus infinity. b : float, optional Upper bound of the support of the distribution, default is plus infinity. xtol : float, optional The tolerance for fixed point calculation for generic ppf. badvalue : float, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example ``"m, n"`` for a distribution that takes two integers as the two shape arguments for all its methods. If not provided, shape parameters will be inferred from the signature of the private methods, ``_pdf`` and ``_cdf`` of the instance. extradoc : str, optional, deprecated This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. seed : None or int or ``numpy.random.RandomState`` instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance. Default is None. Methods ------- rvs pdf logpdf cdf logcdf sf logsf ppf isf moment stats entropy expect median mean std var interval __call__ fit fit_loc_scale nnlf Notes ----- Public methods of an instance of a distribution class (e.g., ``pdf``, ``cdf``) check their arguments and pass valid arguments to private, computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid if it is within the support of a distribution, ``self.a <= x <= self.b``. Whether a shape parameter is valid is decided by an ``_argcheck`` method (which defaults to checking that its arguments are strictly positive.) **Subclassing** New random variables can be defined by subclassing the `rv_continuous` class and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized to location 0 and scale 1). 
If positive argument checking is not correct for your RV then you will also need to re-define the ``_argcheck`` method. Correct, but potentially slow defaults exist for the remaining methods but for speed and/or accuracy you can over-ride:: _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could. **Methods that can be overwritten by subclasses** :: _rvs _pdf _cdf _sf _ppf _isf _stats _munp _entropy _argcheck There are additional (internal and private) generic methods that can be useful for cross-checking and for debugging, but might work in all cases when directly called. A note on ``shapes``: subclasses need not specify them explicitly. In this case, `shapes` will be automatically deduced from the signatures of the overridden methods (`pdf`, `cdf` etc). If, for some reason, you prefer to avoid relying on introspection, you can specify ``shapes`` explicitly as an argument to the instance constructor. **Frozen Distributions** Normally, you must provide shape parameters (and, optionally, location and scale parameters to each call of a method of a distribution. Alternatively, the object may be called (as a function) to fix the shape, location, and scale parameters returning a "frozen" continuous RV object: rv = generic(<shape(s)>, loc=0, scale=1) frozen RV object with the same methods but holding the given shape, location, and scale fixed **Statistics** Statistics are computed using numerical integration by default. For speed you can redefine this using ``_stats``: - take shape parameters and return mu, mu2, g1, g2 - If you can't compute one of these, return it as None - Can also be defined with a keyword argument ``moments``, which is a string composed of "m", "v", "s", and/or "k". Only the components appearing in string should be computed and returned in the order "m", "v", "s", or "k" with missing values returned as None. Alternatively, you can override ``_munp``, which takes ``n`` and shape parameters and returns the n-th non-central moment of the distribution. Examples -------- To create a new Gaussian distribution, we would do the following: >>> from scipy.stats import rv_continuous >>> class gaussian_gen(rv_continuous): ... "Gaussian distribution" ... def _pdf(self, x): ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi) >>> gaussian = gaussian_gen(name='gaussian') ``scipy.stats`` distributions are *instances*, so here we subclass `rv_continuous` and create an instance. With this, we now have a fully functional distribution with all relevant methods automagically generated by the framework. Note that above we defined a standard normal distribution, with zero mean and unit variance. Shifting and scaling of the distribution can be done by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` essentially computes ``y = (x - loc) / scale`` and ``gaussian._pdf(y) / scale``. 
""" def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None, seed=None): super(rv_continuous, self).__init__(seed) # save the ctor parameters, cf generic freeze self._ctor_param = dict( momtype=momtype, a=a, b=b, xtol=xtol, badvalue=badvalue, name=name, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan if name is None: name = 'Distribution' self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -inf if b is None: self.b = inf self.xtol = xtol self.moment_type = momtype self.shapes = shapes self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], locscale_in='loc=0, scale=1', locscale_out='loc, scale') # nin correction self._ppfvec = vectorize(self._ppf_single, otypes='d') self._ppfvec.nin = self.numargs + 1 self.vecentropy = vectorize(self._entropy, otypes='d') self._cdfvec = vectorize(self._cdf_single, otypes='d') self._cdfvec.nin = self.numargs + 1 self.extradoc = extradoc if momtype == 0: self.generic_moment = vectorize(self._mom0_sc, otypes='d') else: self.generic_moment = vectorize(self._mom1_sc, otypes='d') # Because of the *args argument of _mom0_sc, vectorize cannot count the # number of arguments correctly. self.generic_moment.nin = self.numargs + 1 if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if sys.flags.optimize < 2: # Skip adding docstrings if interpreter is run with -OO if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc, docdict=docdict, discrete='continuous') else: dct = dict(distcont) self._construct_doc(docdict, dct.get(self.name)) def _updated_ctor_param(self): """ Return the current version of _ctor_param, possibly updated by user. Used by freezing and pickling. Keep this in sync with the signature of __init__. """ dct = self._ctor_param.copy() dct['a'] = self.a dct['b'] = self.b dct['xtol'] = self.xtol dct['badvalue'] = self.badvalue dct['name'] = self.name dct['shapes'] = self.shapes dct['extradoc'] = self.extradoc return dct def _ppf_to_solve(self, x, q, *args): return self.cdf(*(x, )+args)-q def _ppf_single(self, q, *args): left = right = None if self.a > -np.inf: left = self.a if self.b < np.inf: right = self.b factor = 10. if not left: # i.e. self.a = -inf left = -1.*factor while self._ppf_to_solve(left, q, *args) > 0.: right = left left *= factor # left is now such that cdf(left) < q if not right: # i.e. 
self.b = inf right = factor while self._ppf_to_solve(right, q, *args) < 0.: left = right right *= factor # right is now such that cdf(right) > q return optimize.brentq(self._ppf_to_solve, left, right, args=(q,)+args, xtol=self.xtol) # moment from definition def _mom_integ0(self, x, m, *args): return x**m * self.pdf(x, *args) def _mom0_sc(self, m, *args): return integrate.quad(self._mom_integ0, self.a, self.b, args=(m,)+args)[0] # moment calculated using ppf def _mom_integ1(self, q, m, *args): return (self.ppf(q, *args))**m def _mom1_sc(self, m, *args): return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0] def _pdf(self, x, *args): return derivative(self._cdf, x, dx=1e-5, args=args, order=5) ## Could also define any of these def _logpdf(self, x, *args): return log(self._pdf(x, *args)) def _cdf_single(self, x, *args): return integrate.quad(self._pdf, self.a, x, args=args)[0] def _cdf(self, x, *args): return self._cdfvec(x, *args) ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined ## in rv_generic def pdf(self, x, *args, **kwds): """ Probability density function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- pdf : ndarray Probability density function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._support_mask(x) & (scale > 0) cond = cond0 & cond1 output = zeros(shape(cond), dtyp) putmask(output, (1-cond0)+np.isnan(x), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._pdf(*goodargs) / scale) if output.ndim == 0: return output[()] return output def logpdf(self, x, *args, **kwds): """ Log of the probability density function at x of the given RV. This uses a more numerically accurate calculation if available. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logpdf : array_like Log of the probability density function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._support_mask(x) & (scale > 0) cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) putmask(output, (1-cond0)+np.isnan(x), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._logpdf(*goodargs) - log(scale)) if output.ndim == 0: return output[()] return output def cdf(self, x, *args, **kwds): """ Cumulative distribution function of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- cdf : ndarray Cumulative distribution function evaluated at `x` """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = zeros(shape(cond), dtyp) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 1.0) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._cdf(*goodargs)) if output.ndim == 0: return output[()] return output def logcdf(self, x, *args, **kwds): """ Log of the cumulative distribution function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) place(output, cond2, 0.0) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self, x, *args, **kwds): """ Survival function (1 - `cdf`) at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- sf : array_like Survival function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = zeros(shape(cond), dtyp) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 1.0) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._sf(*goodargs)) if output.ndim == 0: return output[()] return output def logsf(self, x, *args, **kwds): """ Log of the survival function of the given RV. Returns the log of the "survival function," defined as (1 - `cdf`), evaluated at `x`. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logsf : ndarray Log of the survival function evaluated at `x`. """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self, q, *args, **kwds): """ Percent point function (inverse of `cdf`) at q of the given RV. Parameters ---------- q : array_like lower tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : array_like quantile corresponding to the lower tail probability q. """ args, loc, scale = self._parse_args(*args, **kwds) q, loc, scale = map(asarray, (q, loc, scale)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) cond1 = (0 < q) & (q < 1) cond2 = cond0 & (q == 0) cond3 = cond0 & (q == 1) cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue) lower_bound = self.a * scale + loc upper_bound = self.b * scale + loc place(output, cond2, argsreduce(cond2, lower_bound)[0]) place(output, cond3, argsreduce(cond3, upper_bound)[0]) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output, cond, self._ppf(*goodargs) * scale + loc) if output.ndim == 0: return output[()] return output def isf(self, q, *args, **kwds): """ Inverse survival function (inverse of `sf`) at q of the given RV. Parameters ---------- q : array_like upper tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : ndarray or scalar Quantile corresponding to the upper tail probability q. 
""" args, loc, scale = self._parse_args(*args, **kwds) q, loc, scale = map(asarray, (q, loc, scale)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) cond1 = (0 < q) & (q < 1) cond2 = cond0 & (q == 1) cond3 = cond0 & (q == 0) cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue) lower_bound = self.a * scale + loc upper_bound = self.b * scale + loc place(output, cond2, argsreduce(cond2, lower_bound)[0]) place(output, cond3, argsreduce(cond3, upper_bound)[0]) if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output, cond, self._isf(*goodargs) * scale + loc) if output.ndim == 0: return output[()] return output def _nnlf(self, x, *args): return -np.sum(self._logpdf(x, *args), axis=0) def _unpack_loc_scale(self, theta): try: loc = theta[-2] scale = theta[-1] args = tuple(theta[:-2]) except IndexError: raise ValueError("Not enough input arguments.") return loc, scale, args def nnlf(self, theta, x): '''Return negative loglikelihood function. Notes ----- This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the parameters (including loc and scale). ''' loc, scale, args = self._unpack_loc_scale(theta) if not self._argcheck(*args) or scale <= 0: return inf x = asarray((x-loc) / scale) n_log_scale = len(x) * log(scale) if np.any(~self._support_mask(x)): return inf return self._nnlf(x, *args) + n_log_scale def _nnlf_and_penalty(self, x, args): cond0 = ~self._support_mask(x) n_bad = np.count_nonzero(cond0, axis=0) if n_bad > 0: x = argsreduce(~cond0, x)[0] logpdf = self._logpdf(x, *args) finite_logpdf = np.isfinite(logpdf) n_bad += np.sum(~finite_logpdf, axis=0) if n_bad > 0: penalty = n_bad * log(_XMAX) * 100 return -np.sum(logpdf[finite_logpdf], axis=0) + penalty return -np.sum(logpdf, axis=0) def _penalized_nnlf(self, theta, x): ''' Return penalized negative loglikelihood function, i.e., - sum (log pdf(x, theta), axis=0) + penalty where theta are the parameters (including loc and scale) ''' loc, scale, args = self._unpack_loc_scale(theta) if not self._argcheck(*args) or scale <= 0: return inf x = asarray((x-loc) / scale) n_log_scale = len(x) * log(scale) return self._nnlf_and_penalty(x, args) + n_log_scale # return starting point for fit (shape arguments + loc + scale) def _fitstart(self, data, args=None): if args is None: args = (1.0,)*self.numargs loc, scale = self._fit_loc_scale_support(data, *args) return args + (loc, scale) # Return the (possibly reduced) function to optimize in order to find MLE # estimates for the .fit method def _reduce_func(self, args, kwds): # First of all, convert fshapes params to fnum: eg for stats.beta, # shapes='a, b'. To fix `a`, can specify either `f1` or `fa`. # Convert the latter into the former. if self.shapes: shapes = self.shapes.replace(',', ' ').split() for j, s in enumerate(shapes): val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None) if val is not None: key = 'f%d' % j if key in kwds: raise ValueError("Duplicate entry for %s." % key) else: kwds[key] = val args = list(args) Nargs = len(args) fixedn = [] names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] x0 = [] for n, key in enumerate(names): if key in kwds: fixedn.append(n) args[n] = kwds.pop(key) else: x0.append(args[n]) if len(fixedn) == 0: func = self._penalized_nnlf restore = None else: if len(fixedn) == Nargs: raise ValueError( "All parameters fixed. 
There is nothing to optimize.") def restore(args, theta): # Replace with theta for all numbers not in fixedn # This allows the non-fixed values to vary, but # we still call self.nnlf with all parameters. i = 0 for n in range(Nargs): if n not in fixedn: args[n] = theta[i] i += 1 return args def func(theta, x): newtheta = restore(args[:], theta) return self._penalized_nnlf(newtheta, x) return x0, func, restore, args def fit(self, data, *args, **kwds): """ Return MLEs for shape (if applicable), location, and scale parameters from data. MLE stands for Maximum Likelihood Estimate. Starting estimates for the fit are given by input arguments; for any arguments not provided with starting estimates, ``self._fitstart(data)`` is called to generate such. One can hold some parameters fixed to specific values by passing in keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) and ``floc`` and ``fscale`` (for location and scale parameters, respectively). Parameters ---------- data : array_like Data to use in calculating the MLEs. args : floats, optional Starting value(s) for any shape-characterizing arguments (those not provided will be determined by a call to ``_fitstart(data)``). No default value. kwds : floats, optional Starting values for the location and scale parameters; no default. Special keyword arguments are recognized as holding certain parameters fixed: - f0...fn : hold respective shape parameters fixed. Alternatively, shape parameters to fix can be specified by name. For example, if ``self.shapes == "a, b"``, ``fa``and ``fix_a`` are equivalent to ``f0``, and ``fb`` and ``fix_b`` are equivalent to ``f1``. - floc : hold location parameter fixed to specified value. - fscale : hold scale parameter fixed to specified value. - optimizer : The optimizer to use. The optimizer must take ``func``, and starting position as the first two arguments, plus ``args`` (for extra arguments to pass to the function to be optimized) and ``disp=0`` to suppress output as keyword arguments. Returns ------- mle_tuple : tuple of floats MLEs for any shape parameters (if applicable), followed by those for location and scale. For most random variables, shape statistics will be returned, but there are exceptions (e.g. ``norm``). Notes ----- This fit is computed by maximizing a log-likelihood function, with penalty applied for samples outside of range of the distribution. The returned answer is not guaranteed to be the globally optimal MLE, it may only be locally optimal, or the optimization may fail altogether. Examples -------- Generate some data to fit: draw random variates from the `beta` distribution >>> from scipy.stats import beta >>> a, b = 1., 2. >>> x = beta.rvs(a, b, size=1000) Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``): >>> a1, b1, loc1, scale1 = beta.fit(x) We can also use some prior knowledge about the dataset: let's keep ``loc`` and ``scale`` fixed: >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) >>> loc1, scale1 (0, 1) We can also keep shape parameters fixed by using ``f``-keywords. To keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, equivalently, ``fa=1``: >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) >>> a1 1 Not all distributions return estimates for the shape parameters. 
``norm`` for example just returns estimates for location and scale: >>> from scipy.stats import norm >>> x = norm.rvs(a, b, size=1000, random_state=123) >>> loc1, scale1 = norm.fit(x) >>> loc1, scale1 (0.92087172783841631, 2.0015750750324668) """ Narg = len(args) if Narg > self.numargs: raise TypeError("Too many input arguments.") start = [None]*2 if (Narg < self.numargs) or not ('loc' in kwds and 'scale' in kwds): # get distribution specific starting locations start = self._fitstart(data) args += start[Narg:-2] loc = kwds.pop('loc', start[-2]) scale = kwds.pop('scale', start[-1]) args += (loc, scale) x0, func, restore, args = self._reduce_func(args, kwds) optimizer = kwds.pop('optimizer', optimize.fmin) # convert string to function in scipy.optimize if not callable(optimizer) and isinstance(optimizer, string_types): if not optimizer.startswith('fmin_'): optimizer = "fmin_"+optimizer if optimizer == 'fmin_': optimizer = 'fmin' try: optimizer = getattr(optimize, optimizer) except AttributeError: raise ValueError("%s is not a valid optimizer" % optimizer) # by now kwds must be empty, since everybody took what they needed if kwds: raise TypeError("Unknown arguments: %s." % kwds) vals = optimizer(func, x0, args=(ravel(data),), disp=0) if restore is not None: vals = restore(args, vals) vals = tuple(vals) return vals def _fit_loc_scale_support(self, data, *args): """ Estimate loc and scale parameters from data accounting for support. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data. """ data = np.asarray(data) # Estimate location and scale according to the method of moments. loc_hat, scale_hat = self.fit_loc_scale(data, *args) # Compute the support according to the shape parameters. self._argcheck(*args) a, b = self.a, self.b support_width = b - a # If the support is empty then return the moment-based estimates. if support_width <= 0: return loc_hat, scale_hat # Compute the proposed support according to the loc and scale estimates. a_hat = loc_hat + a * scale_hat b_hat = loc_hat + b * scale_hat # Use the moment-based estimates if they are compatible with the data. data_a = np.min(data) data_b = np.max(data) if a_hat < data_a and data_b < b_hat: return loc_hat, scale_hat # Otherwise find other estimates that are compatible with the data. data_width = data_b - data_a rel_margin = 0.1 margin = data_width * rel_margin # For a finite interval, both the location and scale # should have interesting values. if support_width < np.inf: loc_hat = (data_a - a) - margin scale_hat = (data_width + 2 * margin) / support_width return loc_hat, scale_hat # For a one-sided interval, use only an interesting location parameter. if a > -np.inf: return (data_a - a) - margin, 1 elif b < np.inf: return (data_b - b) + margin, 1 else: raise RuntimeError def fit_loc_scale(self, data, *args): """ Estimate loc and scale parameters from data using 1st and 2nd moments. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data. 
""" mu, mu2 = self.stats(*args, **{'moments': 'mv'}) tmp = asarray(data) muhat = tmp.mean() mu2hat = tmp.var() Shat = sqrt(mu2hat / mu2) Lhat = muhat - Shat*mu if not np.isfinite(Lhat): Lhat = 0 if not (np.isfinite(Shat) and (0 < Shat)): Shat = 1 return Lhat, Shat def _entropy(self, *args): def integ(x): val = self._pdf(x, *args) return entr(val) # upper limit is often inf, so suppress warnings when integrating olderr = np.seterr(over='ignore') h = integrate.quad(integ, self.a, self.b)[0] np.seterr(**olderr) if not np.isnan(h): return h else: # try with different limits if integration problems low, upp = self.ppf([1e-10, 1. - 1e-10], *args) if np.isinf(self.b): upper = upp else: upper = self.b if np.isinf(self.a): lower = low else: lower = self.a return integrate.quad(integ, lower, upper)[0] def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds): """Calculate expected value of a function with respect to the distribution. The expected value of a function ``f(x)`` with respect to a distribution ``dist`` is defined as:: ubound E[x] = Integral(f(x) * dist.pdf(x)) lbound Parameters ---------- func : callable, optional Function for which integral is calculated. Takes only one argument. The default is the identity mapping f(x) = x. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter (default=0). scale : float, optional Scale parameter (default=1). lb, ub : scalar, optional Lower and upper bound for integration. Default is set to the support of the distribution. conditional : bool, optional If True, the integral is corrected by the conditional probability of the integration interval. The return value is the expectation of the function, conditional on being in the given interval. Default is False. Additional keyword arguments are passed to the integration routine. Returns ------- expect : float The calculated expected value. Notes ----- The integration behavior of this function is inherited from `integrate.quad`. """ lockwds = {'loc': loc, 'scale': scale} self._argcheck(*args) if func is None: def fun(x, *args): return x * self.pdf(x, *args, **lockwds) else: def fun(x, *args): return func(x) * self.pdf(x, *args, **lockwds) if lb is None: lb = loc + self.a * scale if ub is None: ub = loc + self.b * scale if conditional: invfac = (self.sf(lb, *args, **lockwds) - self.sf(ub, *args, **lockwds)) else: invfac = 1.0 kwds['args'] = args # Silence floating point warnings from integration. 
olderr = np.seterr(all='ignore') vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac np.seterr(**olderr) return vals # Helpers for the discrete distributions def _drv2_moment(self, n, *args): """Non-central moment of discrete distribution.""" def fun(x): return np.power(x, n) * self._pmf(x, *args) return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc) def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm b = self.b a = self.a if isinf(b): # Be sure ending point is > q b = int(max(100*q, 10)) while 1: if b >= self.b: qb = 1.0 break qb = self._cdf(b, *args) if (qb < q): b += 10 else: break else: qb = 1.0 if isinf(a): # be sure starting point < q a = int(min(-100*q, -10)) while 1: if a <= self.a: qb = 0.0 break qa = self._cdf(a, *args) if (qa > q): a -= 10 else: break else: qa = self._cdf(a, *args) while 1: if (qa == q): return a if (qb == q): return b if b <= a+1: # testcase: return wrong number at lower index # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)" # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)" if qa > q: return a else: return b c = int((a+b)/2.0) qc = self._cdf(c, *args) if (qc < q): if a != c: a = c else: raise RuntimeError('updating stopped, endless loop') qa = qc elif (qc > q): if b != c: b = c else: raise RuntimeError('updating stopped, endless loop') qb = qc else: return c def entropy(pk, qk=None, base=None): """Calculate the entropy of a distribution for given probability values. If only probabilities `pk` are given, the entropy is calculated as ``S = -sum(pk * log(pk), axis=0)``. If `qk` is not None, then compute the Kullback-Leibler divergence ``S = sum(pk * log(pk / qk), axis=0)``. This routine will normalize `pk` and `qk` if they don't sum to 1. Parameters ---------- pk : sequence Defines the (discrete) distribution. ``pk[i]`` is the (possibly unnormalized) probability of event ``i``. qk : sequence, optional Sequence against which the relative entropy is computed. Should be in the same format as `pk`. base : float, optional The logarithmic base to use, defaults to ``e`` (natural logarithm). Returns ------- S : float The calculated entropy. """ pk = asarray(pk) pk = 1.0*pk / np.sum(pk, axis=0) if qk is None: vec = entr(pk) else: qk = asarray(qk) if len(qk) != len(pk): raise ValueError("qk and pk must have same length.") qk = 1.0*qk / np.sum(qk, axis=0) vec = rel_entr(pk, qk) S = np.sum(vec, axis=0) if base is not None: S /= log(base) return S # Must over-ride one of _pmf or _cdf or pass in # x_k, p(x_k) lists in initialization class rv_discrete(rv_generic): """ A generic discrete random variable class meant for subclassing. `rv_discrete` is a base class to construct specific distribution classes and instances for discrete random variables. It can also be used to construct an arbitrary distribution defined by a list of support points and corresponding probabilities. Parameters ---------- a : float, optional Lower bound of the support of the distribution, default: 0 b : float, optional Upper bound of the support of the distribution, default: plus infinity moment_tol : float, optional The tolerance for the generic calculation of moments. values : tuple of two array_like, optional ``(xk, pk)`` where ``xk`` are integers with non-zero probabilities ``pk`` with ``sum(pk) = 1``. inc : integer, optional Increment for the support of the distribution. Default is 1. 
(other values have not been tested) badvalue : float, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example "m, n" for a distribution that takes two integers as the two shape arguments for all its methods If not provided, shape parameters will be inferred from the signatures of the private methods, ``_pmf`` and ``_cdf`` of the instance. extradoc : str, optional This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. seed : None or int or ``numpy.random.RandomState`` instance, optional This parameter defines the RandomState object to use for drawing random variates. If None, the global np.random state is used. If integer, it is used to seed the local RandomState instance. Default is None. Methods ------- rvs pmf logpmf cdf logcdf sf logsf ppf isf moment stats entropy expect median mean std var interval __call__ Notes ----- This class is similar to `rv_continuous`, the main differences being: - the support of the distribution is a set of integers - instead of the probability density function, ``pdf`` (and the corresponding private ``_pdf``), this class defines the *probability mass function*, `pmf` (and the corresponding private ``_pmf``.) - scale parameter is not defined. To create a new discrete distribution, we would do the following: >>> from scipy.stats import rv_discrete >>> class poisson_gen(rv_discrete): ... "Poisson distribution" ... def _pmf(self, k, mu): ... return exp(-mu) * mu**k / factorial(k) and create an instance:: >>> poisson = poisson_gen(name="poisson") Note that above we defined the Poisson distribution in the standard form. Shifting the distribution can be done by providing the ``loc`` parameter to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` delegates the work to ``poisson._pmf(x-loc, mu)``. **Discrete distributions from a list of probabilities** Alternatively, you can construct an arbitrary discrete rv defined on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the ``values`` keyword argument to the `rv_discrete` constructor. 
Examples -------- Custom made discrete distribution: >>> from scipy import stats >>> xk = np.arange(7) >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) >>> >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) >>> plt.show() Random number generation: >>> R = custm.rvs(size=100) """ def __new__(cls, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): if values is not None: # dispatch to a subclass return super(rv_discrete, cls).__new__(rv_sample) else: # business as usual return super(rv_discrete, cls).__new__(cls) def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): super(rv_discrete, self).__init__(seed) # cf generic freeze self._ctor_param = dict( a=a, b=b, name=name, badvalue=badvalue, moment_tol=moment_tol, values=values, inc=inc, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan self.badvalue = badvalue self.a = a self.b = b self.moment_tol = moment_tol self.inc = inc self._cdfvec = vectorize(self._cdf_single, otypes='d') self.vecentropy = vectorize(self._entropy) self.shapes = shapes if values is not None: raise ValueError("rv_discrete.__init__(..., values != None, ...)") self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], locscale_in='loc=0', # scale=1 for discrete RVs locscale_out='loc, 1') # nin correction needs to be after we know numargs # correct nin for generic moment vectorization _vec_generic_moment = vectorize(_drv2_moment, otypes='d') _vec_generic_moment.nin = self.numargs + 2 self.generic_moment = instancemethod(_vec_generic_moment, self, rv_discrete) # correct nin for ppf vectorization _vppf = vectorize(_drv2_ppfsingle, otypes='d') _vppf.nin = self.numargs + 2 self._ppfvec = instancemethod(_vppf, self, rv_discrete) # now that self.numargs is defined, we can adjust nin self._cdfvec.nin = self.numargs + 1 self._construct_docstrings(name, longname, extradoc) def _construct_docstrings(self, name, longname, extradoc): if name is None: name = 'Distribution' self.name = name self.extradoc = extradoc # generate docstring for subclass instances if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if sys.flags.optimize < 2: # Skip adding docstrings if interpreter is run with -OO if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc, docdict=docdict_discrete, discrete='discrete') else: dct = dict(distdiscrete) self._construct_doc(docdict_discrete, dct.get(self.name)) # discrete RV do not have the scale parameter, remove it self.__doc__ = self.__doc__.replace( '\n scale : array_like, ' 'optional\n scale parameter (default=1)', '') @property @np.deprecate(message="`return_integers` attribute is not used anywhere any " " longer and is deprecated in scipy 0.18.") def return_integers(self): return 1 def _updated_ctor_param(self): """ Return the current version of _ctor_param, possibly updated by user. Used by freezing and pickling. Keep this in sync with the signature of __init__. 
""" dct = self._ctor_param.copy() dct['a'] = self.a dct['b'] = self.b dct['badvalue'] = self.badvalue dct['moment_tol'] = self.moment_tol dct['inc'] = self.inc dct['name'] = self.name dct['shapes'] = self.shapes dct['extradoc'] = self.extradoc return dct def _nonzero(self, k, *args): return floor(k) == k def _pmf(self, k, *args): return self._cdf(k, *args) - self._cdf(k-1, *args) def _logpmf(self, k, *args): return log(self._pmf(k, *args)) def _cdf_single(self, k, *args): m = arange(int(self.a), k+1) return np.sum(self._pmf(m, *args), axis=0) def _cdf(self, x, *args): k = floor(x) return self._cdfvec(k, *args) # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic def rvs(self, *args, **kwargs): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). size : int or tuple of ints, optional Defining number of random variates (Default is 1). Note that `size` has to be given as keyword, not as positional argument. random_state : None or int or ``np.random.RandomState`` instance, optional If int or RandomState, use it for drawing the random variates. If None, rely on ``self.random_state``. Default is None. Returns ------- rvs : ndarray or scalar Random variates of given `size`. """ kwargs['discrete'] = True return super(rv_discrete, self).rvs(*args, **kwargs) def pmf(self, k, *args, **kwds): """ Probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter (default=0). Returns ------- pmf : array_like Probability mass function evaluated at k """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logpmf(self, k, *args, **kwds): """ Log of the probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter. Default is 0. Returns ------- logpmf : array_like Log of the probability mass function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logpmf(*goodargs)) if output.ndim == 0: return output[()] return output def cdf(self, k, *args, **kwds): """ Cumulative distribution function of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- cdf : ndarray Cumulative distribution function evaluated at `k`. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2*(cond0 == cond0), 1.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logcdf(self, k, *args, **kwds): """ Log of the cumulative distribution function at k of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2*(cond0 == cond0), 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self, k, *args, **kwds): """ Survival function (1 - `cdf`) at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 1.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logsf(self, k, *args, **kwds): """ Log of the survival function of the given RV. Returns the log of the "survival function," defined as 1 - `cdf`, evaluated at `k`. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logsf : ndarray Log of the survival function evaluated at `k`. 
""" args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self, q, *args, **kwds): """ Percent point function (inverse of `cdf`) at q of the given RV. Parameters ---------- q : array_like Lower tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : array_like Quantile corresponding to the lower tail probability, q. """ args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 1) & cond0 cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue, typecode='d') # output type 'd' to handle nin and inf place(output, (q == 0)*(cond == cond), self.a-1) place(output, cond2, self.b) if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._ppf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def isf(self, q, *args, **kwds): """ Inverse survival function (inverse of `sf`) at q of the given RV. Parameters ---------- q : array_like Upper tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : ndarray or scalar Quantile corresponding to the upper tail probability, q. """ args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 1) & cond0 cond = cond0 & cond1 # same problem as with ppf; copied from ppf and changed output = valarray(shape(cond), value=self.badvalue, typecode='d') # output type 'd' to handle nin and inf place(output, (q == 0)*(cond == cond), self.b) place(output, cond2, self.a-1) # call place only if at least 1 valid argument if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] # PB same as ticket 766 place(output, cond, self._isf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def _entropy(self, *args): if hasattr(self, 'pk'): return entropy(self.pk) else: return _expect(lambda x: entr(self.pmf(x, *args)), self.a, self.b, self.ppf(0.5, *args), self.inc) def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): """ Calculate expected value of a function with respect to the distribution for discrete distribution. Parameters ---------- func : callable, optional Function for which the expectation value is calculated. Takes only one argument. The default is the identity mapping f(k) = k. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter. Default is 0. 
lb, ub : int, optional Lower and upper bound for the summation, default is set to the support of the distribution, inclusive (``ul <= k <= ub``). conditional : bool, optional If true then the expectation is corrected by the conditional probability of the summation interval. The return value is the expectation of the function, `func`, conditional on being in the given interval (k such that ``ul <= k <= ub``). Default is False. maxcount : int, optional Maximal number of terms to evaluate (to avoid an endless loop for an infinite sum). Default is 1000. tolerance : float, optional Absolute tolerance for the summation. Default is 1e-10. chunksize : int, optional Iterate over the support of a distributions in chunks of this size. Default is 32. Returns ------- expect : float Expected value. Notes ----- For heavy-tailed distributions, the expected value may or may not exist, depending on the function, `func`. If it does exist, but the sum converges slowly, the accuracy of the result may be rather low. For instance, for ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. increasing `maxcount` and/or `chunksize` may improve the result, but may also make zipf very slow. The function is not vectorized. """ if func is None: def fun(x): # loc and args from outer scope return (x+loc)*self._pmf(x, *args) else: def fun(x): # loc and args from outer scope return func(x+loc)*self._pmf(x, *args) # used pmf because _pmf does not check support in randint and there # might be problems(?) with correct self.a, self.b at this stage maybe # not anymore, seems to work now with _pmf self._argcheck(*args) # (re)generate scalar self.a and self.b if lb is None: lb = self.a else: lb = lb - loc # convert bound for standardized distribution if ub is None: ub = self.b else: ub = ub - loc # convert bound for standardized distribution if conditional: invfac = self.sf(lb-1, *args) - self.sf(ub, *args) else: invfac = 1.0 # iterate over the support, starting from the median x0 = self.ppf(0.5, *args) res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) return res / invfac def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, chunksize=32): """Helper for computing the expectation value of `fun`.""" # short-circuit if the support size is small enough if (ub - lb) <= chunksize: supp = np.arange(lb, ub+1, inc) vals = fun(supp) return np.sum(vals) # otherwise, iterate starting from x0 if x0 < lb: x0 = lb if x0 > ub: x0 = ub count, tot = 0, 0. # iterate over [x0, ub] inclusive for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning) return tot # iterate over [lb, x0) for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning) break return tot def _iter_chunked(x0, x1, chunksize=4, inc=1): """Iterate from x0 to x1 in chunks of chunksize and steps inc. x0 must be finite, x1 need not be. In the latter case, the iterator is infinite. Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards (make sure to set inc < 0.) 
>>> [x for x in _iter_chunked(2, 5, inc=2)] [array([2, 4])] >>> [x for x in _iter_chunked(2, 11, inc=2)] [array([2, 4, 6, 8]), array([10])] >>> [x for x in _iter_chunked(2, -5, inc=-2)] [array([ 2, 0, -2, -4])] >>> [x for x in _iter_chunked(2, -9, inc=-2)] [array([ 2, 0, -2, -4]), array([-6, -8])] """ if inc == 0: raise ValueError('Cannot increment by zero.') if chunksize <= 0: raise ValueError('Chunk size must be positive; got %s.' % chunksize) s = 1 if inc > 0 else -1 stepsize = abs(chunksize * inc) x = x0 while (x - x1) * inc < 0: delta = min(stepsize, abs(x - x1)) step = delta * s supp = np.arange(x, x + step, inc) x += step yield supp class rv_sample(rv_discrete): """A 'sample' discrete distribution defined by the support and values. The ctor ignores most of the arguments, only needs the `values` argument. """ def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): super(rv_discrete, self).__init__(seed) if values is None: raise ValueError("rv_sample.__init__(..., values=None,...)") # cf generic freeze self._ctor_param = dict( a=a, b=b, name=name, badvalue=badvalue, moment_tol=moment_tol, values=values, inc=inc, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan self.badvalue = badvalue self.moment_tol = moment_tol self.inc = inc self.shapes = shapes self.vecentropy = self._entropy xk, pk = values if len(xk) != len(pk): raise ValueError("xk and pk need to have the same length.") if not np.allclose(np.sum(pk), 1): raise ValueError("The sum of provided pk is not 1.") indx = np.argsort(np.ravel(xk)) self.xk = np.take(np.ravel(xk), indx, 0) self.pk = np.take(np.ravel(pk), indx, 0) self.a = self.xk[0] self.b = self.xk[-1] self.qvals = np.cumsum(self.pk, axis=0) self.shapes = ' ' # bypass inspection self._construct_argparser(meths_to_inspect=[self._pmf], locscale_in='loc=0', # scale=1 for discrete RVs locscale_out='loc, 1') self._construct_docstrings(name, longname, extradoc) @property @np.deprecate(message="`return_integers` attribute is not used anywhere any" " longer and is deprecated in scipy 0.18.") def return_integers(self): return 0 def _pmf(self, x): return np.select([x == k for k in self.xk], [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) def _cdf(self, x): xx, xxk = np.broadcast_arrays(x[:, None], self.xk) indx = np.argmax(xxk > xx, axis=-1) - 1 return self.qvals[indx] def _ppf(self, q): qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) indx = argmax(sqq >= qq, axis=-1) return self.xk[indx] def _rvs(self): # Need to define it explicitly, otherwise .rvs() with size=None # fails due to explicit broadcasting in _ppf U = self._random_state.random_sample(self._size) if self._size is None: U = np.array(U, ndmin=1) Y = self._ppf(U)[0] else: Y = self._ppf(U) return Y def _entropy(self): return entropy(self.pk) def generic_moment(self, n): n = asarray(n) return np.sum(self.xk**n[np.newaxis, ...] 
* self.pk, axis=0) @np.deprecate(message="moment_gen method is not used anywhere any more " "and is deprecated in scipy 0.18.") def moment_gen(self, t): t = asarray(t) return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0) @property @np.deprecate(message="F attribute is not used anywhere any longer and " "is deprecated in scipy 0.18.") def F(self): return dict(zip(self.xk, self.qvals)) @property @np.deprecate(message="Finv attribute is not used anywhere any longer and " "is deprecated in scipy 0.18.") def Finv(self): decreasing_keys = sorted(self.F.keys(), reverse=True) return dict((self.F[k], k) for k in decreasing_keys) def get_distribution_names(namespace_pairs, rv_base_class): """ Collect names of statistical distributions and their generators. Parameters ---------- namespace_pairs : sequence A snapshot of (name, value) pairs in the namespace of a module. rv_base_class : class The base class of random variable generator classes in a module. Returns ------- distn_names : list of strings Names of the statistical distributions. distn_gen_names : list of strings Names of the generators of the statistical distributions. Note that these are not simply the names of the statistical distributions, with a _gen suffix added. """ distn_names = [] distn_gen_names = [] for name, value in namespace_pairs: if name.startswith('_'): continue if name.endswith('_gen') and issubclass(value, rv_base_class): distn_gen_names.append(name) if isinstance(value, rv_base_class): distn_names.append(name) return distn_names, distn_gen_names
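# --- Editor's note: a minimal usage sketch, not part of the original module. It
# exercises the machinery defined above through the public scipy.stats namespace
# (cdf/sf/ppf of a continuous RV, `fit` with fixed loc/scale, a custom discrete RV
# built from `values`, and the module-level `entropy` helper). It assumes scipy is
# installed; the numeric checks are loose tolerances, not exact results.
if __name__ == "__main__":
    import numpy as np
    from scipy import stats

    # Continuous RV: ppf is the inverse of cdf, sf is 1 - cdf.
    assert abs(stats.norm.cdf(0.0) - 0.5) < 1e-12
    assert abs(stats.norm.sf(1.0) - (1.0 - stats.norm.cdf(1.0))) < 1e-12
    q = stats.norm.ppf(0.975)                    # roughly 1.96
    assert abs(stats.norm.cdf(q) - 0.975) < 1e-9

    # Maximum-likelihood fit with loc and scale held fixed (see `fit` above).
    x = stats.beta.rvs(1.0, 2.0, size=1000, random_state=0)
    a_hat, b_hat, loc_hat, scale_hat = stats.beta.fit(x, floc=0, fscale=1)
    print(a_hat, b_hat, loc_hat, scale_hat)

    # Custom discrete RV on a finite support (see `rv_discrete` / `rv_sample`).
    xk, pk = np.arange(3), (0.2, 0.5, 0.3)
    custm = stats.rv_discrete(name="custm", values=(xk, pk))
    assert abs(custm.pmf(1) - 0.5) < 1e-12
    print(custm.expect())                        # default func is the identity, i.e. E[X]

    # Shannon entropy of a probability vector (see `entropy` above).
    assert abs(stats.entropy([0.5, 0.5]) - np.log(2)) < 1e-12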
apache-2.0
TheWeiTheTruthAndTheLight/senior-design
src/spark/ml.py
1
2970
import pickle
import warnings
from copy import deepcopy
from datetime import datetime
from itertools import islice, tee
from os import listdir
from random import shuffle

import numpy as np
from nlp import *
from sklearn.ensemble import VotingClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.metrics import classification_report
from sklearn.cross_validation import StratifiedShuffleSplit  # pre-0.20 sklearn module path


def trainTest(X, y, classifiers, reduce=0, splits=10, trainsize=0.8, testsize=0.2):
    """Train and score each classifier on stratified shuffle splits of (X, y).

    If ``reduce`` > 0, the feature matrix is first reduced to the ``reduce`` best
    features (ANOVA F-test) and the selection mask is included in each result tuple.
    """
    sss = StratifiedShuffleSplit(y, n_iter=splits, test_size=testsize, train_size=trainsize)
    results = []
    for i, (train_index, test_index) in enumerate(sss):
        X_train = X[train_index]
        y_train = y[train_index]
        X_test = X[test_index]
        y_test = y[test_index]
        if reduce > 0:
            print("Features before reduction: " + str(X_train.shape))
            reducer = SelectKBest(score_func=f_classif, k=reduce)
            X_train = reducer.fit_transform(X_train, y_train)
            X_test = reducer.transform(X_test)
            print("Features after reduction: " + str(X_train.shape))
            support = reducer.get_support()
        for classifier in classifiers:
            print("Starting to train %s" % str(type(classifier)))
            s = datetime.now()
            classifier.fit(X_train, y_train)
            traintime = (datetime.now() - s).total_seconds()
            score = classifier.score(X_test, y_test)
            if reduce > 0:
                results.append((classifier, traintime, score, support))
            else:
                results.append((classifier, traintime, score))
            print("%s\tTime: %d\tScore:\t%f" % (str(type(classifier)), traintime, score))
    return results


def flattenDict(feature):
    """Flatten one level of nesting: {'a': 1, 'b': {'c': 2}} -> {'a': 1, 'c': 2}."""
    d = {}
    for key, value in feature.items():
        if isinstance(value, dict):
            for subkey, subvalue in value.items():
                d[subkey] = subvalue
        else:
            d[key] = value
    return d


def flatten(X, y=None):
    if y:
        return (flattenDict(x) for x in X), y
    else:
        return (flattenDict(x) for x in X)


def split_feat(gen, n):
    # Split a generator of n-tuples into n generators, one per tuple position.
    def create_generator(it, n):
        return (item[n] for item in it)
    G = tee(gen, n)
    return [create_generator(g, n) for n, g in enumerate(G)]


def predict(listOfString, classifier, dvp, cleanTokens):
    """Featurize raw strings with the fitted DictVectorizer ``dvp`` and predict."""
    listOfFeats = [flattenDict(feature(s, cleanTokens)) for s in listOfString]
    X = dvp.transform(listOfFeats)
    prediction = classifier.predict(X)
    invert_op = getattr(classifier, "predict_proba", None)
    if callable(invert_op):
        preProb = classifier.predict_proba(X)
        return {'classifier': classifier, 'prediction': prediction,
                'prediction_probabilities': preProb}
    else:
        return {'classifier': classifier, 'prediction': prediction}
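# --- Editor's note: a minimal, self-contained sketch of how the helpers above fit
# together (flattenDict -> DictVectorizer -> classifier). The feature dicts below
# are invented for illustration; the real pipeline builds them with `feature()`
# from this project's `nlp` module, which is not reproduced here, and the choice
# of LogisticRegression is only an example.
if __name__ == "__main__":
    from sklearn.feature_extraction import DictVectorizer
    from sklearn.linear_model import LogisticRegression

    raw_feats = [
        {"ngrams": {"lol": 1, "sure": 2}, "length": 12},   # nested dicts get flattened
        {"ngrams": {"great": 1}, "length": 30},
        {"ngrams": {"sure": 1, "great": 1}, "length": 18},
        {"ngrams": {"lol": 2}, "length": 9},
    ]
    y = [1, 0, 0, 1]

    flat = [flattenDict(f) for f in raw_feats]             # e.g. {'lol': 1, 'sure': 2, 'length': 12}
    dvp = DictVectorizer(sparse=True)
    X = dvp.fit_transform(flat)

    clf = LogisticRegression()
    clf.fit(X, y)

    # `predict()` above expects this same fitted DictVectorizer so that new
    # feature dicts are mapped onto the same columns.
    X_new = dvp.transform([flattenDict({"ngrams": {"lol": 1}, "length": 10})])
    print(clf.predict(X_new), clf.predict_proba(X_new))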
mit
ashtonwebster/tl_algs
tests/test_tnb_paperexample.py
1
1149
# coding: utf-8 import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier import random from tl_algs import tnb RAND_SEED = 2016 random.seed(RAND_SEED) # change this to see new random data! """ This example is taken from the paper [1]. Not all results are the same due to arithmetic errors in the paper. [1] Ma, Y., Luo, G., Zeng, X., & Chen, A. (2012). Transfer learning for cross-company software defect prediction. Information and Software Technology, 54(3), 248-256. https://doi.org/10.1016/j.infsof.2011.09.007 """ X_train = pd.DataFrame([[2,1,3], [1,2,2], [1,3,4]]) X_test = pd.DataFrame([[2,1,3], [1,2,3]]) y_train = pd.Series([False, False, True]) # data is already discretized so we specify discretize=False w = tnb.TransferNaiveBayes(test_set_X=X_test, test_set_domain='a', train_pool_X=X_train, train_pool_y=y_train, train_pool_domain=None, rand_seed=None, similarity_func=tnb.sim_minmax, discretize=False) conf, y_pred = w.train_filter_test() # 0.936 obtained by manual computation assert abs(conf[0] - 0.936) < 0.001
mit
solin319/incubator-mxnet
docs/conf.py
23
6968
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # -*- coding: utf-8 -*- import sys, os, re, subprocess import mock from recommonmark import parser curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) libpath = os.path.join(curr_path, '../python/') sys.path.insert(0, libpath) sys.path.insert(0, curr_path) # -- mock out modules MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn'] for mod_name in MOCK_MODULES: sys.modules[mod_name] = mock.Mock() # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.2' # General information about the project. project = u'mxnet' author = u'%s developers' % project copyright = u'2015-2017, %s' % author github_doc_root = 'https://github.com/dmlc/mxnet/tree/master/docs/' doc_root = 'http://mxnet.io/' # add markdown parser source_parsers = { '.md': parser.CommonMarkParser, '.Rmd': parser.CommonMarkParser } # Version information. # from mxnet import libinfo # version = libinfo.__version__ # release = libinfo.__version__ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'breathe', 'mxdoc' ] # Use breathe to include doxygen documents breathe_projects = {'mxnet' : 'doxygen/xml/'} breathe_default_project = 'mxnet' autodoc_member_order = 'bysource' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. # source_suffix = '.rst' source_suffix = ['.rst', '.md', '.Rmd'] # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # Version and release are passed from CMake. #version = None # The full version, including alpha/beta/rc tags. #release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['virtualenv'] # The reST default role (used for this markup: `text`) to use for all documents. 
#default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] suppress_warnings = [ 'image.nonlocal_uri', ] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'mxnet-theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_static'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': 'relations.html' } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'formatdoc'
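# --- Editor's note: illustrative sketch (not part of the original conf.py) of the
# MOCK_MODULES pattern used near the top of this file. Registering mock objects in
# sys.modules lets autodoc import the project even when heavy optional
# dependencies are not installed on the machine building the docs:
#
# import sys
# import mock
#
# for mod_name in ('scipy', 'scipy.sparse', 'sklearn'):
#     sys.modules[mod_name] = mock.Mock()
#
# import sklearn                       # resolved from sys.modules; no ImportError
# assert isinstance(sklearn, mock.Mock)
#
# A local HTML build using this configuration is then typically invoked with:
#
# sphinx-build -b html docs/ docs/_build/html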
apache-2.0
robcarver17/pysystemtrade
sysobjects/production/timed_storage.py
1
8184
from copy import copy import datetime import pandas as pd from syscore.objects import success, arg_not_supplied, missing_data DATE_KEY_NAME= 'date' class timedEntry(object): """ These four functions normally overriden by an inheriting class """ @property def required_argument_names(self) -> list: ## We pass **kwargs and *args to these functions, but the args have to be given names return ["test1", "test2"] # compulsory args @property def _name_(self) -> str: # name used for labelling so __repr__ can be generic return "timedEntry" @property def containing_data_class_name(self) -> str: ## this makes sure we don't mix and match different types of timed storage return "sysdata.production.generic_timed_storage.listOfEntries" def _argument_checks(self, kwargs: dict): ## used to check that certain kwargs meet certain filters ## inherited version should raise exception if problems pass def __init__(self, *args, date: datetime.datetime=arg_not_supplied): """ Can pass either a single dict (which can include 'date') or the arguments in the order of required_arguments >>> timedEntry(1,2)._arg_dict_excluding_date {'test1': 1, 'test2': 2} >>> timedEntry(dict(test1=1, test2=2))._arg_dict_excluding_date {'test1': 1, 'test2': 2} >>> timedEntry(dict(test1=1, test2=2), date = datetime.datetime(2012,1,1)) timedEntry 2012-01-01 00:00:00: {'test1': 1, 'test2': 2} >>> timedEntry(dict(test1=1, test2=2, date = datetime.datetime(2012,1,1)), date = datetime.datetime(2012,1,12)) timedEntry 2012-01-01 00:00:00: {'test1': 1, 'test2': 2} """ args_as_dict = self._resolve_args(args, date) self._argument_checks(args_as_dict) self._init_data_from_passed_args(args_as_dict) def _resolve_args(self, args: tuple, date:datetime.datetime) -> dict: ## We can either be passed a dict or a list of args ## If we're passed a dict, we put the date in if available ## Otherwise it's a list and if len(args) == 1: if isinstance(args[0], dict): args_as_dict = self._resolve_args_passed_as_dict(args, date) return args_as_dict args_as_dict = self._resolve_args_passed_as_star_args(args, date) return args_as_dict def _resolve_args_passed_as_dict(self, args: tuple, date: datetime.datetime) -> dict: args_as_dict = args[0] if DATE_KEY_NAME not in args_as_dict: args_as_dict[DATE_KEY_NAME] = date return args_as_dict def _resolve_args_passed_as_star_args(self, args: tuple, date: datetime.datetime) -> dict: required_args = self.required_argument_names try: assert len(required_args) == len(args) except BaseException: raise Exception( "Expecting to be passed arguments of length %d to match %s, instead got %d arguments" % (len(required_args), str(required_args), len(args))) args_as_dict = {} for arg_name, arg_value in zip(required_args, args): args_as_dict[arg_name] = arg_value args_as_dict[DATE_KEY_NAME] = date return args_as_dict def _init_data_from_passed_args(self, args_as_dict: dict): date = args_as_dict.pop(DATE_KEY_NAME) if date is arg_not_supplied: date = datetime.datetime.now() assert type(date) is datetime.datetime self._date = date for arg_name in args_as_dict.keys(): setattr(self, arg_name, args_as_dict[arg_name]) self._arg_names = list(args_as_dict.keys()) @property def date(self) -> datetime.datetime: return self._date @property def arg_names(self)-> list: return self._arg_names @property def _all_arg_names_including_date(self) -> list: return [DATE_KEY_NAME]+self.arg_names @property def _arg_dict_excluding_date(self) -> dict: result = dict([(key, getattr(self, key)) for key in self.arg_names]) return result @property def 
_arg_dict_including_date(self) -> dict: result = dict([(key, getattr(self, key)) for key in self._all_arg_names_including_date]) return result def __repr__(self): return "%s %s: %s" % (self._name_, self.date, str(self._arg_dict_excluding_date)) def as_dict(self): return self._arg_dict_including_date @classmethod def from_dict(timedEntry, entry_as_dict: dict): return timedEntry(entry_as_dict) def check_args_match(self, another_entry): my_args = self.arg_names another_args = another_entry.arg_names my_args.sort() another_args.sort() try: assert my_args == another_args except BaseException: raise Exception( "Parameters for %s (%s) don't match with %s" % (self._name_, my_args, another_args) ) return success class listOfEntriesAsListOfDicts(list): def as_list_of_entries(self, class_of_entry_list): class_of_each_individual_entry = class_of_entry_list.as_empty()._entry_class() list_of_class_entries = [class_of_each_individual_entry.from_dict( entry_as_dict) for entry_as_dict in self] return class_of_entry_list(list_of_class_entries) def as_plain_list(self): return list(self) class listOfEntries(list): """ A list of timedEntry """ def _entry_class(self): return timedEntry def __init__(self, list_of_entries: list): super().__init__([]) self._arg_names = [] for entry in list_of_entries: self.append(entry) @property def arg_names(self) -> list: return getattr(self, "_arg_names", []) def as_list_of_dict(self) -> list: list_of_dict = [entry.as_dict() for entry in self] return listOfEntriesAsListOfDicts(list_of_dict) @classmethod def from_list_of_dict(cls, list_of_dict: listOfEntriesAsListOfDicts): class_of_each_individual_entry = cls.as_empty()._entry_class() list_of_class_entries = [class_of_each_individual_entry.from_dict( entry_as_dict) for entry_as_dict in list_of_dict] return cls(list_of_class_entries) @classmethod def as_empty(listOfEntries): return listOfEntries([]) def sort(self): super().sort(key=lambda x: x.date) def final_entry(self): if len(self) == 0: return missing_data self.sort() return self[-1] def append(self, item): previous_final_entry = self.final_entry() if len(self) > 0: try: previous_final_entry.check_args_match(item) except Exception as e: raise Exception("%s ; can't add to list" % e) else: ## no entries yet, init argument names self._arg_names = item.arg_names super().append(item) def delete_last_entry(self): self.sort() self.pop() def _as_list_of_dates_and_dict_of_lists(self) -> (list, dict): """ :return: list of lists; date """ list_of_dates = [item.date for item in self] dict_of_lists = {} arg_names = self.arg_names for entry in self: for item_name in arg_names: existing_list = dict_of_lists.get(item_name, []) existing_list.append(getattr(entry, item_name, None)) dict_of_lists[item_name] = existing_list return (list_of_dates, dict_of_lists) def as_pd_df(self): """ :return: pd.DataFrame """ if len(self) == 0: return missing_data ( list_of_dates, dict_of_lists, ) = self._as_list_of_dates_and_dict_of_lists() self_as_df = pd.DataFrame(dict_of_lists, index=list_of_dates) self_as_df = self_as_df.sort_index() return self_as_df
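# --- Editor's note: a minimal usage sketch, not part of the original module. It
# exercises the classes above directly and assumes the module's own imports
# (syscore.objects etc.) resolve, i.e. pysystemtrade is importable.
if __name__ == "__main__":
    some_entries = listOfEntries(
        [
            timedEntry(1, 2, date=datetime.datetime(2020, 1, 1)),
            timedEntry(3, 4, date=datetime.datetime(2020, 2, 1)),
        ]
    )

    # Appended entries must carry the same argument names as the first one
    # ('test1' and 'test2' for the base class), otherwise append() raises.
    some_entries.append(timedEntry(5, 6, date=datetime.datetime(2020, 3, 1)))

    print(some_entries.final_entry())   # the chronologically last entry
    print(some_entries.as_pd_df())      # DataFrame indexed by date, columns test1/test2

    # Round-trip through the plain-dict representation used for storage.
    as_dicts = some_entries.as_list_of_dict()
    recovered = listOfEntries.from_list_of_dict(as_dicts)
    assert len(recovered) == 3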
gpl-3.0
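A minimal usage sketch for the timed-storage classes defined above (timedEntry, listOfEntries), assuming the module's own imports (syscore.objects, pandas) resolve; the dates and argument values are made up for illustration:

import datetime

# Entries can be built from positional args (matching required_argument_names)
# or from a single dict that may also carry a 'date' key.
first = timedEntry(1, 2, date=datetime.datetime(2021, 1, 1))
second = timedEntry(dict(test1=3, test2=4), date=datetime.datetime(2021, 2, 1))

entries = listOfEntries([first, second])

print(entries.final_entry())   # most recent entry by date
print(entries.as_pd_df())      # DataFrame indexed by date with columns test1, test2

# Round-trip through plain dicts, e.g. before writing to a document store.
as_dicts = entries.as_list_of_dict()
restored = listOfEntries.from_list_of_dict(as_dicts)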
ianlini/feagen
feagen/tests/lifetime_feature_generator.py
1
3903
from __future__ import unicode_literals from io import StringIO import feagen as fg from feagen.decorators import ( require, will_generate, ) import numpy as np import pandas as pd from scipy.sparse import csr_matrix from sklearn.model_selection import train_test_split class LifetimeFeatureGenerator(fg.FeatureGenerator): @will_generate('memory', ('data_df',)) def gen_data_df(self): csv = StringIO("""\ id,lifetime,tested_age,weight,height,gender,income 0, 68, 50, 60.1, 170.5, f, 22000 1, 59, 41, 90.4, 168.9, m, 19000 2, 52, 39, 46.2, 173.6, m, 70000 3, 68, 25, 93.9, 180.0, m, 1000000 4, 99, 68, 65.7, 157.6, f, 46000 5, 90, 81, 56.3, 170.2, f, 17000 """) return {'data_df': pd.read_csv(csv, index_col='id')} @require('data_df') @will_generate('h5py', 'lifetime') def gen_lifetime(self, data, will_generate_key): data_df = data['data_df'] return data_df['lifetime'] @require('data_df') @will_generate('h5py', ['weight', 'height']) def gen_raw_data_features(self, data): data_df = data['data_df'] return data_df[['weight', 'height']] @require('data_df') @will_generate('memory', 'mem_raw_data') def gen_mem_raw_data(self, data, will_generate_key): data_df = data['data_df'] return data_df[['weight', 'height']].values @require('data_df') @will_generate('h5py', 'man_raw_data', manually_create_dataset=True) def gen_man_raw_data(self, data, create_dataset_functions, will_generate_key): data_df = data['data_df'] dset = create_dataset_functions['man_raw_data']( shape=(data_df.shape[0], 2)) dset[...] = data_df[['weight', 'height']].values @require('data_df') @will_generate('pandas_hdf', ['pd_weight', 'pd_height']) def gen_raw_data_table(self, data): data_df = data['data_df'] result_df = data_df.loc[:, ['weight', 'height']] result_df.rename(columns={'weight': 'pd_weight', 'height': 'pd_height'}, inplace=True) return result_df @require('data_df') @will_generate('pandas_hdf', 'pd_raw_data') def gen_raw_data_df(self, data, will_generate_key): data_df = data['data_df'] return data_df[['weight', 'height']] @require('pd_raw_data') @will_generate('pandas_hdf', 'pd_raw_data_append', manually_append=True) def gen_raw_data_append_df(self, data, will_generate_key, append_functions): df = data['pd_raw_data'].value append_functions[will_generate_key](df.iloc[:3]) append_functions[will_generate_key](df.iloc[3:]) @require('data_df') @will_generate('h5py', 'BMI') def gen_bmi(self, data, will_generate_key): data_df = data['data_df'] bmi = data_df['weight'] / ((data_df['height'] / 100) ** 2) return bmi @require(('{dividend}', '{divisor}')) @will_generate('h5py', r'(?P<dividend>\w+)_divided_by_(?P<divisor>\w+)', mode='one') def gen_divided_by(self, will_generate_key, data, re_args): division_result = data['{dividend}'].value / data['{divisor}'].value return division_result @require('data_df') @will_generate('pickle', 'train_test_split') def gen_train_test_split(self, data, will_generate_key): data_df = data['data_df'] train_id, test_id = train_test_split( data_df.index, test_size=0.5, random_state=0) return (train_id, test_id) @require(('data_df', 'train_test_split')) @will_generate('h5py', 'is_in_test_set') def gen_is_in_test_set(self, data, will_generate_key): data_df = data['data_df'] _, test_id = data['train_test_split'] is_in_test_set = data_df.index.isin(test_id) sparse_is_in_test_set = csr_matrix(is_in_test_set[:, np.newaxis]) return sparse_is_in_test_set
bsd-2-clause
sarahgrogan/scikit-learn
examples/cluster/plot_dict_face_patches.py
337
2747
""" Online learning of a dictionary of parts of faces ================================================== This example uses a large dataset of faces to learn a set of 20 x 20 images patches that constitute faces. From the programming standpoint, it is interesting because it shows how to use the online API of the scikit-learn to process a very large dataset by chunks. The way we proceed is that we load an image at a time and extract randomly 50 patches from this image. Once we have accumulated 500 of these patches (using 10 images), we run the `partial_fit` method of the online KMeans object, MiniBatchKMeans. The verbose setting on the MiniBatchKMeans enables us to see that some clusters are reassigned during the successive calls to partial-fit. This is because the number of patches that they represent has become too low, and it is better to choose a random new cluster. """ print(__doc__) import time import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.image import extract_patches_2d faces = datasets.fetch_olivetti_faces() ############################################################################### # Learn the dictionary of images print('Learning the dictionary... ') rng = np.random.RandomState(0) kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True) patch_size = (20, 20) buffer = [] index = 1 t0 = time.time() # The online learning part: cycle over the whole dataset 6 times index = 0 for _ in range(6): for img in faces.images: data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng) data = np.reshape(data, (len(data), -1)) buffer.append(data) index += 1 if index % 10 == 0: data = np.concatenate(buffer, axis=0) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) kmeans.partial_fit(data) buffer = [] if index % 100 == 0: print('Partial fit of %4i out of %i' % (index, 6 * len(faces.images))) dt = time.time() - t0 print('done in %.2fs.' % dt) ############################################################################### # Plot the results plt.figure(figsize=(4.2, 4)) for i, patch in enumerate(kmeans.cluster_centers_): plt.subplot(9, 9, i + 1) plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation='nearest') plt.xticks(()) plt.yticks(()) plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' % (dt, 8 * len(faces.images)), fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
bsd-3-clause
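A minimal sketch of the chunked partial_fit pattern used in the example above, on synthetic data rather than the Olivetti faces; the array shapes and cluster count are illustrative, not taken from the example:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=8, random_state=rng)

# Stream the data in chunks; each partial_fit call refines the cluster centers.
for _ in range(20):
    chunk = rng.rand(500, 400)       # 500 samples of 400 features (e.g. flattened 20x20 patches)
    chunk -= chunk.mean(axis=0)      # same per-chunk normalization as the example
    chunk /= chunk.std(axis=0)
    kmeans.partial_fit(chunk)

print(kmeans.cluster_centers_.shape)  # (8, 400)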
linuxlizard/q60
debezel.py
1
2104
#!python # Q&D remove bezel (black along top/left). Untuned firmware can capture parts # of the plastic bezel holding the glass. # # davep import sys import numpy as np #import matplotlib.pyplot as plt import scipy.ndimage.filters import imtools from basename import get_basename import rgbtogray def find_bezel( ndata ): gray = rgbtogray.togray(ndata)[0:50,0:50] gray = scipy.ndimage.filters.median_filter( gray, size=(5,5) ) imtools.save_image(gray,"gray.tif") print ndata.shape numrows = ndata.shape[0] numcols = ndata.shape[1] print "rows={0} cols={1}".format(numrows,numcols) # Do we have a narrow band of black followed by white? # # look for bezel along the top col = 50 pixel_diff = 10 # search for bezel vertically diffs = np.diff( gray[:,-1] ) print diffs print diffs.min(), diffs.max(), np.mean(diffs), np.median(diffs) bezel_row = 0 for row,d in enumerate(diffs) : if d > pixel_diff : bezel_row = row # search for bezel horizonstally diffs = np.diff( gray[-1,:] ) print diffs print diffs.min(), diffs.max(), np.mean(diffs), np.median(diffs) bezel_col = 0 for col,d in enumerate(diffs) : if d > 10 : bezel_col = col # plt.gray() # plt.grid() # plt.plot(d) # plt.show() # best guess, no bezel found return bezel_row, bezel_col def debezel( infilename, outfilename ) : ndata = imtools.load_image(infilename,dtype="uint8") print ndata.shape bezel_edges = find_bezel( ndata ) if bezel_edges is None : # no bezel edges found return False bezel_row,bezel_col = bezel_edges print "row={0} col={1}".format(bezel_row,bezel_col) ndata2 = ndata[bezel_row:,bezel_col:,:] imtools.clip_and_save(ndata2,outfilename) return True def main() : infilename = sys.argv[1] basename = get_basename(infilename) outfilename = "{0}_debezel.tif".format(basename) debezel_done = debezel( infilename, outfilename ) if __name__=='__main__' : main()
 
apache-2.0
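The bezel search in the script above reduces to finding a large brightness jump along a scan line with np.diff and a threshold (pixel_diff); note the script keeps the last index above the threshold as it loops, whereas this self-contained sketch on a synthetic profile takes the first jump for simplicity:

import numpy as np

# Synthetic scan line: 12 dark bezel pixels followed by bright glass.
profile = np.concatenate([np.full(12, 5, dtype=np.int32),
                          np.full(38, 200, dtype=np.int32)])

diffs = np.diff(profile)              # large positive step at the bezel/glass edge
pixel_diff = 10
edges = np.nonzero(diffs > pixel_diff)[0]
bezel_end = edges[0] + 1 if len(edges) else 0   # first index past the bezel

print(bezel_end)   # 12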
tsbischof/photon_correlation
python/photon_correlation/FLID.py
1
2022
#!/usr/bin/env python
import sys
import csv
import bz2
import os

import matplotlib.pyplot as plt
import numpy


def force_aspect(ax, aspect=1):
    im = ax.get_images()
    extent = im[0].get_extent()
    ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)


class FLID(object):
    def __init__(self, filename=None):
        self.intensity = list()
        self.arrival_time = list()
        self.events = list()
        self.counts = list()

        if filename is not None:
            self.from_filename(filename)

    def from_filename(self, filename):
        if not os.path.exists(filename):
            bz2_name = "{}.bz2".format(filename)
            if os.path.exists(bz2_name):
                filename = bz2_name

        if filename.endswith("bz2"):
            open_f = lambda x: bz2.open(x, "rt")
        else:
            open_f = open

        with open_f(filename) as stream_in:
            return(self.from_stream(stream_in))

    def from_stream(self, stream_in):
        reader = csv.reader(stream_in)

        time_bins = (next(reader), next(reader))
        for left, right in zip(time_bins[0][3:], time_bins[1][3:]):
            self.arrival_time.append((float(left), float(right)))

        for line in reader:
            self.intensity.append((float(line[0]), float(line[1])))
            self.events.append(int(line[2]))
            self.counts.append(list(map(int, line[3:])))

        return(self)

    def make_figure(self):
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)

        ax.imshow(self.counts, interpolation="none", origin="lower",
                  extent=[self.arrival_time[0][0], self.arrival_time[-1][1],
                          self.intensity[0][0], self.intensity[-1][1]])
        force_aspect(ax)
        ax.set_xlabel("Time/ps")
        ax.set_ylabel("Counts per bin")

        fig.tight_layout()

        return(fig)
bsd-3-clause
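A hedged usage sketch for the FLID class above; the file path is hypothetical and only methods defined in this file are used:

# "flid.csv" (or "flid.csv.bz2") is a placeholder path to a histogram
# in the CSV layout that from_stream() expects.
flid = FLID("flid.csv")
fig = flid.make_figure()
fig.savefig("flid.png")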
JT5D/scikit-learn
examples/plot_learning_curve.py
1
2253
""" ======================== Plotting Learning Curves ======================== A learning curve shows the validation and training score of a learning algorithm for varying numbers of training samples. It is a tool to find out how much we benefit from adding more training data. If both the validation score and the training score converge to a value that is too low, we will not benefit much from more training data and we will probably have to use a learning algorithm or a parametrization of the current learning algorithm that can learn more complex concepts (i.e. has a lower bias). In this example, on the left side the learning curve of a naive Bayes classifier is shown for the digits dataset. Note that the training score and the cross-validation score are both not very good at the end. However, the shape of the curve can be found in more complex datasets very often: the training score is very high at the beginning and decreases and the cross-validation score is very low at the beginning and increases. On the right side we see the learning curve of an SVM with RBF kernel. We can see clearly that the training score is still around the maximum and the validation score could be increased with more training samples. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.datasets import load_digits from sklearn.learning_curve import learning_curve digits = load_digits() X, y = digits.data, digits.target plt.figure() plt.title("Learning Curve (Naive Bayes)") plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( GaussianNB(), X, y, cv=10, n_jobs=1) plt.plot(train_sizes, train_scores, label="Training score") plt.plot(train_sizes, test_scores, label="Cross-validation score") plt.legend(loc="best") plt.figure() plt.title("Learning Curve (SVM, RBF kernel, $\gamma=0.001$)") plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( SVC(gamma=0.001), X, y, cv=10, n_jobs=1) plt.plot(train_sizes, train_scores, label="Training score") plt.plot(train_sizes, test_scores, label="Cross-validation score") plt.legend(loc="best") plt.show()
bsd-3-clause
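learning_curve returns score arrays of shape (n_train_sizes, n_cv_folds), so the curves are usually summarized by their mean (and spread) across folds before plotting; a minimal sketch of that step, using the modern sklearn.model_selection import path rather than the deprecated sklearn.learning_curve module used above:

import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve

X, y = load_digits(return_X_y=True)
train_sizes, train_scores, test_scores = learning_curve(GaussianNB(), X, y, cv=10)

# Average over CV folds and show the spread of the validation score.
plt.plot(train_sizes, train_scores.mean(axis=1), 'o-', label="Training score")
plt.plot(train_sizes, test_scores.mean(axis=1), 'o-', label="Cross-validation score")
plt.fill_between(train_sizes,
                 test_scores.mean(axis=1) - test_scores.std(axis=1),
                 test_scores.mean(axis=1) + test_scores.std(axis=1), alpha=0.2)
plt.xlabel("Training examples")
plt.ylabel("Score")
plt.legend(loc="best")
plt.show()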
aflaxman/scikit-learn
sklearn/datasets/mldata.py
32
8031
"""Automatically download MLdata datasets.""" # Copyright (c) 2011 Pietro Berkes # License: BSD 3 clause import os from os.path import join, exists import re import numbers try: # Python 2 from urllib2 import HTTPError from urllib2 import quote from urllib2 import urlopen except ImportError: # Python 3+ from urllib.error import HTTPError from urllib.parse import quote from urllib.request import urlopen import numpy as np import scipy as sp from scipy import io from shutil import copyfileobj from .base import get_data_home from ..utils import Bunch MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s" def mldata_filename(dataname): """Convert a raw name for a data set in a mldata.org filename. Parameters ---------- dataname : str Name of dataset Returns ------- fname : str The converted dataname. """ dataname = dataname.lower().replace(' ', '-') return re.sub(r'[().]', '', dataname) def fetch_mldata(dataname, target_name='label', data_name='data', transpose_data=True, data_home=None): """Fetch an mldata.org data set If the file does not exist yet, it is downloaded from mldata.org . mldata.org does not have an enforced convention for storing data or naming the columns in a data set. The default behavior of this function works well with the most common cases: 1) data values are stored in the column 'data', and target values in the column 'label' 2) alternatively, the first column stores target values, and the second data values 3) the data array is stored as `n_features x n_samples` , and thus needs to be transposed to match the `sklearn` standard Keyword arguments allow to adapt these defaults to specific data sets (see parameters `target_name`, `data_name`, `transpose_data`, and the examples below). mldata.org data sets may have multiple columns, which are stored in the Bunch object with their original name. Parameters ---------- dataname : str Name of the data set on mldata.org, e.g.: "leukemia", "Whistler Daily Snowfall", etc. The raw name is automatically converted to a mldata.org URL . target_name : optional, default: 'label' Name or index of the column containing the target values. data_name : optional, default: 'data' Name or index of the column containing the data. transpose_data : optional, default: True If True, transpose the downloaded data array. data_home : optional, default: None Specify another download and cache folder for the data sets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'DESCR', the full description of the dataset, and 'COL_NAMES', the original names of the dataset columns. Examples -------- Load the 'iris' dataset from mldata.org: >>> from sklearn.datasets.mldata import fetch_mldata >>> import tempfile >>> test_data_home = tempfile.mkdtemp() >>> iris = fetch_mldata('iris', data_home=test_data_home) >>> iris.target.shape (150,) >>> iris.data.shape (150, 4) Load the 'leukemia' dataset from mldata.org, which needs to be transposed to respects the scikit-learn axes convention: >>> leuk = fetch_mldata('leukemia', transpose_data=True, ... data_home=test_data_home) >>> leuk.data.shape (72, 7129) Load an alternative 'iris' dataset, which has different names for the columns: >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1, ... data_name=0, data_home=test_data_home) >>> iris3 = fetch_mldata('datasets-UCI iris', ... target_name='class', data_name='double0', ... 
data_home=test_data_home) >>> import shutil >>> shutil.rmtree(test_data_home) """ # normalize dataset name dataname = mldata_filename(dataname) # check if this data set has been already downloaded data_home = get_data_home(data_home=data_home) data_home = join(data_home, 'mldata') if not exists(data_home): os.makedirs(data_home) matlab_name = dataname + '.mat' filename = join(data_home, matlab_name) # if the file does not exist, download it if not exists(filename): urlname = MLDATA_BASE_URL % quote(dataname) try: mldata_url = urlopen(urlname) except HTTPError as e: if e.code == 404: e.msg = "Dataset '%s' not found on mldata.org." % dataname raise # store Matlab file try: with open(filename, 'w+b') as matlab_file: copyfileobj(mldata_url, matlab_file) except: os.remove(filename) raise mldata_url.close() # load dataset matlab file with open(filename, 'rb') as matlab_file: matlab_dict = io.loadmat(matlab_file, struct_as_record=True) # -- extract data from matlab_dict # flatten column names col_names = [str(descr[0]) for descr in matlab_dict['mldata_descr_ordering'][0]] # if target or data names are indices, transform then into names if isinstance(target_name, numbers.Integral): target_name = col_names[target_name] if isinstance(data_name, numbers.Integral): data_name = col_names[data_name] # rules for making sense of the mldata.org data format # (earlier ones have priority): # 1) there is only one array => it is "data" # 2) there are multiple arrays # a) copy all columns in the bunch, using their column name # b) if there is a column called `target_name`, set "target" to it, # otherwise set "target" to first column # c) if there is a column called `data_name`, set "data" to it, # otherwise set "data" to second column dataset = {'DESCR': 'mldata.org dataset: %s' % dataname, 'COL_NAMES': col_names} # 1) there is only one array => it is considered data if len(col_names) == 1: data_name = col_names[0] dataset['data'] = matlab_dict[data_name] # 2) there are multiple arrays else: for name in col_names: dataset[name] = matlab_dict[name] if target_name in col_names: del dataset[target_name] dataset['target'] = matlab_dict[target_name] else: del dataset[col_names[0]] dataset['target'] = matlab_dict[col_names[0]] if data_name in col_names: del dataset[data_name] dataset['data'] = matlab_dict[data_name] else: del dataset[col_names[1]] dataset['data'] = matlab_dict[col_names[1]] # set axes to scikit-learn conventions if transpose_data: dataset['data'] = dataset['data'].T if 'target' in dataset: if not sp.sparse.issparse(dataset['target']): dataset['target'] = dataset['target'].squeeze() return Bunch(**dataset) # The following is used by test runners to setup the docstring tests fixture def setup_module(module): # setup mock urllib2 module to avoid downloading from mldata.org from sklearn.utils.testing import install_mldata_mock install_mldata_mock({ 'iris': { 'data': np.empty((150, 4)), 'label': np.empty(150), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, 'leukemia': { 'data': np.empty((72, 7129)), }, }) def teardown_module(module): from sklearn.utils.testing import uninstall_mldata_mock uninstall_mldata_mock()
bsd-3-clause
lekshmideepu/nest-simulator
pynest/examples/spatial/conncomp.py
20
3879
# -*- coding: utf-8 -*- # # conncomp.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Create two populations of pyramidal cells and two populations of interneurons ----------------------------------------------------------------------------- Create two populations of pyramidal cells and two populations of interneurons on a 30x30 grid. Connect with two projections, one pyr->pyr, one pyr->in, and visualize. BCCN Tutorial @ CNS*09 Hans Ekkehard Plesser, UMB """ import nest import matplotlib.pyplot as plt import numpy as np nest.ResetKernel() nest.set_verbosity('M_WARNING') nest.CopyModel('iaf_psc_alpha', 'pyr') nest.CopyModel('iaf_psc_alpha', 'in') ######################################################################### # same positions for all populations pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.]) a_pyr = nest.Create('pyr', positions=pos) a_in = nest.Create('in', positions=pos) b_pyr = nest.Create('pyr', positions=pos) b_in = nest.Create('in', positions=pos) nest.Connect(a_pyr, b_pyr, {'rule': 'pairwise_bernoulli', 'p': 0.5, 'mask': {'circular': {'radius': 0.5}}}) nest.Connect(a_pyr, b_in, {'rule': 'pairwise_bernoulli', 'p': 0.2, 'mask': {'circular': {'radius': 1.}}}) plt.clf() ###################################################### # plot targets of neurons in different grid locations # obtain node id for center: pick first node of composite ctr_index = 30 * 15 + 15 ctr_id = a_pyr[ctr_index:ctr_index + 1] # get all projection targets of center neuron conn = nest.GetConnections(ctr_id) tgts = conn.get('target') tpyr = nest.GetTargetPositions(ctr_id, b_pyr)[0] tin = nest.GetTargetPositions(ctr_id, b_in)[0] tpyr_x = np.array([x for x, y in tpyr]) tpyr_y = np.array([y for x, y in tpyr]) tin_x = np.array([x for x, y in tin]) tin_y = np.array([y for x, y in tin]) # scatter-plot plt.scatter(tpyr_x - 0.02, tpyr_y - 0.02, 20, 'b', zorder=10) plt.scatter(tin_x + 0.02, tin_y + 0.02, 20, 'r', zorder=10) # mark locations with background grey circle plt.plot(tpyr_x, tpyr_y, 'o', markerfacecolor=(0.7, 0.7, 0.7), markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_') plt.plot(tin_x, tin_y, 'o', markerfacecolor=(0.7, 0.7, 0.7), markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_') # mark sender position with transparent red circle ctrpos = nest.GetPosition(ctr_id) plt.gca().add_patch(plt.Circle(ctrpos, radius=0.15, zorder=99, fc='r', alpha=0.4, ec='none')) # mark mask positions with open red/blue circles plt.gca().add_patch(plt.Circle(ctrpos, radius=0.5, zorder=2, fc='none', ec='b', lw=3)) plt.gca().add_patch(plt.Circle(ctrpos, radius=1.0, zorder=2, fc='none', ec='r', lw=3)) # mark layer edge plt.gca().add_patch(plt.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1, fc='none', ec='k', lw=3)) # beautify plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5)) plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5)) plt.grid(True) plt.axis([-1.6, 1.6, -1.6, 1.6]) 
plt.axes().set_aspect('equal', 'box') plt.show()
gpl-2.0
tapomayukh/projects_in_python
sandbox_tapo/src/skin_related/AI_Surface_Recognition/src/hmm_crossvalidation_force_1_states.py
1
14805
# Hidden Markov Model Implementation import pylab as pyl import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import scipy.io import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle import ghmm # Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for Smooth, Moderate, and Rough Surface Models def feature_to_mu_sigma(fvec): index = 0 m,n = np.shape(fvec) #print m,n mu = np.matrix(np.zeros((1,1))) sigma = np.matrix(np.zeros((1,1))) DIVS = m/1 while (index < 1): m_init = index*DIVS temp_fvec = fvec[(m_init):(m_init+DIVS),0:] #if index == 1: #print temp_fvec mu[index] = scp.mean(temp_fvec) sigma[index] = scp.std(temp_fvec) index = index+1 return mu,sigma # Returns sequence given raw data def create_seq(fvec): m,n = np.shape(fvec) #print m,n seq = np.matrix(np.zeros((1,n))) DIVS = m/1 for i in range(n): index = 0 while (index < 1): m_init = index*DIVS temp_fvec = fvec[(m_init):(m_init+DIVS),i] #if index == 1: #print temp_fvec seq[index,i] = scp.mean(temp_fvec) index = index+1 return seq if __name__ == '__main__': ### Simulation Data tSamples = 400 datasmooth = scipy.io.loadmat('smooth.mat') datamoderate = scipy.io.loadmat('medium.mat') datarough = scipy.io.loadmat('rough.mat') simulforce = np.zeros((tSamples,150)) datatime = np.arange(0,4,0.01) dataforceSmooth = np.transpose(datasmooth['force']) dataforceModerate = np.transpose(datamoderate['force']) dataforceRough = np.transpose(datarough['force']) j = 0 for i in dataforceSmooth: simulforce[:,j] = i j = j+1 j = 50 for i in dataforceModerate: simulforce[:,j] = i j = j+1 j = 100 for i in dataforceRough: simulforce[:,j] = i j = j+1 Fmat = np.matrix(simulforce) # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) #print " " #print 'Total_Matrix_Shape:',m_tot,n_tot mu_smooth,sigma_smooth = feature_to_mu_sigma(Fmat[0:tSamples,0:50]) mu_moderate,sigma_moderate = feature_to_mu_sigma(Fmat[0:tSamples,50:100]) mu_rough,sigma_rough = feature_to_mu_sigma(Fmat[0:tSamples,100:150]) #print [mu_smooth, sigma_smooth] # HMM - Implementation: # 10 Hidden States # Force as Continuous Gaussian Observations from each hidden state # Three HMM-Models for Smooth, Moderate, Rough Surfaces # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch) # For new objects, it is classified according to which model it represenst the closest.. 
F = ghmm.Float() # emission domain of this model # A - Transition Matrix A = [[1.0]] # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma) B_smooth = np.zeros((1,2)) B_moderate = np.zeros((1,2)) B_rough = np.zeros((1,2)) for num_states in range(1): B_smooth[num_states,0] = mu_smooth[num_states] B_smooth[num_states,1] = sigma_smooth[num_states] B_moderate[num_states,0] = mu_moderate[num_states] B_moderate[num_states,1] = sigma_moderate[num_states] B_rough[num_states,0] = mu_rough[num_states] B_rough[num_states,1] = sigma_rough[num_states] B_smooth = B_smooth.tolist() B_moderate = B_moderate.tolist() B_rough = B_rough.tolist() # pi - initial probabilities per state pi = [1.0] * 1 # generate Smooth, Moderate, Rough Surface models from parameters model_smooth = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_smooth, pi) # Will be Trained model_moderate = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_moderate, pi) # Will be Trained model_rough = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rough, pi) # Will be Trained trial_number = 1 smooth_final = np.matrix(np.zeros((30,1))) moderate_final = np.matrix(np.zeros((30,1))) rough_final = np.matrix(np.zeros((30,1))) while (trial_number < 6): # For Training total_seq = Fmat[0:tSamples,:] m_total, n_total = np.shape(total_seq) #print 'Total_Sequence_Shape:', m_total, n_total if (trial_number == 1): j = 5 total_seq_smooth = total_seq[0:tSamples,1:5] total_seq_moderate = total_seq[0:tSamples,51:55] total_seq_rough = total_seq[0:tSamples,101:105] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1:j+5])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51:j+55])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101:j+105])) j = j+5 if (trial_number == 2): j = 5 total_seq_smooth = np.column_stack((total_seq[0:tSamples,0],total_seq[0:tSamples,2:5])) total_seq_moderate = np.column_stack((total_seq[0:tSamples,50],total_seq[0:tSamples,52:55])) total_seq_rough = np.column_stack((total_seq[0:tSamples,100],total_seq[0:tSamples,102:105])) while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0],total_seq[0:tSamples,j+2:j+5])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50],total_seq[0:tSamples,j+52:j+55])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100],total_seq[0:tSamples,j+102:j+105])) j = j+5 if (trial_number == 3): j = 5 total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:2],total_seq[0:tSamples,3:5])) total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:52],total_seq[0:tSamples,53:55])) total_seq_rough = np.column_stack((total_seq[0:tSamples,100:102],total_seq[0:tSamples,103:105])) while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+2],total_seq[0:tSamples,j+3:j+5])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+52],total_seq[0:tSamples,j+53:j+55])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+102],total_seq[0:tSamples,j+103:j+105])) j = j+5 if (trial_number == 4): j = 5 total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:3],total_seq[0:tSamples,4:5])) total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:53],total_seq[0:tSamples,54:55])) total_seq_rough = np.column_stack((total_seq[0:tSamples,100:103],total_seq[0:tSamples,104:105])) while (j < 
50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+3],total_seq[0:tSamples,j+4:j+5])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+53],total_seq[0:tSamples,j+54:j+55])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+103],total_seq[0:tSamples,j+104:j+105])) j = j+5 if (trial_number == 5): j = 5 total_seq_smooth = total_seq[0:tSamples,0:4] total_seq_moderate = total_seq[0:tSamples,50:54] total_seq_rough = total_seq[0:tSamples,100:104] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+4])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+54])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+104])) j = j+5 train_seq_smooth = (np.array(total_seq_smooth).T).tolist() train_seq_moderate = (np.array(total_seq_moderate).T).tolist() train_seq_rough = (np.array(total_seq_rough).T).tolist() #m,n = np.shape(train_seq_smooth) #print m,n #print train_seq_smooth final_ts_smooth = ghmm.SequenceSet(F,train_seq_smooth) final_ts_moderate = ghmm.SequenceSet(F,train_seq_moderate) final_ts_rough = ghmm.SequenceSet(F,train_seq_rough) model_smooth.baumWelch(final_ts_smooth) model_moderate.baumWelch(final_ts_moderate) model_rough.baumWelch(final_ts_rough) # For Testing if (trial_number == 1): j = 5 total_seq_smooth = total_seq[0:tSamples,0] total_seq_moderate = total_seq[0:tSamples,50] total_seq_rough = total_seq[0:tSamples,100] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100])) j = j+5 if (trial_number == 2): j = 5 total_seq_smooth = total_seq[0:tSamples,1] total_seq_moderate = total_seq[0:tSamples,51] total_seq_rough = total_seq[0:tSamples,101] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101])) j = j+5 if (trial_number == 3): j = 5 total_seq_smooth = total_seq[0:tSamples,2] total_seq_moderate = total_seq[0:tSamples,52] total_seq_rough = total_seq[0:tSamples,102] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+2])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+52])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+102])) j = j+5 if (trial_number == 4): j = 5 total_seq_smooth = total_seq[0:tSamples,3] total_seq_moderate = total_seq[0:tSamples,53] total_seq_rough = total_seq[0:tSamples,103] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+3])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+53])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+103])) j = j+5 if (trial_number == 5): j = 5 total_seq_smooth = total_seq[0:tSamples,4] total_seq_moderate = total_seq[0:tSamples,54] total_seq_rough = total_seq[0:tSamples,104] while (j < 50): total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+4])) total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+54])) total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+104])) j = j+5 
total_seq_obj = np.matrix(np.column_stack((total_seq_smooth,total_seq_moderate,total_seq_rough))) smooth = np.matrix(np.zeros(np.size(total_seq_obj,1))) moderate = np.matrix(np.zeros(np.size(total_seq_obj,1))) rough = np.matrix(np.zeros(np.size(total_seq_obj,1))) m,n = np.shape(smooth) print m,n k = 0 while (k < np.size(total_seq_obj,1)): test_seq_obj = (np.array(total_seq_obj[0:tSamples,k]).T).tolist() new_test_seq_obj = np.array(sum(test_seq_obj,[])) ts_obj = new_test_seq_obj final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist()) # Find Viterbi Path path_smooth_obj = model_smooth.viterbi(final_ts_obj) path_moderate_obj = model_moderate.viterbi(final_ts_obj) path_rough_obj = model_rough.viterbi(final_ts_obj) obj = max(path_smooth_obj[1],path_moderate_obj[1],path_rough_obj[1]) if obj == path_smooth_obj[1]: smooth[0,k] = 1 elif obj == path_moderate_obj[1]: moderate[0,k] = 1 else: rough[0,k] = 1 k = k+1 #print smooth.T smooth_final = smooth_final + smooth.T moderate_final = moderate_final + moderate.T rough_final = rough_final + rough.T trial_number = trial_number + 1 #print smooth_final #print moderate_final #print rough_final # Confusion Matrix cmat = np.zeros((3,3)) arrsum_smooth = np.zeros((3,1)) arrsum_moderate = np.zeros((3,1)) arrsum_rough= np.zeros((3,1)) k = 10 i = 0 while (k < 31): arrsum_smooth[i] = np.sum(smooth_final[k-10:k,0]) arrsum_moderate[i] = np.sum(moderate_final[k-10:k,0]) arrsum_rough[i] = np.sum(rough_final[k-10:k,0]) i = i+1 k = k+10 i=0 while (i < 3): j=0 while (j < 3): if (i == 0): cmat[i][j] = arrsum_smooth[j] elif (i == 1): cmat[i][j] = arrsum_moderate[j] else: cmat[i][j] = arrsum_rough[j] j = j+1 i = i+1 #print cmat # Plot Confusion Matrix Nlabels = 3 fig = pp.figure() ax = fig.add_subplot(111) figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels]) ax.set_title('Performance of HMM Models') pp.xlabel("Targets") pp.ylabel("Predictions") ax.set_xticks([0.5,1.5,2.5]) ax.set_xticklabels(['Smooth', 'Moderate', 'Rough']) ax.set_yticks([2.5,1.5,0.5]) ax.set_yticklabels(['Smooth', 'Moderate', 'Rough']) figbar = fig.colorbar(figplot) i = 0 while (i < 3): j = 0 while (j < 3): pp.text(j+0.5,2.5-i,cmat[i][j]) j = j+1 i = i+1 pp.show()
mit
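The classification step in the script above reduces to scoring one observation sequence under each trained model and picking the best Viterbi log-likelihood; a condensed sketch of that step using only the ghmm calls already present in the script (the model parameters and force trace are made-up values, not taken from the data):

import ghmm

F = ghmm.Float()

# One-state Gaussian HMMs, as in the script: A is 1x1, B holds (mu, sigma) pairs, pi = [1.0]
model_smooth = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F), [[1.0]], [[0.0, 1.0]], [1.0])
model_rough = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F), [[1.0]], [[5.0, 2.0]], [1.0])

observed = [4.8, 5.3, 4.9, 5.6]                      # made-up force trace
seq = ghmm.EmissionSequence(F, observed)

scores = {'smooth': model_smooth.viterbi(seq)[1],    # viterbi() returns (state path, log-likelihood)
          'rough': model_rough.viterbi(seq)[1]}
print(max(scores, key=scores.get))                   # 'rough'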
xiaoxiamii/scikit-learn
benchmarks/bench_plot_parallel_pairwise.py
297
1247
# Author: Mathieu Blondel <[email protected]> # License: BSD 3 clause import time import pylab as pl from sklearn.utils import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_kernels def plot(func): random_state = check_random_state(0) one_core = [] multi_core = [] sample_sizes = range(1000, 6000, 1000) for n_samples in sample_sizes: X = random_state.rand(n_samples, 300) start = time.time() func(X, n_jobs=1) one_core.append(time.time() - start) start = time.time() func(X, n_jobs=-1) multi_core.append(time.time() - start) pl.figure('scikit-learn parallel %s benchmark results' % func.__name__) pl.plot(sample_sizes, one_core, label="one core") pl.plot(sample_sizes, multi_core, label="multi core") pl.xlabel('n_samples') pl.ylabel('Time (s)') pl.title('Parallel %s' % func.__name__) pl.legend() def euclidean_distances(X, n_jobs): return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs) def rbf_kernels(X, n_jobs): return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1) plot(euclidean_distances) plot(rbf_kernels) pl.show()
bsd-3-clause
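A minimal sketch of the same one-core vs. multi-core comparison for a single matrix size, without the pylab plotting; absolute timings will vary with the machine and the joblib/BLAS configuration:

import time
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

X = np.random.RandomState(0).rand(3000, 300)

for n_jobs in (1, -1):
    start = time.time()
    pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
    print("n_jobs=%2d: %.2fs" % (n_jobs, time.time() - start))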
jnez71/demos
signals/all_pole_modeling.py
1
2452
#!/usr/bin/env python2 """ Tunes a linear IIR filter to have any specified impulse response. The desired impulse response is specified over some finite time window. Then the matched IIR filter can be said to "predict" the rest of this function at times outside the window by simple simulation. Since this tuning is not for a real-time application, instead of using an iterative "adaptive filter" method, analytical least-squares was used. """ from __future__ import division import numpy as np import numpy.linalg as npl from scipy.linalg import toeplitz import matplotlib.pyplot as plt # Function for forming an IIR-filter regression matrix given data x and order M regmat = lambda x, M: np.hstack((np.ones((len(x), 1)), toeplitz(x, np.concatenate(([x[0]], np.zeros(M-1)))))) # Function that takes data x and regression matrix X and returns the autocor, X.T.dot(X), and crosscor, X.T.dot(x) for x = x[1:]. cormats = lambda x, X: (X.T.dot(X), X.T.dot(x[1:])) # Weird function to predict, take your pick func = lambda t: np.arctan(np.cos(-0.01*t)+np.cos(0.01*t+2)+(np.sin(0.01*1/(t+10000)-np.pi/2)+np.sin(0.01*t)*np.sin(0.1*t)**2*np.exp(np.cos(0.001*np.sign(t-100)*np.sin(t))))) # func = lambda t: 0.1*np.random.randn(np.shape(t)[0]) + np.arctan(np.cos(-0.01*t)+np.cos(0.01*t+2)+(np.sin(0.01*1/(t+10000)-np.pi/2)+np.sin(0.01*t)*np.sin(0.1*t)**2*np.exp(np.cos(0.001*np.sign(t-100)*np.sin(t))))) # func = lambda t: np.cos(1*np.exp(-t/500)*t) # Training data samples = 1600 x = func(np.arange(0, samples)) # Filter order M = 800 # Regression matrix X = regmat(x[:-1], M) # Optimal weights w = npl.pinv(X).dot(x[1:]) #### # Full simulation time T = 3000 time = np.arange(0, T) # Prepare to record y_pred = [] y_pred.extend(x) # Let the "filter" run "autonomously" for samples beyond the ones it trained on for i, t in enumerate(np.arange(samples-1, T-1)): filtout = w[0] for d in np.arange(0, M): filtout += w[d+1]*y_pred[t-d] y_pred.append(filtout) print("MSE: {}".format(np.sqrt(np.mean(np.square(np.subtract(func(time[samples:]), y_pred[samples:])))))) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(time, np.concatenate((x, func(time[samples:]))), 'g', linewidth=2, label="Desired Response") ax.plot(time[samples-1:], y_pred[samples-1:], 'k', linewidth=1, label="Filter's Response") ax.set_xlabel("Training Time (left) and Prediction Time (right)", fontsize=22) plt.axvline(samples-1) plt.legend() plt.show()
mit
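The core of the script above is ordinary least squares on a Toeplitz regression matrix (an AR / all-pole fit); a stripped-down sketch of that step with a small filter order, using numpy's lstsq instead of the pseudo-inverse and a simpler made-up signal:

import numpy as np
from scipy.linalg import toeplitz

# Signal to model
t = np.arange(400)
x = np.cos(0.05 * t) + 0.4 * np.sin(0.11 * t)

M = 8   # filter (AR) order
# Row k of X holds [1, x[k], x[k-1], ..., x[k-M+1]], as in regmat() above.
X = np.hstack((np.ones((len(x) - 1, 1)),
               toeplitz(x[:-1], np.concatenate(([x[0]], np.zeros(M - 1))))))
w = np.linalg.lstsq(X, x[1:], rcond=None)[0]

# One-step-ahead prediction from the fitted weights
x_pred = X.dot(w)
print("RMS one-step error:", np.sqrt(np.mean((x[1:] - x_pred) ** 2)))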
tensorflow/datasets
tensorflow_datasets/core/utils/benchmark.py
1
4240
# coding=utf-8 # Copyright 2021 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Benchmark utils.""" import time from typing import Any, Dict, Iterable, Optional, Union from absl import logging import dataclasses from tensorflow_datasets.core.utils import tqdm_utils try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: pd = Any # pylint: disable=logging-format-interpolation StatDict = Dict[str, Union[int, float]] @dataclasses.dataclass(frozen=True) class BenchmarkResult: stats: 'pd.DataFrame' raw_stats: 'pd.DataFrame' def _repr_html_(self) -> str: """Colab/notebook representation.""" return '<strong>BenchmarkResult:</strong><br/>' + self.stats._repr_html_() # pylint: disable=protected-access def benchmark( ds: Iterable[Any], *, num_iter: Optional[int] = None, batch_size: int = 1, ) -> BenchmarkResult: """Benchmarks any iterable (e.g `tf.data.Dataset`). Usage: ```py ds = tfds.load('mnist', split='train').batch(32).prefetch() tfds.benchmark(ds, batch_size=32) ``` Reports: - Total execution time - Setup time (first warmup batch) - Number of examples/sec Args: ds: Dataset to benchmark. Can be any iterable. Note: The iterable will be fully consumed. num_iter: Number of iteration to perform (iteration might be batched) batch_size: Batch size of the dataset, used to normalize iterations Returns: statistics: The recorded statistics, for eventual post-processing """ try: total = len(ds) # pytype: disable=wrong-arg-types except TypeError: total = None # Benchmark the first batch separatelly (setup overhead) start_time = time.perf_counter() ds_iter = iter(ds) try: next(ds_iter) # First warmup batch except StopIteration: raise ValueError('Cannot benchmark dataset with 0 elements.') first_batch_time = time.perf_counter() # Benchmark the following batches i = -1 for i, _ in tqdm_utils.tqdm(enumerate(ds_iter), initial=1, total=total): if num_iter and i > num_iter: break end_time = time.perf_counter() if num_iter and i < num_iter: logging.warning( 'Number of iteration shorter than expected ({} vs {})'.format( i, num_iter)) if i == -1: # No iteration besides the second batch end_time = first_batch_time print('\n************ Summary ************\n') num_examples = (i + 1) * batch_size stats = { 'first+lasts': _log_stats('First included', start_time, end_time, num_examples + batch_size), 'first': _log_stats('First only', start_time, first_batch_time, batch_size), 'lasts': _log_stats('First excluded', first_batch_time, end_time, num_examples) } raw_stats = { 'start_time': start_time, 'first_batch_time': first_batch_time, 'end_time': end_time, 'num_iter': i + 2, # First batch and zero-shifted } stats = pd.DataFrame.from_dict(stats, orient='index') raw_stats = pd.DataFrame.from_dict( raw_stats, orient='index', columns=['duration']) return BenchmarkResult(stats=stats, raw_stats=raw_stats) def _log_stats(msg: str, start: float, end: float, num_examples: int) -> StatDict: """Log and returns stats.""" if not num_examples: stats = { 'duration': 0., 'num_examples': 0, 
'avg': 0., } else: total_time = end - start stats = { 'duration': total_time, 'num_examples': num_examples, 'avg': num_examples / total_time, } print('Examples/sec ({}) {avg:.2f} ex/sec (total: {num_examples} ex, ' '{duration:.2f} sec)'.format(msg, **stats)) return stats
apache-2.0
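Since benchmark() above accepts any iterable, it can also be pointed at a plain Python generator; a small usage sketch, assuming pandas is installed (the sleep only simulates per-batch I/O, and the attribute names follow the BenchmarkResult dataclass defined above):

import time
import tensorflow_datasets as tfds

def slow_batches(n_batches, delay=0.01):
    for _ in range(n_batches):
        time.sleep(delay)        # pretend each batch takes some time to produce
        yield None

result = tfds.benchmark(slow_batches(100), batch_size=32)
print(result.stats)              # pandas DataFrame: duration, num_examples, avg (examples/sec)
print(result.raw_stats)          # start / first-batch / end timestamps and iteration count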
blondegeek/pymatgen
pymatgen/io/abinit/works.py
2
74708
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Works for Abinit """ import os import shutil import time import abc import collections import numpy as np import copy from monty.collections import AttrDict from monty.itertools import chunks from monty.functools import lazy_property from monty.fnmatch import WildCard from pydispatch import dispatcher from pymatgen.core.units import EnergyArray from . import wrappers from .nodes import Dependency, Node, NodeError, NodeResults, FileNode, check_spectator from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, ElasticTask, DdkTask, BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask, TaskManager, DteTask, EphTask, CollinearThenNonCollinearScfTask) from .utils import Directory from .netcdf import ETSF_Reader, NetcdfReader from .abitimer import AbinitTimerParser import logging logger = logging.getLogger(__name__) __author__ = "Matteo Giantomassi" __copyright__ = "Copyright 2013, The Materials Project" __version__ = "0.1" __maintainer__ = "Matteo Giantomassi" __all__ = [ "Work", "BandStructureWork", "RelaxWork", "G0W0Work", "QptdmWork", "SigmaConvWork", "BseMdfWork", "PhononWork", "PhononWfkqWork", "GKKPWork", "BecWork", "DteWork", ] class WorkResults(NodeResults): JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy() @classmethod def from_node(cls, work): """Initialize an instance from a :class:`Work` instance.""" new = super().from_node(work) # Will put all files found in outdir in GridFs # Warning: assuming binary files. d = {os.path.basename(f): f for f in work.outdir.list_filepaths()} new.register_gridfs_files(**d) return new class WorkError(NodeError): """Base class for the exceptions raised by Work objects.""" class BaseWork(Node, metaclass=abc.ABCMeta): Error = WorkError Results = WorkResults # interface modeled after subprocess.Popen @property @abc.abstractmethod def processes(self): """Return a list of objects that support the `subprocess.Popen` protocol.""" def poll(self): """ Check if all child processes have terminated. Set and return returncode attribute. """ return [task.poll() for task in self] def wait(self): """ Wait for child processed to terminate. Set and return returncode attribute. """ return [task.wait() for task in self] def communicate(self, input=None): """ Interact with processes: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child processed, or None, if no data should be sent to the children. communicate() returns a list of tuples (stdoutdata, stderrdata). """ return [task.communicate(input) for task in self] @property def returncodes(self): """ The children return codes, set by poll() and wait() (and indirectly by communicate()). A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (Unix only). """ return [task.returncode for task in self] @property def ncores_reserved(self): """ Returns the number of cores reserved in this moment. A core is reserved if it's still not running but we have submitted the task to the queue manager. """ return sum(task.manager.num_cores for task in self if task.status == task.S_SUB) @property def ncores_allocated(self): """ Returns the number of CPUs allocated in this moment. 
A core is allocated if it's running a task or if we have submitted a task to the queue manager but the job is still pending. """ return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN]) @property def ncores_used(self): """ Returns the number of cores used in this moment. A core is used if there's a job that is running on it. """ return sum(task.manager.num_cores for task in self if task.status == task.S_RUN) def fetch_task_to_run(self): """ Returns the first task that is ready to run or None if no task can be submitted at present" Raises: `StopIteration` if all tasks are done. """ # All the tasks are done so raise an exception # that will be handled by the client code. if all(task.is_completed for task in self): raise StopIteration("All tasks completed.") for task in self: if task.can_run: return task # No task found, this usually happens when we have dependencies. # Beware of possible deadlocks here! logger.warning("Possible deadlock in fetch_task_to_run!") return None def fetch_alltasks_to_run(self): """ Returns a list with all the tasks that can be submitted. Empty list if not task has been found. """ return [task for task in self if task.can_run] @abc.abstractmethod def setup(self, *args, **kwargs): """Method called before submitting the calculations.""" def _setup(self, *args, **kwargs): self.setup(*args, **kwargs) def connect_signals(self): """ Connect the signals within the work. The :class:`Work` is responsible for catching the important signals raised from its task and raise new signals when some particular condition occurs. """ for task in self: dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task) def disconnect_signals(self): """ Disable the signals within the work. This function reverses the process of `connect_signals` """ for task in self: try: dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task) except dispatcher.errors.DispatcherKeyError as exc: logger.debug(str(exc)) @property def all_ok(self): return all(task.status == task.S_OK for task in self) #@check_spectator def on_ok(self, sender): """ This callback is called when one task reaches status `S_OK`. It executes on_all_ok when all tasks in self have reached `S_OK`. """ logger.debug("in on_ok with sender %s" % sender) if self.all_ok: if self.finalized: return AttrDict(returncode=0, message="Work has been already finalized") else: # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work) self.finalized = True try: results = AttrDict(**self.on_all_ok()) except Exception as exc: self.history.critical("on_all_ok raises %s" % str(exc)) self.finalized = False raise # Signal to possible observers that the `Work` reached S_OK self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self)) if self._finalized: self.send_signal(self.S_OK) return results return AttrDict(returncode=1, message="Not all tasks are OK!") #@check_spectator def on_all_ok(self): """ This method is called once the `Work` is completed i.e. when all tasks have reached status S_OK. Subclasses should provide their own implementation Returns: Dictionary that must contain at least the following entries: returncode: 0 on success. message: a string that should provide a human-readable description of what has been performed. """ return dict(returncode=0, message="Calling on_all_ok of the base class!") def get_results(self, **kwargs): """ Method called once the calculations are completed. 
The base version returns a dictionary task_name: TaskResults for each task in self. """ results = self.Results.from_node(self) return results def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None): """ Generate task graph in the DOT language (only parents and children of this work). Args: engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'] graph_attr: Mapping of (attribute, value) pairs for the graph. node_attr: Mapping of (attribute, value) pairs set for all nodes. edge_attr: Mapping of (attribute, value) pairs set for all edges. Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> """ from graphviz import Digraph fg = Digraph("work", #filename="work_%s.gv" % os.path.basename(self.workdir), engine="fdp" if engine == "automatic" else engine) # Set graph attributes. # https://www.graphviz.org/doc/info/ #fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir)) fg.attr(label=repr(self)) #fg.attr(fontcolor="white", bgcolor='purple:pink') fg.attr(rankdir="LR", pagedir="BL") #fg.attr(constraint="false", pack="true", packMode="clust") fg.node_attr.update(color='lightblue2', style='filled') #fg.node_attr.update(ranksep='equally') # Add input attributes. if graph_attr is not None: fg.graph_attr.update(**graph_attr) if node_attr is not None: fg.node_attr.update(**node_attr) if edge_attr is not None: fg.edge_attr.update(**edge_attr) def node_kwargs(node): return dict( #shape="circle", color=node.color_hex, label=(str(node) if not hasattr(node, "pos_str") else node.pos_str + "\n" + node.__class__.__name__), ) edge_kwargs = dict(arrowType="vee", style="solid") cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2") # Build cluster with tasks in *this* work cluster_name = "cluster%s" % self.name with fg.subgraph(name=cluster_name) as wg: wg.attr(**cluster_kwargs) wg.attr(label="%s (%s)" % (self.__class__.__name__, self.name)) for task in self: wg.node(task.name, **node_kwargs(task)) # Connect task to children for child in task.get_children(): # Test if child is in this cluster (self). myg = wg if child in self else fg myg.node(child.name, **node_kwargs(child)) # Find file extensions required by this task i = [dep.node for dep in child.deps].index(task) edge_label = "+".join(child.deps[i].exts) myg.edge(task.name, child.name, label=edge_label, color=task.color_hex, **edge_kwargs) # Connect task to parents for parent in task.get_parents(): # Test if parent is in this cluster (self). myg = wg if parent in self else fg myg.node(parent.name, **node_kwargs(parent)) # Find file extensions required by this task i = [dep.node for dep in task.deps].index(parent) edge_label = "+".join(task.deps[i].exts) myg.edge(parent.name, task.name, label=edge_label, color=parent.color_hex, **edge_kwargs) # Treat the case in which we have a work producing output for tasks in *this* work. 
#for work in self.flow: # children = work.get_children() # if not children or all(child not in self for child in children): # continue # cluster_name = "cluster%s" % work.name # seen = set() # for child in children: # if child not in self: continue # # This is not needed, too much confusing # #fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs) # # Find file extensions required by work # i = [dep.node for dep in child.deps].index(work) # for ext in child.deps[i].exts: # out = "%s (%s)" % (ext, work.name) # fg.node(out) # fg.edge(out, child.name, **edge_kwargs) # key = (cluster_name, out) # if key not in seen: # fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs) # seen.add(key) return fg class NodeContainer(metaclass=abc.ABCMeta): """ Mixin classes for `Work` and `Flow` objects providing helper functions to register tasks in the container. The helper function call the `register` method of the container. """ # TODO: Abstract protocol for containers @abc.abstractmethod def register_task(self, *args, **kwargs): """ Register a task in the container. """ # TODO: shall flow.register_task return a Task or a Work? # Helper functions def register_scf_task(self, *args, **kwargs): """Register a Scf task.""" kwargs["task_class"] = ScfTask return self.register_task(*args, **kwargs) def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs): """Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2""" kwargs["task_class"] = CollinearThenNonCollinearScfTask return self.register_task(*args, **kwargs) def register_nscf_task(self, *args, **kwargs): """Register a nscf task.""" kwargs["task_class"] = NscfTask return self.register_task(*args, **kwargs) def register_relax_task(self, *args, **kwargs): """Register a task for structural optimization.""" kwargs["task_class"] = RelaxTask return self.register_task(*args, **kwargs) def register_phonon_task(self, *args, **kwargs): """Register a phonon task.""" kwargs["task_class"] = PhononTask return self.register_task(*args, **kwargs) def register_elastic_task(self, *args, **kwargs): """Register an elastic task.""" kwargs["task_class"] = ElasticTask return self.register_task(*args, **kwargs) def register_ddk_task(self, *args, **kwargs): """Register a ddk task.""" kwargs["task_class"] = DdkTask return self.register_task(*args, **kwargs) def register_scr_task(self, *args, **kwargs): """Register a screening task.""" kwargs["task_class"] = ScrTask return self.register_task(*args, **kwargs) def register_sigma_task(self, *args, **kwargs): """Register a sigma task.""" kwargs["task_class"] = SigmaTask return self.register_task(*args, **kwargs) def register_dde_task(self, *args, **kwargs): """Register a Dde task.""" kwargs["task_class"] = DdeTask return self.register_task(*args, **kwargs) def register_dte_task(self, *args, **kwargs): """Register a Dte task.""" kwargs["task_class"] = DteTask return self.register_task(*args, **kwargs) def register_bec_task(self, *args, **kwargs): """Register a BEC task.""" kwargs["task_class"] = BecTask return self.register_task(*args, **kwargs) def register_bse_task(self, *args, **kwargs): """Register a Bethe-Salpeter task.""" kwargs["task_class"] = BseTask return self.register_task(*args, **kwargs) def register_eph_task(self, *args, **kwargs): """Register an electron-phonon task.""" kwargs["task_class"] = EphTask return self.register_task(*args, **kwargs) def walknset_vars(self, task_class=None, *args, **kwargs): """ Set the values of the ABINIT variables in the input 
files of the nodes Args: task_class: If not None, only the input files of the tasks belonging to class `task_class` are modified. Example: flow.walknset_vars(ecut=10, kptopt=4) """ def change_task(task): if task_class is not None and task.__class__ is not task_class: return False return True if self.is_work: for task in self: if not change_task(task): continue task.set_vars(*args, **kwargs) elif self.is_flow: for task in self.iflat_tasks(): if not change_task(task): continue task.set_vars(*args, **kwargs) else: raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__) class Work(BaseWork, NodeContainer): """ A Work is a list of (possibly connected) tasks. """ def __init__(self, workdir=None, manager=None): """ Args: workdir: Path to the working directory. manager: :class:`TaskManager` object. """ super().__init__() self._tasks = [] if workdir is not None: self.set_workdir(workdir) if manager is not None: self.set_manager(manager) def set_manager(self, manager): """Set the :class:`TaskManager` to use to launch the :class:`Task`.""" self.manager = manager.deepcopy() for task in self: task.set_manager(manager) @property def flow(self): """The flow containing this :class:`Work`.""" return self._flow def set_flow(self, flow): """Set the flow associated to this :class:`Work`.""" if not hasattr(self, "_flow"): self._flow = flow else: if self._flow != flow: raise ValueError("self._flow != flow") @lazy_property def pos(self): """The position of self in the :class:`Flow`""" for i, work in enumerate(self.flow): if self == work: return i raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow)) @property def pos_str(self): """String representation of self.pos""" return "w" + str(self.pos) def set_workdir(self, workdir, chroot=False): """Set the working directory. Cannot be set more than once unless chroot is True""" if not chroot and hasattr(self, "workdir") and self.workdir != workdir: raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir)) self.workdir = os.path.abspath(workdir) # Directories with (input|output|temporary) data. # The work will use these directories to connect # itself to other works and/or to produce new data # that will be used by its children. self.indir = Directory(os.path.join(self.workdir, "indata")) self.outdir = Directory(os.path.join(self.workdir, "outdata")) self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata")) self.wdir = Directory(self.workdir) def chroot(self, new_workdir): self.set_workdir(new_workdir, chroot=True) for i, task in enumerate(self): new_tdir = os.path.join(self.workdir, "t" + str(i)) task.set_workdir(new_tdir, chroot=True) def __len__(self): return len(self._tasks) def __iter__(self): return self._tasks.__iter__() def __getitem__(self, slice): return self._tasks[slice] def chunks(self, chunk_size): """Yield successive chunks of tasks of lenght chunk_size.""" for tasks in chunks(self, chunk_size): yield tasks def opath_from_ext(self, ext): """ Returns the path of the output file with extension ext. Use it when the file does not exist yet. """ return self.indir.path_in("in_" + ext) def opath_from_ext(self, ext): """ Returns the path of the output file with extension ext. Use it when the file does not exist yet. 
""" return self.outdir.path_in("out_" + ext) @property def processes(self): return [task.process for task in self] @property def all_done(self): """True if all the :class:`Task` objects in the :class:`Work` are done.""" return all(task.status >= task.S_DONE for task in self) @property def isnc(self): """True if norm-conserving calculation.""" return all(task.isnc for task in self) @property def ispaw(self): """True if PAW calculation.""" return all(task.ispaw for task in self) @property def status_counter(self): """ Returns a `Counter` object that counts the number of task with given status (use the string representation of the status as key). """ counter = collections.Counter() for task in self: counter[str(task.status)] += 1 return counter def allocate(self, manager=None): """ This function is called once we have completed the initialization of the :class:`Work`. It sets the manager of each task (if not already done) and defines the working directories of the tasks. Args: manager: :class:`TaskManager` object or None """ for i, task in enumerate(self): if not hasattr(task, "manager"): # Set the manager # Use the one provided in input else the one of the work/flow. if manager is not None: task.set_manager(manager) else: # Look first in work and then in the flow. if hasattr(self, "manager"): task.set_manager(self.manager) else: task.set_manager(self.flow.manager) task_workdir = os.path.join(self.workdir, "t" + str(i)) if not hasattr(task, "workdir"): task.set_workdir(task_workdir) else: if task.workdir != task_workdir: raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir)) def register(self, obj, deps=None, required_files=None, manager=None, task_class=None): """ Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies. Args: obj: :class:`AbinitInput` instance or `Task` object. deps: Dictionary specifying the dependency of this node or list of dependencies None means that this obj has no dependency. required_files: List of strings with the path of the files used by the task. Note that the files must exist when the task is registered. Use the standard approach based on Works, Tasks and deps if the files will be produced in the future. manager: The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use the `TaskManager` specified during the creation of the :class:`Work`. task_class: Task subclass to instantiate. Default: :class:`AbinitTask` Returns: :class:`Task` object """ task_workdir = None if hasattr(self, "workdir"): task_workdir = os.path.join(self.workdir, "t" + str(len(self))) if isinstance(obj, Task): task = obj else: # Set the class if task_class is None: task_class = AbinitTask task = task_class.from_input(obj, task_workdir, manager) self._tasks.append(task) # Handle possible dependencies given either as dict or list. if deps is not None: if hasattr(deps, "items"): deps = [Dependency(node, exts) for node, exts in deps.items()] task.add_deps(deps) # Handle possible dependencies. if required_files is not None: task.add_required_files(required_files) return task # Needed by NodeContainer register_task = register def path_in_workdir(self, filename): """Create the absolute path of filename in the working directory.""" return os.path.join(self.workdir, filename) def setup(self, *args, **kwargs): """ Method called before running the calculations. The default implementation is empty. 
""" def build(self, *args, **kwargs): """Creates the top level directory.""" # Create the directories of the work. self.indir.makedirs() self.outdir.makedirs() self.tmpdir.makedirs() # Build dirs and files of each task. for task in self: task.build(*args, **kwargs) # Connect signals within the work. self.connect_signals() @property def status(self): """ Returns the status of the work i.e. the minimum of the status of the tasks. """ return self.get_all_status(only_min=True) def get_all_status(self, only_min=False): """ Returns a list with the status of the tasks in self. Args: only_min: If True, the minimum of the status is returned. """ if len(self) == 0: # The work will be created in the future. if only_min: return self.S_INIT else: return [self.S_INIT] self.check_status() status_list = [task.status for task in self] if only_min: return min(status_list) else: return status_list def check_status(self): """Check the status of the tasks.""" # Recompute the status of the tasks # Ignore OK and LOCKED tasks. for task in self: if task.status in (task.S_OK, task.S_LOCKED): continue task.check_status() # Take into account possible dependencies. Use a list instead of generators for task in self: if task.status == task.S_LOCKED: continue if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status): task.set_status(task.S_READY, "Status set to Ready") def rmtree(self, exclude_wildcard=""): """ Remove all files and directories in the working directory Args: exclude_wildcard: Optional string with regular expressions separated by `|`. Files matching one of the regular expressions will be preserved. example: exclude_wildard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"]. """ if not exclude_wildcard: shutil.rmtree(self.workdir) else: w = WildCard(exclude_wildcard) for dirpath, dirnames, filenames in os.walk(self.workdir): for fname in filenames: path = os.path.join(dirpath, fname) if not w.match(fname): os.remove(path) def rm_indatadir(self): """Remove all the indata directories.""" for task in self: task.rm_indatadir() def rm_outdatadir(self): """Remove all the indata directories.""" for task in self: task.rm_outatadir() def rm_tmpdatadir(self): """Remove all the tmpdata directories.""" for task in self: task.rm_tmpdatadir() def move(self, dest, isabspath=False): """ Recursively move self.workdir to another location. This is similar to the Unix "mv" command. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. Be default, dest is located in the parent directory of self.workdir, use isabspath=True to specify an absolute path. """ if not isabspath: dest = os.path.join(os.path.dirname(self.workdir), dest) shutil.move(self.workdir, dest) def submit_tasks(self, wait=False): """ Submits the task in self and wait. TODO: change name. """ for task in self: task.start() if wait: for task in self: task.wait() def start(self, *args, **kwargs): """ Start the work. Calls build and _setup first, then submit the tasks. Non-blocking call unless wait is set to True """ wait = kwargs.pop("wait", False) # Initial setup self._setup(*args, **kwargs) # Build dirs and files. self.build(*args, **kwargs) # Submit tasks (does not block) self.submit_tasks(wait=wait) def read_etotals(self, unit="Ha"): """ Reads the total energy from the GSR file produced by the task. 
Return a numpy array with the total energies in Hartree The array element is set to np.inf if an exception is raised while reading the GSR file. """ if not self.all_done: raise self.Error("Some task is still in running/submitted state") etotals = [] for task in self: # Open the GSR file and read etotal (Hartree) gsr_path = task.outdir.has_abiext("GSR") etot = np.inf if gsr_path: with ETSF_Reader(gsr_path) as r: etot = r.read_value("etotal") etotals.append(etot) return EnergyArray(etotals, "Ha").to(unit) def parse_timers(self): """ Parse the TIMER section reported in the ABINIT output files. Returns: :class:`AbinitTimerParser` object """ filenames = list(filter(os.path.exists, [task.output_file.path for task in self])) parser = AbinitTimerParser() parser.parse(filenames) return parser class BandStructureWork(Work): """Work for band structure calculations.""" def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None): """ Args: scf_input: Input for the SCF run nscf_input: Input for the NSCF run defining the band structure calculation. dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None. workdir: Working directory. manager: :class:`TaskManager` object. """ super().__init__(workdir=workdir, manager=manager) # Register the GS-SCF run. self.scf_task = self.register_scf_task(scf_input) # Register the NSCF run and its dependency. self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) # Add DOS computation(s) if requested. self.dos_tasks = [] if dos_inputs is not None: if not isinstance(dos_inputs, (list, tuple)): dos_inputs = [dos_inputs] for dos_input in dos_inputs: dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"}) self.dos_tasks.append(dos_task) def plot_ebands(self, **kwargs): """ Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`. Returns: `matplotlib` figure """ with self.nscf_task.open_gsr() as gsr: return gsr.ebands.plot(**kwargs) def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs): """ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task). method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot. Returns: `matplotlib` figure. """ with self.nscf_task.open_gsr() as gsr: gs_ebands = gsr.ebands with self.dos_tasks[dos_pos].open_gsr() as gsr: dos_ebands = gsr.ebands edos = dos_ebands.get_edos(method=method, step=step, width=width) return gs_ebands.plot_with_edos(edos, **kwargs) def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs): """ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained. None is all DOSes should be displayed. Accepts integer or list of integers. method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot` method to customize the plot. Returns: `matplotlib` figure. 
""" if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos] from abipy.electrons.ebands import ElectronDosPlotter plotter = ElectronDosPlotter() for i, task in enumerate(self.dos_tasks): if dos_pos is not None and i not in dos_pos: continue with task.open_gsr() as gsr: edos = gsr.ebands.get_edos(method=method, step=step, width=width) ngkpt = task.get_inpvar("ngkpt") plotter.add_edos("ngkpt %s" % str(ngkpt), edos) return plotter.combiplot(**kwargs) class RelaxWork(Work): """ Work for structural relaxations. The first task relaxes the atomic position while keeping the unit cell parameters fixed. The second task uses the final structure to perform a structural relaxation in which both the atomic positions and the lattice parameters are optimized. """ def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None): """ Args: ion_input: Input for the relaxation of the ions (cell is fixed) ioncell_input: Input for the relaxation of the ions and the unit cell. workdir: Working directory. manager: :class:`TaskManager` object. """ super().__init__(workdir=workdir, manager=manager) self.ion_task = self.register_relax_task(ion_input) # Note: # 1) It would be nice to restart from the WFK file but ABINIT crashes due to the # different unit cell parameters if paral_kgb == 1 #paral_kgb = ion_input[0]["paral_kgb"] #if paral_kgb == 1: #deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf #deps = {self.ion_task: "DEN"} deps = None self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps) # Lock ioncell_task as ion_task should communicate to ioncell_task that # the calculation is OK and pass the final structure. self.ioncell_task.lock(source_node=self) self.transfer_done = False self.target_dilatmx = target_dilatmx #@check_spectator def on_ok(self, sender): """ This callback is called when one task reaches status S_OK. If sender == self.ion_task, we update the initial structure used by self.ioncell_task and we unlock it so that the job can be submitted. """ logger.debug("in on_ok with sender %s" % sender) if sender == self.ion_task and not self.transfer_done: # Get the relaxed structure from ion_task ion_structure = self.ion_task.get_final_structure() # Transfer it to the ioncell task (we do it only once). self.ioncell_task._change_structure(ion_structure) self.transfer_done = True # Unlock ioncell_task so that we can submit it. self.ioncell_task.unlock(source_node=self) elif sender == self.ioncell_task and self.target_dilatmx: actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.) if self.target_dilatmx < actual_dilatmx: self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx) self.history.info('Converging dilatmx. Value reduce from {} to {}.' .format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx'))) self.ioncell_task.reset_from_scratch() return super().on_ok(sender) def plot_ion_relaxation(self, **kwargs): """ Plot the history of the ion-cell relaxation. kwargs are passed to the plot method of :class:`HistFile` Return `matplotlib` figure or None if hist file is not found. """ with self.ion_task.open_hist() as hist: return hist.plot(**kwargs) if hist else None def plot_ioncell_relaxation(self, **kwargs): """ Plot the history of the ion-cell relaxation. kwargs are passed to the plot method of :class:`HistFile` Return `matplotlib` figure or None if hist file is not found. 
""" with self.ioncell_task.open_hist() as hist: return hist.plot(**kwargs) if hist else None class G0W0Work(Work): """ Work for general G0W0 calculations. All input can be either single inputs or lists of inputs """ def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs, workdir=None, manager=None): """ Args: scf_inputs: Input(s) for the SCF run, if it is a list add all but only link to the last input (used for convergence studies on the KS band gap) nscf_inputs: Input(s) for the NSCF run, if it is a list add all but only link to the last (i.e. addditiona DOS and BANDS) scr_inputs: Input for the screening run sigma_inputs: List of :class:AbinitInput`for the self-energy run. if scr and sigma are lists of the same length, every sigma gets its own screening. if there is only one screening all sigma inputs are linked to this one workdir: Working directory of the calculation. manager: :class:`TaskManager` object. """ super().__init__(workdir=workdir, manager=manager) spread_scr = (isinstance(sigma_inputs, (list, tuple)) and isinstance(scr_inputs, (list, tuple)) and len(sigma_inputs) == len(scr_inputs)) #print("spread_scr", spread_scr) self.sigma_tasks = [] # Register the GS-SCF run. # register all scf_inputs but link the nscf only the last scf in the list # multiple scf_inputs can be provided to perform convergence studies if isinstance(scf_inputs, (list, tuple)): for scf_input in scf_inputs: self.scf_task = self.register_scf_task(scf_input) else: self.scf_task = self.register_scf_task(scf_inputs) # Register the NSCF run (s). if isinstance(nscf_inputs, (list, tuple)): for nscf_input in nscf_inputs: self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) else: self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"}) # Register the SCR and SIGMA run(s). if spread_scr: for scr_input, sigma_input in zip(scr_inputs, sigma_inputs): scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"}) sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"}) self.sigma_tasks.append(sigma_task) else: # Sigma work(s) connected to the same screening. scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"}) if isinstance(sigma_inputs, (list, tuple)): for inp in sigma_inputs: task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"}) self.sigma_tasks.append(task) else: task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"}) self.sigma_tasks.append(task) class SigmaConvWork(Work): """ Work for self-energy convergence studies. """ def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None): """ Args: wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file. scr_node: The node who has produced the SCR file or filepath pointing to the SCR file. sigma_inputs: List of :class:`AbinitInput` for the self-energy runs. workdir: Working directory of the calculation. manager: :class:`TaskManager` object. """ # Cast to node instances. wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node) super().__init__(workdir=workdir, manager=manager) # Register the SIGMA runs. 
if not isinstance(sigma_inputs, (list, tuple)): sigma_inputs = [sigma_inputs] for sigma_input in sigma_inputs: self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"}) class BseMdfWork(Work): """ Work for simple BSE calculations in which the self-energy corrections are approximated by the scissors operator and the screening is modeled with the model dielectric function. """ def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None): """ Args: scf_input: Input for the SCF run. nscf_input: Input for the NSCF run. bse_inputs: List of Inputs for the BSE run. workdir: Working directory of the calculation. manager: :class:`TaskManager`. """ super().__init__(workdir=workdir, manager=manager) # Register the GS-SCF run. self.scf_task = self.register_scf_task(scf_input) # Construct the input for the NSCF run. self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"}) # Construct the input(s) for the BSE run. if not isinstance(bse_inputs, (list, tuple)): bse_inputs = [bse_inputs] for bse_input in bse_inputs: self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"}) def get_mdf_robot(self): """Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files.""" from abilab.robots import MdfRobot robot = MdfRobot() for task in self[2:]: mdf_path = task.outdir.has_abiext(robot.EXT) if mdf_path: robot.add_file(str(task), mdf_path) return robot class QptdmWork(Work): """ This work parallelizes the calculation of the q-points of the screening. It also provides the callback `on_all_ok` that calls mrgscr to merge all the partial screening files produced. """ def create_tasks(self, wfk_file, scr_input): """ Create the SCR tasks and register them in self. Args: wfk_file: Path to the ABINIT WFK file to use for the computation of the screening. scr_input: Input for the screening calculation. """ assert len(self) == 0 wfk_file = self.wfk_file = os.path.abspath(wfk_file) # Build a temporary work in the tmpdir that will use a shell manager # to run ABINIT in order to get the list of q-points for the screening. shell_manager = self.manager.to_shell_manager(mpi_procs=1) w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager) fake_input = scr_input.deepcopy() fake_task = w.register(fake_input) w.allocate() w.build() # Create the symbolic link and add the magic value # nqpdm = -1 to the input to get the list of q-points. fake_task.inlink_file(wfk_file) fake_task.set_vars({"nqptdm": -1}) fake_task.start_and_wait() # Parse the section with the q-points with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader: qpoints = reader.read_value("reduced_coordinates_of_kpoints") #print("qpoints) # Now we can register the task for the different q-points for qpoint in qpoints: qptdm_input = scr_input.deepcopy() qptdm_input.set_vars(nqptdm=1, qptdm=qpoint) new_task = self.register_scr_task(qptdm_input, manager=self.manager) # Add the garbage collector. if self.flow.gc is not None: new_task.set_gc(self.flow.gc) self.allocate() def merge_scrfiles(self, remove_scrfiles=True): """ This method is called when all the q-points have been computed. It runs `mrgscr` in sequential on the local machine to produce the final SCR file in the outdir of the `Work`. If remove_scrfiles is True, the partial SCR files are removed after the merge. 
""" scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self])) self.history.info("Will call mrgscr to merge %s SCR files:\n" % len(scr_files)) assert len(scr_files) == len(self) mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1) final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out") if remove_scrfiles: for scr_file in scr_files: try: os.remove(scr_file) except IOError: pass return final_scr #@check_spectator def on_all_ok(self): """ This method is called when all the q-points have been computed. It runs `mrgscr` in sequential on the local machine to produce the final SCR file in the outdir of the `Work`. """ final_scr = self.merge_scrfiles() return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr) # TODO: MergeDdb --> DfptWork(Work) postpone it because it may break pickle. class MergeDdb: """Mixin class for Works that have to merge the DDB files produced by the tasks.""" def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance): """ Build tasks for the computation of Born effective charges and add them to the work. Args: scf_task: ScfTask object. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run. None to use AbiPy default. ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run. None to use AbiPy default. Return: (ddk_tasks, bec_tasks) """ if not isinstance(scf_task, ScfTask): raise TypeError("task `%s` does not inherit from ScfTask" % scf_task) # DDK calculations (self-consistent to get electric field). multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance) ddk_tasks = [] for ddk_inp in multi_ddk: ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"}) ddk_tasks.append(ddk_task) # Build the list of inputs for electric field perturbation and phonons # Each BEC task is connected to all the previous DDK task and to the scf_task. bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks} bec_deps.update({scf_task: "WFK"}) bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance) bec_tasks = [] for bec_inp in bec_inputs: bec_task = self.register_bec_task(bec_inp, deps=bec_deps) bec_tasks.append(bec_task) return ddk_tasks, bec_tasks def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True, exclude_tasks=None, include_tasks=None): """ This method is called when all the q-points have been computed. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. Args: delete_source_ddbs: True if input DDB should be removed once final DDB is created. only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work Useful e.g. for finite stress corrections in which the stress in the initial configuration should be merged in the final DDB. exclude_tasks: List of tasks that should be excluded when merging the partial DDB files. include_tasks: List of tasks that should be included when merging the partial DDB files. Mutually exclusive with exclude_tasks. 
Returns: path to the output DDB file """ if exclude_tasks: my_tasks = [task for task in self if task not in exclude_tasks] elif include_tasks: my_tasks = [task for task in self if task in include_tasks] else: my_tasks = [task for task in self] if only_dfpt_tasks: ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks \ if isinstance(task, DfptTask)])) else: ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks])) self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files)) # DDB files are always produces so this should never happen! if not ddb_files: raise RuntimeError("Cannot find any DDB file to merge by the task of " % self) # Final DDB file will be produced in the outdir of the work. out_ddb = self.outdir.path_in("out_DDB") if len(ddb_files) == 1: # Avoid the merge. Just copy the DDB file to the outdir of the work. shutil.copy(ddb_files[0], out_ddb) else: # Call mrgddb desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime()) mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0) mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc, delete_source_ddbs=delete_source_ddbs) return out_ddb def merge_pot1_files(self, delete_source=True): """ This method is called when all the q-points have been computed. It runs `mrgdvdb` in sequential on the local machine to produce the final DVDB file in the outdir of the `Work`. Args: delete_source: True if POT1 files should be removed after (successful) merge. Returns: path to the output DVDB file. None if not DFPT POT file is found. """ natom = len(self[0].input.structure) max_pertcase = 3 * natom pot1_files = [] for task in self: if not isinstance(task, DfptTask): continue paths = task.outdir.list_filepaths(wildcard="*_POT*") for path in paths: # Include only atomic perturbations i.e. files whose ext <= 3 * natom i = path.rindex("_POT") pertcase = int(path[i+4:].replace(".nc", "")) if pertcase <= max_pertcase: pot1_files.append(path) # prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here. if not pot1_files: return None self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files)) # Final DDB file will be produced in the outdir of the work. out_dvdb = self.outdir.path_in("out_DVDB") if len(pot1_files) == 1: # Avoid the merge. Just move the DDB file to the outdir of the work shutil.copy(pot1_files[0], out_dvdb) else: # FIXME: The merge may require a non-negligible amount of memory if lots of qpts. # Besides there are machines such as lemaitre3 that are problematic when # running MPI applications on the front-end mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0) mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source) return out_dvdb class PhononWork(Work, MergeDdb): """ This work consists of nirred Phonon tasks where nirred is the number of irreducible atomic perturbations for a given set of q-points. It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge all the partial DDB (POT) files produced. The two files are available in the output directory of the Work. """ @classmethod def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, with_becs=False, ddk_tolerance=None, manager=None): """ Construct a `PhononWork` from a :class:`ScfTask` object. The input file for phonons is automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by the `scf_task`. 
Args: scf_task: ScfTask object. qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points or three integers defining the q-mesh if `is_ngqpt`. is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points. tolerance: dict {"varname": value} with the tolerance to be used in the phonon run. None to use AbiPy default. with_becs: Activate calculation of Electric field and Born effective charges. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs. None to use AbiPy default. manager: :class:`TaskManager` object. """ if not isinstance(scf_task, ScfTask): raise TypeError("task `%s` does not inherit from ScfTask" % scf_task) if is_ngqpt: qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points qpoints = np.reshape(qpoints, (-1, 3)) new = cls(manager=manager) if with_becs: new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance) for qpt in qpoints: if with_becs and np.sum(qpt ** 2) < 1e-12: continue multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance) for ph_inp in multi: new.register_phonon_task(ph_inp, deps={scf_task: "WFK"}) return new @classmethod def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None, with_becs=False, ddk_tolerance=None, manager=None): """ Similar to `from_scf_task`, the difference is that this method requires an input for SCF calculation. A new ScfTask is created and added to the Work. This API should be used if the DDB of the GS task should be merged. """ if is_ngqpt: qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points qpoints = np.reshape(qpoints, (-1, 3)) new = cls(manager=manager) # Create ScfTask scf_task = new.register_scf_task(scf_input) if with_becs: new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance) for qpt in qpoints: if with_becs and np.sum(qpt ** 2) < 1e-12: continue multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance) for ph_inp in multi: new.register_phonon_task(ph_inp, deps={scf_task: "WFK"}) return new #@check_spectator def on_all_ok(self): """ This method is called when all the q-points have been computed. Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. out_ddb = self.merge_ddb_files() # Merge DVDB files. out_dvdb = self.merge_pot1_files() return self.Results(node=self, returncode=0, message="DDB merge done") class PhononWfkqWork(Work, MergeDdb): """ This work computes phonons with DFPT on an arbitrary q-mesh (usually denser than the k-mesh for electrons) by computing WKQ files for each q-point. The number of irreducible atomic perturbations for each q-point are taken into account. It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge all the partial DDB (POT) files produced. The two files are available in the output directory of the Work. The WKQ files are removed at runtime. """ @classmethod def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1.0e-22, nband=None, with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True, manager=None): """ Construct a `PhononWfkqWork` from a :class:`ScfTask` object. The input files for WFQ and phonons are automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file. Args: scf_task: ScfTask object. 
ngqpt: three integers defining the q-mesh with_becs: Activate calculation of Electric field and Born effective charges. ph_tolerance: dict {"varname": value} with the tolerance for the phonon run. None to use AbiPy default. tolwfr: tolerance used to compute WFQ. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs. None to use AbiPy default. shiftq: Q-mesh shift. Multiple shifts are not supported. is_ngqpt: the ngqpt is interpreted as a set of integers defining the q-mesh, otherwise is an explicit list of q-points remove_wfkq: Remove WKQ files when the children are completed. manager: :class:`TaskManager` object. .. note: Use k-meshes with one shift and q-meshes that are multiple of ngkpt to decrease the number of WFQ files to be computed. """ if not isinstance(scf_task, ScfTask): raise TypeError("task `%s` does not inherit from ScfTask" % scf_task) shiftq = np.reshape(shiftq, (3,)) if is_ngqpt: qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points else: qpoints = ngqpt new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfkq_task_children = collections.defaultdict(list) if with_becs: # Add DDK and BECS. new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance) # Get ngkpt, shift for electrons from input. # Won't try to skip WFQ if multiple shifts or off-diagonal kptrlatt ngkpt, shiftk = scf_task.input.get_ngkpt_shiftk() try_to_skip_wfkq = True if ngkpt is None or len(shiftk) > 1 and is_ngqpt: try_to_skip_wfkq = True # TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating. # but this has to be done inside Abinit. for qpt in qpoints: is_gamma = np.sum(qpt ** 2) < 1e-12 if with_becs and is_gamma: continue # Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported) need_wfkq = True if is_gamma: need_wfkq = False elif try_to_skip_wfkq: # k = (i + shiftk) / ngkpt qinds = np.rint(qpt * ngqpt - shiftq) f = (qinds * ngkpt) % ngqpt need_wfkq = np.any(f != 0) if need_wfkq: nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr) if nband: nbdbuf = max(2,nband*0.1) nscf_inp.set_vars(nband=nband+nbdbuf, nbdbuf=nbdbuf) wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ["DEN", "WFK"]}) new.wfkq_tasks.append(wfkq_task) multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance) for ph_inp in multi: deps = {scf_task: "WFK", wfkq_task: "WFQ"} if need_wfkq else {scf_task: "WFK"} #ph_inp["prtwf"] = -1 t = new.register_phonon_task(ph_inp, deps=deps) if need_wfkq: new.wfkq_task_children[wfkq_task].append(t) return new def on_ok(self, sender): """ This callback is called when one task reaches status `S_OK`. It removes the WFKQ file if all its children have reached `S_OK`. """ if self.remove_wfkq: for task in self.wfkq_tasks: if task.status != task.S_OK: continue children = self.wfkq_task_children[task] if all(child.status == child.S_OK for child in children): path = task.outdir.has_abiext("WFQ") if path: self.history.info("Removing WFQ: %s" % path) os.remove(path) return super().on_ok(sender) #@check_spectator def on_all_ok(self): """ This method is called when all the q-points have been computed. Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. out_ddb = self.merge_ddb_files() # Merge DVDB files. 
out_dvdb = self.merge_pot1_files() return self.Results(node=self, returncode=0, message="DDB merge done") class GKKPWork(Work): """ This work computes electron-phonon matrix elements for all the q-points present in a DVDB and DDB file """ @classmethod def from_den_ddb_dvdb(cls, inp, den_path, ddb_path, dvdb_path, mpiprocs=1, remove_wfkq=True, qpath=None, with_ddk=True, expand=True, manager=None): """ Construct a `PhononWfkqWork` from a DDB and DVDB file. For each q found, a WFQ task and an EPH task computing the matrix elements are created. """ import abipy.abilab as abilab # Create file nodes den_file = FileNode(den_path) ddb_file = FileNode(ddb_path) dvdb_file = FileNode(dvdb_path) # Create new work new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfkq_task_children = collections.defaultdict(list) if manager is None: manager = TaskManager.from_user_config() tm = manager.new_with_fixed_mpi_omp(mpiprocs, 1) # Create a WFK task kptopt = 1 if expand else 3 nscf_inp = inp.new_with_vars(iscf=-2, kptopt=kptopt) wfk_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"},manager=tm) new.wfkq_tasks.append(wfk_task) new.wfk_task = wfk_task # Read path and regular grid from DDB file with abilab.abiopen(ddb_path) as ddb: q_frac_coords = np.array([k.frac_coords for k in ddb.qpoints]) ddb_ngqpt = ddb.guessed_ngqpt # If qpath is set, we read the list of q-points to be used to interpolate the DVDB file. # The DVDB and DDB file have to correspond to a regular grid. dvdb = dvdb_file if qpath is None: qpath = q_frac_coords else: interp_inp = inp.new_with_vars(optdriver=7, eph_task=-5, ddb_ngqpt=ddb_ngqpt, ph_nqpath=len(qpath), ph_qpath=qpath, prtphdos=0) dvdb = new.register_eph_task(interp_inp, deps={wfk_task: "WFK", ddb_file: "DDB", dvdb_file: "DVDB"}, manager=tm) # Create a WFK expansion task if expand: fbz_nscf_inp = inp.new_with_vars(optdriver=8) fbz_nscf_inp.set_spell_check(False) fbz_nscf_inp.set_vars(wfk_task="wfk_fullbz") tm_serial = manager.new_with_fixed_mpi_omp(1,1) wfk_task = new.register_nscf_task(fbz_nscf_inp, deps={wfk_task: "WFK", den_file: "DEN"}, manager=tm_serial) new.wfkq_tasks.append(wfk_task) new.wfk_task = wfk_task if with_ddk: kptopt = 3 if expand else 1 ddk_inp = inp.new_with_vars(optdriver=8,kptopt=kptopt) ddk_inp.set_spell_check(False) ddk_inp.set_vars(wfk_task="wfk_ddk") ddk_task = new.register_nscf_task(ddk_inp, deps={wfk_task: "WFK", den_file: "DEN"}, manager=tm) new.wfkq_tasks.append(ddk_task) # For each qpoint for qpt in qpath: is_gamma = np.sum(qpt ** 2) < 1e-12 if is_gamma: # Create a link from WFK to WFQ on_ok wfkq_task = wfk_task deps = {wfk_task: ["WFK","WFQ"], ddb_file: "DDB", dvdb: "DVDB" } else: # Create a WFQ task nscf_inp = nscf_inp.new_with_vars(kptopt=3, qpt=qpt, nqpt=1) wfkq_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"}, manager=tm) new.wfkq_tasks.append(wfkq_task) deps = {wfk_task: "WFK", wfkq_task: "WFQ", ddb_file: "DDB", dvdb: "DVDB" } # Create a EPH task eph_inp = inp.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, kptopt=3, ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt) t = new.register_eph_task(eph_inp, deps=deps, manager=tm) new.wfkq_task_children[wfkq_task].append(t) return new @classmethod def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None): """ Construct a `GKKPWork` from a `PhononWfkqWork` object. 
The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands """ # Get list of qpoints from the the phonon tasks in this work qpoints = [] qpoints_deps = [] for task in phononwfkq_work: if isinstance(task,PhononTask): # Store qpoints qpt = task.input.get("qpt", [0,0,0]) qpoints.append(qpt) # Store dependencies qpoints_deps.append(task.deps) # Create file nodes ddb_path = phononwfkq_work.outdir.has_abiext("DDB") dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB") ddb_file = FileNode(ddb_path) dvdb_file = FileNode(dvdb_path) # Get scf_task from first q-point for dep in qpoints_deps[0]: if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK': scf_task = dep.node # Create new work new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfk_task = [] # Add one eph task per qpoint for qpt,qpoint_deps in zip(qpoints,qpoints_deps): # Create eph task eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt) deps = {ddb_file: "DDB", dvdb_file: "DVDB" } for dep in qpoint_deps: deps[dep.node] = dep.exts[0] # If no WFQ in deps link the WFK with WFQ extension if 'WFQ' not in deps.values(): inv_deps = dict((v, k) for k, v in deps.items()) wfk_task = inv_deps['WFK'] wfk_path = wfk_task.outdir.has_abiext("WFK") # Check if netcdf filename, extension = os.path.splitext(wfk_path) infile = 'out_WFQ' + extension wfq_path = os.path.join(os.path.dirname(wfk_path), infile) if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path) deps[FileNode(wfq_path)] = 'WFQ' new.register_eph_task(eph_input, deps=deps) return new def on_ok(self, sender): """ This callback is called when one task reaches status `S_OK`. It removes the WFKQ file if all its children have reached `S_OK`. """ if self.remove_wfkq: for task in self.wfkq_tasks: if task.status != task.S_OK: continue children = self.wfkq_task_children[task] if all(child.status == child.S_OK for child in children): path = task.outdir.has_abiext("WFQ") if path: self.history.info("Removing WFQ: %s" % path) os.remove(path) # If wfk task we create a link to a wfq file so abinit is happy if sender == self.wfk_task: wfk_path = self.wfk_task.outdir.has_abiext("WFK") # Check if netcdf filename, extension = os.path.splitext(wfk_path) infile = 'out_WFQ' + extension infile = os.path.join(os.path.dirname(wfk_path), infile) os.symlink(wfk_path, infile) return super().on_ok(sender) class BecWork(Work, MergeDdb): """ Work for the computation of the Born effective charges. This work consists of DDK tasks and phonon + electric field perturbation It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced by the work. """ @classmethod def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None): """ Build tasks for the computation of Born effective charges from a ground-state task. Args: scf_task: ScfTask object. ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default. ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run. None to use AbiPy default. manager: :class:`TaskManager` object. """ new = cls(manager=manager) new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance) return new def on_all_ok(self): """ This method is called when all tasks reach S_OK Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. 
        out_ddb = self.merge_ddb_files()

        return self.Results(node=self, returncode=0, message="DDB merge done")


class DteWork(Work, MergeDdb):
    """
    Work for the computation of the third derivative of the energy.

    This work consists of DDK tasks and electric field perturbation.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):
        """
        Build a DteWork from a ground-state task.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: tolerance used in the DDK run. None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        new = cls(manager=manager)

        # DDK calculations
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)

        ddk_tasks = []
        for ddk_inp in multi_ddk:
            ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
            ddk_tasks.append(ddk_task)

        # Build the list of inputs for the electric field perturbation.
        # Each task is connected to all the previous DDK and DDE tasks and to the scf_task.
        multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)

        # To compute the nonlinear coefficients, all the directions of the perturbation
        # have to be taken into consideration.

        # DDE calculations
        dde_tasks = []
        dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        dde_deps.update({scf_task: "WFK"})
        for dde_inp in multi_dde:
            dde_task = new.register_dde_task(dde_inp, deps=dde_deps)
            dde_tasks.append(dde_task)

        # DTE calculations
        dte_deps = {scf_task: "WFK DEN"}
        dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks})

        multi_dte = scf_task.input.make_dte_inputs()
        dte_tasks = []
        for dte_inp in multi_dte:
            dte_task = new.register_dte_task(dte_inp, deps=dte_deps)
            dte_tasks.append(dte_task)

        return new

    def on_all_ok(self):
        """
        This method is called when all tasks reach S_OK.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        return self.Results(node=self, returncode=0, message="DDB merge done")
mit
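The record above defines abipy's `Work` container and the `register_*` helpers of `NodeContainer`. As a quick illustration, here is a minimal usage sketch (not part of the original file; it assumes the current `abipy.flowtk` package layout and two pre-built `AbinitInput` objects named `scf_input` and `nscf_input`) showing the dependency pattern those helpers are designed for:

# Hypothetical sketch: chain a GS-SCF task and an NSCF task inside a Work.
# `scf_input` and `nscf_input` are assumed to be AbinitInput objects built elsewhere.
from abipy import flowtk

flow = flowtk.Flow(workdir="flow_bands")
work = flowtk.Work()

scf_task = work.register_scf_task(scf_input)                              # GS run
nscf_task = work.register_nscf_task(nscf_input, deps={scf_task: "DEN"})   # NSCF depends on the DEN file

flow.register_work(work)
flow.allocate()                 # assign workdirs and TaskManagers to the tasks
flow.build_and_pickle_dump()    # create the directory tree on disk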
ldirer/scikit-learn
sklearn/linear_model/tests/test_perceptron.py
378
1815
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_raises from sklearn.utils import check_random_state from sklearn.datasets import load_iris from sklearn.linear_model import Perceptron iris = load_iris() random_state = check_random_state(12) indices = np.arange(iris.data.shape[0]) random_state.shuffle(indices) X = iris.data[indices] y = iris.target[indices] X_csr = sp.csr_matrix(X) X_csr.sort_indices() class MyPerceptron(object): def __init__(self, n_iter=1): self.n_iter = n_iter def fit(self, X, y): n_samples, n_features = X.shape self.w = np.zeros(n_features, dtype=np.float64) self.b = 0.0 for t in range(self.n_iter): for i in range(n_samples): if self.predict(X[i])[0] != y[i]: self.w += y[i] * X[i] self.b += y[i] def project(self, X): return np.dot(X, self.w) + self.b def predict(self, X): X = np.atleast_2d(X) return np.sign(self.project(X)) def test_perceptron_accuracy(): for data in (X, X_csr): clf = Perceptron(n_iter=30, shuffle=False) clf.fit(data, y) score = clf.score(data, y) assert_true(score >= 0.7) def test_perceptron_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 clf1 = MyPerceptron(n_iter=2) clf1.fit(X, y_bin) clf2 = Perceptron(n_iter=2, shuffle=False) clf2.fit(X, y_bin) assert_array_almost_equal(clf1.w, clf2.coef_.ravel()) def test_undefined_methods(): clf = Perceptron() for meth in ("predict_proba", "predict_log_proba"): assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
bsd-3-clause
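The test above checks that scikit-learn's `Perceptron` reproduces the hand-rolled update rule w += y_i * x_i, b += y_i on misclassified samples. Below is a standalone sketch of that comparison; note that parameter names differ across scikit-learn versions (the test uses the older `n_iter`, newer releases use `max_iter`), and exact coefficient agreement depends on the version's iteration order:

# Toy re-implementation of the comparison exercised by test_perceptron_correctness().
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
X, y = iris.data, np.where(iris.target == 1, 1, -1)   # binary +/-1 labels

w, b = np.zeros(X.shape[1]), 0.0
for _ in range(2):                        # two epochs, as in the test
    for xi, yi in zip(X, y):
        if np.sign(xi @ w + b) != yi:     # misclassified -> perceptron update
            w += yi * xi
            b += yi

clf = Perceptron(max_iter=2, shuffle=False, tol=None).fit(X, y)
print(np.allclose(w, clf.coef_.ravel()))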
wanglei828/apollo
modules/tools/mapshow/libs/subplot_traj_path.py
3
2931
#!/usr/bin/env python ############################################################################### # Copyright 2017 The Apollo Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import matplotlib.pyplot as plt from matplotlib import cm as cmx from matplotlib import colors as mcolors class TrajPathSubplot: def __init__(self, ax): self.ax = ax self.path_lines = [] self.path_lines_size = 30 self.colors = [] self.init_colors() # self.colors = ['b','r', 'y', 'k'] for i in range(self.path_lines_size): line, = ax.plot( [0], [0], c=self.colors[i % len(self.colors)], ls="-", marker='', lw=8, alpha=0.3) self.path_lines.append(line) ax.set_xlabel("x (m)") # ax.set_xlim([-2, 10]) # ax.set_ylim([-6, 6]) self.ax.autoscale_view() # self.ax.relim() # ax.set_ylabel("y (m)") ax.set_title("PLANNING ACC") self.set_visible(False) def init_colors(self): self.colors = [] values = range(self.path_lines_size) jet = plt.get_cmap('brg') color_norm = mcolors.Normalize(vmin=0, vmax=values[-1]) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet) for val in values: color_val = scalar_map.to_rgba(val) self.colors.append(color_val) def set_visible(self, visible): for line in self.path_lines: line.set_visible(visible) def show(self, planning): planning.traj_data_lock.acquire() for i in range(len(planning.traj_path_x_history)): if i >= self.path_lines_size: print "WARNING: number of path lines is more than " \ + str(self.path_lines_size) continue speed_line = self.path_lines[self.path_lines_size - i - 1] speed_line.set_xdata(planning.traj_path_x_history[i]) speed_line.set_ydata(planning.traj_path_y_history[i]) speed_line.set_visible(True) # self.ax.legend(loc="upper left", borderaxespad=0., ncol=5) # self.ax.axis('equal') planning.traj_data_lock.release() self.ax.autoscale_view() self.ax.relim()
apache-2.0
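`TrajPathSubplot.init_colors()` above samples N evenly spaced colors from a matplotlib colormap via `Normalize` plus `ScalarMappable`. The same pattern in isolation, as a small self-contained sketch independent of the Apollo code:

# Map indices 0..n-1 onto evenly spaced RGBA colors of a named colormap.
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors

def sample_colormap(n, cmap_name='brg'):
    norm = mcolors.Normalize(vmin=0, vmax=n - 1)
    scalar_map = cmx.ScalarMappable(norm=norm, cmap=plt.get_cmap(cmap_name))
    return [scalar_map.to_rgba(i) for i in range(n)]

print(sample_colormap(5)[:2])   # first two RGBA tuples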
ahmadia/bokeh
sphinx/source/docs/tutorials/exercises/stocks.py
23
2098
### ### NOTE: This exercise requires a network connection ### import numpy as np import pandas as pd from bokeh.plotting import figure, output_file, show, VBox # Here is some code to read in some stock data from the Yahoo Finance API AAPL = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) MSFT = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) IBM = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010", parse_dates=['Date']) output_file("stocks.html", title="stocks.py example") # create a figure p1 = figure(title="Stocks", x_axis_label="Date", y_axis_label="Close price", x_axis_type="datetime") p1.below[0].formatter.formats = dict(years=['%Y'], months=['%b %Y'], days=['%d %b %Y']) # EXERCISE: finish this line plot, and add more for the other stocks. Each one should # have a legend, and its own color. p1.line( AAPL['Date'], # x coordinates AAPL['Adj Close'], # y coordinates color='#A6CEE3', # set a color for the line legend='AAPL', # attach a legend label ) # EXERCISE: style the plot, set a title, lighten the gridlines, etc. # EXERCISE: start a new figure # Here is some code to compute the 30-day moving average for AAPL aapl = AAPL['Adj Close'] aapl_dates = AAPL['Date'] window_size = 30 window = np.ones(window_size)/float(window_size) aapl_avg = np.convolve(aapl, window, 'same') # EXERCISE: plot a scatter of circles for the individual AAPL prices with legend # 'close'. Remember to set the x axis type and tools on the first renderer # EXERCISE: plot a line of the AAPL moving average data with the legeng 'avg' # EXERCISE: style the plot, set a title, lighten the gridlines, etc. show(VBox(p1, p2)) # open a browser
bsd-3-clause
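The exercise file above deliberately leaves the second figure unfinished (`p2` is referenced in `show(VBox(p1, p2))` but never created). One possible completion of the moving-average part is sketched below; it is written against the same older bokeh API the tutorial itself uses (string `legend` arguments; newer bokeh releases use `legend_label`) and reuses `aapl`, `aapl_dates` and `aapl_avg` from the file:

# Hypothetical completion sketch for the second figure of the exercise.
p2 = figure(title="AAPL 30-day moving average", x_axis_label="Date",
            y_axis_label="Close price", x_axis_type="datetime")
p2.circle(aapl_dates, aapl, size=4, color='darkgrey', alpha=0.2, legend='close')
p2.line(aapl_dates, aapl_avg, color='navy', legend='avg')
p2.grid.grid_line_alpha = 0.3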
bsautermeister/machine-learning-examples
one_shot_learning/keras/siamese_cifar100.py
1
15282
""" Trains a Siamese MLP on pairs of digits from the CIFAR100 small dataset. """ from __future__ import absolute_import from __future__ import print_function import argparse import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tqdm import tqdm DISTANCE_THRESHOLD = 0.50 NUM_CLASSES = 100 SEED = 42 def l1_distance_vector(vects): x, y = vects l1 = tf.abs(x - y) return tf.keras.backend.maximum(l1, tf.keras.backend.epsilon()) def create_pairs(x, digit_indices, oversample_factor=1): """Positive and negative pair creation. Alternates between positive and negative pairs. """ pairs = [] labels = [] n = min([len(digit_indices[d]) for d in range(NUM_CLASSES)]) for o in range(oversample_factor): for d in range(NUM_CLASSES): for i in range(n - 1): for j in range(i + 1, n): z1, z2 = digit_indices[d][i], digit_indices[d][j] pairs += [[x[z1], x[z2]]] rand_inc = np.random.randint(1, NUM_CLASSES) rand_idx = np.random.randint(0, len(digit_indices[d])) dn = (d + rand_inc) % NUM_CLASSES z1, z2 = digit_indices[d][i], digit_indices[dn][rand_idx] pairs += [[x[z1], x[z2]]] labels += [1, 0] return np.array(pairs), np.array(labels) def create_base_cnn_network(input_shape): """Base CNN network to be shared (eq. to feature extraction). """ inputs = tf.keras.layers.Input(shape=input_shape) x = tf.keras.layers.Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same', kernel_regularizer=tf.keras.regularizers.l2(5e-4))(inputs) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.MaxPool2D()(x) x = tf.keras.layers.Dropout(0.225)(x) x = tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_regularizer=tf.keras.regularizers.l2(5e-4))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.MaxPool2D()(x) x = tf.keras.layers.Dropout(0.225)(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(128, kernel_regularizer=tf.keras.regularizers.l2(5e-4))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(128, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(5e-4))(x) return tf.keras.models.Model(inputs, x) def create_dense_siamese_model(input_shape): base_network = create_base_cnn_network(input_shape) input_a = tf.keras.layers.Input(shape=input_shape) input_b = tf.keras.layers.Input(shape=input_shape) processed_a = base_network(input_a) processed_b = base_network(input_b) # output_shape=lambda x: x[0] embedding = tf.keras.layers.Lambda(l1_distance_vector)([processed_a, processed_b]) embedding = tf.keras.layers.BatchNormalization()(embedding) x = tf.keras.layers.Dropout(0.5)(embedding) x = tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(5e-4))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Dropout(0.5)(x) prediction = tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(5e-4), activation='sigmoid')(x) model = tf.keras.models.Model([input_a, input_b], prediction) opt = tf.keras.optimizers.RMSprop() model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy']) return model def create_per_feature_nn_siamese_model(input_shape): base_network = create_base_cnn_network(input_shape) input_a = tf.keras.layers.Input(shape=input_shape) input_b = tf.keras.layers.Input(shape=input_shape) processed_a = base_network(input_a) processed_b = 
base_network(input_b) mid = 32 x1 = tf.keras.layers.Lambda(lambda x: x[0] * x[1])([processed_a, processed_b]) x2 = tf.keras.layers.Lambda(lambda x: x[0] + x[1])([processed_a, processed_b]) x3 = tf.keras.layers.Lambda(lambda x: tf.keras.backend.abs(x[0] - x[1]))([processed_a, processed_b]) x4 = tf.keras.layers.Lambda(lambda x: tf.keras.backend.square(x))(x3) x = tf.keras.layers.Concatenate()([x1, x2, x3, x4]) x = tf.keras.layers.Reshape((4, base_network.output_shape[1], 1), name='reshape1')(x) # Per feature NN with shared weight is implemented using CONV2D with appropriate stride. x = tf.keras.layers.Conv2D(mid, kernel_size=(4, 1), activation='relu', padding='valid')(x) x = tf.keras.layers.Reshape((base_network.output_shape[1], mid, 1))(x) x = tf.keras.layers.Conv2D(1, kernel_size=(1, mid), activation='linear', padding='valid')(x) x = tf.keras.layers.Flatten(name='flatten')(x) # Weighted sum implemented as a Dense layer. x = tf.keras.layers.Dense(1, use_bias=True, activation='sigmoid', name='weighted-average')(x) model = tf.keras.models.Model([input_a, input_b], x, name='head') opt = tf.keras.optimizers.RMSprop() model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy']) return model def compute_accuracy(y_true, y_pred): """Compute classification accuracy with a fixed threshold on distances. """ pred = y_pred.ravel() < DISTANCE_THRESHOLD return np.mean(pred == y_true) def acc(y_true, y_pred): """Compute classification accuracy with a fixed threshold on distances. """ return tf.keras.backend.mean(tf.equal(y_true, tf.cast(y_pred < DISTANCE_THRESHOLD, y_true.dtype))) def get_digit_indices(labels, examples_per_class): digit_indices = [np.where(labels == i)[0] for i in range(NUM_CLASSES)] return [di[:examples_per_class] for di in digit_indices] def plot_values(train_values, valid_values, y_label): epochs = range(1, len(train_values) + 1) plt.clf() plt.plot(epochs, train_values, 'b') if valid_values is not None: plt.plot(epochs, valid_values, 'g') plt.xlabel('Epochs') plt.ylabel(y_label) plt.show() def plot_examples_separated(image_pairs, labels, predictions): num = image_pairs.shape[0] fig = plt.figure(1) for i in range(0, num): # works because labels are alternating in unshuffled dataset img0 = image_pairs[i, 0][:, :, 0] img1 = image_pairs[i, 1][:, :, 0] label = labels[i] distance = predictions[i, 0] fig.add_subplot(num // 2, 4, (i * 2 + 1)) plt.imshow(img0) fig.add_subplot(num // 2, 4, (i * 2 + 2)) plt.imshow(img1) plt.xlabel('==' if label == 0 else '!=') plt.ylabel('{:.4f}'.format(distance)) plt.show() def plot_examples(image_pairs, predictions): num = image_pairs.shape[0] fig = plt.figure(1) for i in range(0, num): img0 = image_pairs[i, 0][:, :, 0] img1 = image_pairs[i, 1][:, :, 0] distance = predictions[i, 0] fig.add_subplot(num, 2, (2 * i + 1)) plt.imshow(img0) fig.add_subplot(num, 2, (2 * i + 2)) plt.imshow(img1) plt.ylabel('{:.4f}'.format(distance)) plt.show() def main(args): # results can still be non-deterministic when running on GPU, due to cuDNN tf.set_random_seed(SEED) np.random.seed(SEED) # the data, split between train and test sets (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(label_mode='fine') x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # create training+test positive and negative pairs tr_digit_indices = get_digit_indices(y_train, args.examples_per_class) tr_pairs, tr_y = create_pairs(x_train, tr_digit_indices, args.oversample) te_digit_indices = get_digit_indices(y_test, 
args.examples_per_class) te_pairs, te_y = create_pairs(x_test, te_digit_indices, args.oversample) # network definition input_shape = x_train.shape[1:] if args.model == 'dense_head': model = create_dense_siamese_model(input_shape) elif args.model == 'per_feature_nn': model = create_per_feature_nn_siamese_model(input_shape) else: raise Exception('Unknown model type.') model.summary() # train callbacks = [] if args.early_stopping: callbacks.append(tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_acc')) callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=10, verbose=1)) callbacks.append(tf.keras.callbacks.ModelCheckpoint( filepath='checkpoints/ckp', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True, period=1)) print('Training...') history = model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y, batch_size=args.batch_size, epochs=args.max_epochs, callbacks=callbacks, verbose=2, validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y)) plot_values(history.history['loss'], history.history['val_loss'], 'Loss') plot_values(history.history['acc'], history.history['val_acc'], 'Accuracy') # load the best model from checkpoint latest = tf.train.latest_checkpoint('checkpoints') model.load_weights(latest) # compute final accuracy on training and test sets tr_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]]) te_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]]) if args.model == 'simple_head': tr_acc = compute_accuracy(tr_y, tr_pred) te_acc = compute_accuracy(te_y, te_pred) else: tr_scores = model.evaluate([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y, verbose=2) tr_acc = tr_scores[1] te_scores = model.evaluate([te_pairs[:, 0], te_pairs[:, 1]], te_y, verbose=2) te_acc = te_scores[1] print('>>> Accuracy on training set: {:.2f}%'.format(tr_acc * 100)) print('>>> Accuracy on test set: {:.2f}%'.format(te_acc * 100)) # plot first 20 examples image_pairs = te_pairs[:20, :] labels = te_y[:20] predictions = te_pred[:20] plot_examples_separated(image_pairs, labels, predictions) # plot first 10 FPs if args.model == 'simple_head': # minimum assessment_criteria = lambda new, prev: new < prev else: # maximum assessment_criteria = lambda new, prev: new > prev image_pairs = [] labels = np.zeros(10) predictions = np.zeros((10, 1)) index = 0 while len(image_pairs) < 10: if assessment_criteria(te_pred[index], DISTANCE_THRESHOLD) and te_y[index] == 0: image_pairs += [[te_pairs[index, 0], te_pairs[index, 1]]] labels[len(image_pairs) - 1] = te_y[index] predictions[len(image_pairs) - 1, 0] = te_pred[index, 0] index += 1 image_pairs = np.array(image_pairs) plot_examples(image_pairs, predictions) # plot first 10 FNs image_pairs = [] labels = np.zeros(10) predictions = np.zeros((10, 1)) index = 0 while len(image_pairs) < 10: if not assessment_criteria(te_pred[index], DISTANCE_THRESHOLD) and te_y[index] == 1: image_pairs += [[te_pairs[index, 0], te_pairs[index, 1]]] labels[len(image_pairs) - 1] = te_y[index] predictions[len(image_pairs) - 1, 0] = te_pred[index, 0] index += 1 image_pairs = np.array(image_pairs) plot_examples(image_pairs, predictions) # classify (using minimum distance) print('Classifying test set...') min_correct_counter = 0 median_correct_counter = 0 mean_correct_counter = 0 for t in tqdm(range(x_test.shape[0])): test_img = x_test[t] test_img_label = y_test[t] n = min([len(tr_digit_indices[d]) for d in range(NUM_CLASSES)]) - 1 assessment_criteria = lambda new, prev: new > prev min_aggregated_distance = -999 mean_aggregated_distance = -999 
median_aggregated_distance = -999 min_aggregated_distance_label = -1 mean_aggregated_distance_label = -1 median_aggregated_distance_label = -1 for d in range(NUM_CLASSES): image_pairs = [] for i in range(n): z1 = tr_digit_indices[d][i] img = x_train[z1] image_pairs += [[img, test_img]] image_pairs = np.array(image_pairs) predictions = model.predict([image_pairs[:, 0], image_pairs[:, 1]]) min_distance = np.min(predictions) if assessment_criteria(min_distance, min_aggregated_distance): min_aggregated_distance = min_distance min_aggregated_distance_label = d median_distance = np.median(predictions) if assessment_criteria(median_distance, median_aggregated_distance): median_aggregated_distance = median_distance median_aggregated_distance_label = d mean_distance = np.mean(predictions) if assessment_criteria(mean_distance, mean_aggregated_distance): mean_aggregated_distance = mean_distance mean_aggregated_distance_label = d if test_img_label == min_aggregated_distance_label: min_correct_counter += 1 if test_img_label == median_aggregated_distance_label: median_correct_counter += 1 if test_img_label == mean_aggregated_distance_label: mean_correct_counter += 1 print('Classification accuracy using MIN: {}'.format(min_correct_counter / x_test.shape[0])) print('Classification accuracy using MEDIAN: {}'.format(median_correct_counter / x_test.shape[0])) print('Classification accuracy using MEAN: {}'.format(mean_correct_counter / x_test.shape[0])) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--max_epochs', type=int, default=500, help='The maximum number of training epochs') parser.add_argument('--batch_size', type=int, default=32, help='The batch size while training') parser.add_argument('--examples_per_class', type=int, default=25, help='Maximum number of examples per class') parser.add_argument('--model', choices=['dense_head', 'per_feature_nn'], type=str, default='dense_head', help='The network model of the siamese, which mainly differs in the head model used') parser.add_argument('--early_stopping', type=bool, default=True, help='Whether to use early stopping or not') parser.add_argument('--oversample', type=int, default=2, help='Oversampling factor, that indicates the number of time the identical pair ' + 'of a positive-label is added, in order to get more different negative-label pairs') args = parser.parse_args() main(args)
mit
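The record above scores CIFAR-100 image pairs with a siamese head and turns each score into a match/non-match decision by thresholding (compute_accuracy, acc, and the FP/FN selection all reuse that threshold). Below is a minimal sketch of that thresholding step on toy scores, assuming the usual convention that label 1 marks a matching pair; the threshold value 0.5 is illustrative, since DISTANCE_THRESHOLD is defined earlier in the original script and its value is not shown in this record.

import numpy as np

# Illustrative stand-in for DISTANCE_THRESHOLD, which the original script
# defines near its top (the exact value is not shown in this record).
DISTANCE_THRESHOLD = 0.5

def compute_accuracy(y_true, y_pred):
    # A predicted distance below the threshold is treated as "matching pair".
    pred = y_pred.ravel() < DISTANCE_THRESHOLD
    return np.mean(pred == y_true)

# Toy scores: two matching pairs (label 1) and two non-matching pairs (label 0).
y_true = np.array([1, 1, 0, 0])
y_pred = np.array([[0.10], [0.40], [0.90], [0.30]])  # last pair falls on the wrong side
print(compute_accuracy(y_true, y_pred))  # 0.75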
zrhans/pythonanywhere
pyscripts/ply_CH4.py
1
9135
""" DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max, Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max, Pres.Atm.,Pres.Atm._min,Pres.Atm._max, Temp.Int.,Temp.Int._min,Temp.Int._max, CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max, SO2,SO2_min,SO2_max, O3,O3_min,O3_max, NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max, CO,CO_min,CO_max, MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max, Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max """ import plotly.plotly as py # Every function in this module will communicate with an external plotly server import plotly.graph_objs as go import pandas as pd DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv' df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA') r0 = df.CH4 r1 = df.HCnM r2 = df.HCT t0 = df.DVE #print(y) # Definindo as series dedados trace1 = go.Scatter( r=r0,#[6.804985785265978, 3.389596010612268, 5.3814721107464445, 8.059540219420184, 5.318229227868589, 2.9850999356273773, 1.9665870023752283, 6.769265408206589, 4.073401898721205, 6.50437182526841, 7.556369818996649, 4.047456094066775, 7.386662496070009, 5.413624736983931, 7.470716531163242, 7.982110216939738, 4.737814080093381, 4.206453042929911, 5.478604804594065, 4.824520280697772, 5.599600609899737, 6.8667952170824735, 3.0856713662561464, 7.771810943227382, 3.6877944350967193, 5.360356685192225, 5.140446739300986, 6.045445680928888, 6.833920940193708, 3.6207694625408364, 3.9894305834039687, 5.3118244995018, 4.608213480282062, 6.640584716151912, 3.055188854482986, 7.492564163752965, 5.4850781777896715, 3.8977949966209358, 5.976245114026165, 5.447061560910957, 5.37703411681004, 4.690805787731301, 4.711640491184845, 3.629919329394875, 5.957668076372498, 5.357121284391151, 3.849235282821748, 6.250507136319218, 7.122243357145468, 3.399404233835391, 3.5105566722713313, 4.100997603660974, 4.096382100199779, 6.233583074805102, 3.939488526772935, 3.9254450773976983, 6.118132501462698, 3.9404503462852323, 7.583015573261159, 3.513202145338516], t=t0,#[-30.352944361883697, -25.611459854524096, -12.425227452676078, 13.96138051872652, -4.9509328406707445, -25.692274190905437, 12.46876416157031, -4.913764107032951, -10.967380287631935, 30.814194054910676, 2.4749594311442737, 17.97554375239156, 0.7711305933623585, 6.137488485631386, -14.451963574013497, 28.184534112915948, 12.538680065954864, -8.983230337131154, 5.231285164762417, -64.48900253584051, 11.357486681772649, 3.4540747915125176, 13.924346613092862, -25.364002046782343, -16.81800638602268, -10.260051030559755, -13.212134125591882, 2.5793388653025744, 8.717574965852519, -10.675498719239487, -2.926366012522306, 25.195880754767717, 40.59032932155964, -9.121433630189772, -24.297362381339184, -3.1769445056889345, 10.85049841917252, -31.33205974736701, 4.849567462214266, 15.048276954124187, 3.2951046992599635, -6.197091873129837, -8.77857413578066, 29.549174119407287, -5.1374487928814645, 23.02686048794348, -6.634816578371129, 2.7550149918614695, 21.733250113653973, -24.816994960101756, -7.83054706253201, 28.325796210205855, 12.300977467795988, -21.563157240034112, -19.335516283813288, 26.146443170846787, -1.7060712026841085, 16.071723694996702, 2.053266302846965, -5.097911612332572], mode='markers', name='CH4', marker=dict( color='rgb(27,158,119)', size=110, line=dict( color='white' ), opacity=0.6 ) ) trace2 = go.Scatter( r=r1,#[3.488043923008057, 2.9184785763552368, 4.201827359971069, 8.227324606851074, 4.776690427237194, 
3.041912303114453, 4.789947719076336, 5.663880780360856, 3.858262393172743, 8.260212881141047, 6.868624486428106, 5.7401975996748895, 6.594979282458134, 5.692703778211614, 5.337916574462772, 9.283604185175781, 5.7645908931363365, 4.028864552051332, 5.662344748373121, 0.42283723110061455, 6.201266463929336, 6.43926538131984, 5.096758513060891, 4.632081908733815, 3.4218461363102217, 4.369404703352921, 4.02833441941273, 5.80576719754376, 6.848189921425055, 3.8092955127795802, 4.385268183833586, 6.98332684554596, 7.396273186029126, 5.215125003141, 3.0861487792429205, 6.335394491488218, 6.0904147140584834, 2.4480560069033306, 5.942784020305152, 6.373129885590045, 5.454205341176391, 4.393337616563476, 4.2059446799773, 6.155542287959513, 5.119087171162872, 6.869860830828341, 4.104599860575049, 5.954348125582761, 8.092332877153778, 2.9617697054526295, 3.974012187582175, 6.373384128907529, 5.415409143179902, 3.876890919980343, 3.261446947424557, 6.145808529699159, 5.502451987192818, 5.571553295311899, 6.853049261089887, 4.140355074942654], t=t0,#[14.80662578088746, 79.00634037258273, 49.02206554130045, 49.69908313603149, 54.137491082859476, 86.41932102054662, 96.95239193571373, 41.463488263612184, 67.13769169339066, 68.06103943971128, 42.68193032273406, 76.39865660811795, 42.19479347220856, 59.57788897461255, 27.510866799296068, 60.7534448322685, 68.37083279914752, 65.74802814945305, 58.53300837209963, -176.7441064584909, 61.17401857996598, 47.4515085890397, 84.4266531857914, 12.479346550525074, 72.48080276184626, 50.578831757750606, 51.560228240214684, 52.43785618126272, 51.586827992137934, 73.87294477733714, 70.21705692787259, 70.71429915430754, 82.23439442637098, 38.93539044700985, 84.7093666701594, 38.16582843645038, 61.70405365378903, 70.19695629244305, 54.454292590141606, 64.33489496861428, 58.273893146586325, 60.49982239038519, 59.155232538950266, 83.86561846759426, 47.87340989732011, 69.28260156593979, 71.1899104286971, 51.04839646304676, 59.427582415206295, 78.59873696166098, 75.75586451521559, 79.97048372322382, 73.89378024632016, 31.733411131690488, 68.08475117701943, 80.41107997857199, 48.92425070886502, 76.65025575535202, 42.18286436288056, 76.03333589453311], mode='markers', name='HCnM', marker=dict( color='rgb(217,95,2)', size=110, line=dict( color='white' ), opacity=0.6 ) ) trace3 = go.Scatter( r=r2,#[1.855870835032611, 5.2869620620428215, 3.886013391943573, 6.282863313001057, 4.4534148477405155, 5.688008050761193, 7.330864282608489, 3.825660594787748, 4.989604176963506, 7.8974314697670955, 4.6566931130229525, 6.667153696311044, 4.4310062871369515, 5.346113253377259, 2.4799456958789263, 8.113477348526397, 6.081311682312096, 4.968216896207305, 5.24445392063028, 5.422207884171506, 5.792774616023354, 4.787580592225452, 6.784318637182092, 1.10893690948093, 5.138911105244, 4.042929657287297, 4.022892029681135, 4.828428791305017, 5.417378374307972, 5.378635210668265, 5.421097175459842, 7.1205619788552434, 8.349308539903586, 3.4104855883231204, 5.6283784708757345, 3.9149369761396855, 5.7639402623551526, 4.764374106798512, 5.076236267895072, 6.165558183200791, 5.105576516279834, 4.761036376933375, 4.5962495409437905, 7.504188411346776, 4.107031417919988, 6.920422299379973, 5.349128949563397, 4.798065719385607, 7.023251532304466, 5.283680965457643, 5.569071152430292, 7.383794908447646, 6.269233210443127, 2.656529645009885, 4.843984338804117, 7.247992361555254, 4.372959394411489, 6.570981081360247, 4.602479243892371, 5.6700520508263965], t=t0,#[151.29425518111282, 147.1880250282001, 
125.2821571123002, 87.0672979717481, 119.62789835678657, 147.74082414730458, 139.56459814488954, 101.39149710201973, 134.5601842795838, 104.02444470480259, 89.39314294483763, 123.19403140008181, 91.47434051519816, 113.33237361373538, 96.1499255673322, 93.28073452263222, 118.21556522583221, 132.32293737819631, 112.9411863909871, -179.74623313781393, 110.3035135586484, 97.7508361660772, 131.60808925703367, 115.49691923085226, 140.58118216037175, 123.39666211932598, 128.34200904453573, 107.6088103983526, 97.90468978746796, 137.12844797536522, 130.43124491245027, 112.2270844807976, 118.63020224581525, 106.05822558950737, 146.90810970600344, 90.27734955816595, 111.50528236323856, 151.0897425364386, 107.7213941567982, 111.30085499702685, 114.68027793629503, 126.56937949315531, 128.2189522328928, 125.35485719537647, 112.4180682532985, 111.79735567917727, 133.41805225814235, 105.18411684151033, 97.23103612064705, 146.66803680360073, 136.23931520140337, 121.79184419346764, 123.91132797113666, 129.86224497019268, 141.34395084996186, 123.27096774880371, 108.45882172345216, 124.41237705630053, 89.02711073868319, 134.8767011451154], mode='markers', name='HCT', marker=dict( color='rgb(117,112,179)', size=110, line=dict( color='white' ), opacity=0.6 ) ) layout = go.Layout( title='BSE01- Hidrocarbonetos - Médias horárias', font=dict( size=15 ), plot_bgcolor='rgb(223, 223, 223)', angularaxis=dict( tickcolor='rgb(253,253,253)' ), orientation=270, radialaxis=dict( ticksuffix='ppm' ), ) #Gerando multiplos diagramas ld data = [trace1, trace2, trace3] fig = go.Figure(data=data, layout=layout) # Tracando o objeto py.plot( fig, filename='hans/BSE01/2016/ld_CH4', # name of the file as saved in your plotly account sharing='public' )
apache-2.0
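The record above draws CH4/HCnM/HCT concentrations against wind direction as polar scatter traces through plotly's hosted API (py.plot uploads the figure to a plotly account). The sketch below shows the same kind of plot offline with matplotlib's polar projection instead, using synthetic stand-in data for the station columns; the column meanings (CH4 in ppm, DVE as wind direction in degrees) are taken from the CSV header quoted in the record.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Synthetic stand-ins for the methane concentration (CH4, ppm) and
# wind-direction (DVE, degrees) columns read from the station CSV.
rng = np.random.RandomState(0)
df = pd.DataFrame({'CH4': rng.uniform(2.0, 8.0, 60),
                   'DVE': rng.uniform(0.0, 360.0, 60)})

ax = plt.subplot(111, projection='polar')
# matplotlib's polar axes expect the angle in radians.
ax.scatter(np.deg2rad(df['DVE']), df['CH4'], alpha=0.6, label='CH4')
ax.set_title('CH4 vs. wind direction (hourly means)')
ax.legend(loc='upper right')
plt.savefig('ch4_polar.png', bbox_inches='tight')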
andaag/scikit-learn
sklearn/manifold/isomap.py
229
7169
"""Isomap for manifold learning""" # Author: Jake Vanderplas -- <[email protected]> # License: BSD 3 clause (C) 2011 import numpy as np from ..base import BaseEstimator, TransformerMixin from ..neighbors import NearestNeighbors, kneighbors_graph from ..utils import check_array from ..utils.graph import graph_shortest_path from ..decomposition import KernelPCA from ..preprocessing import KernelCenterer class Isomap(BaseEstimator, TransformerMixin): """Isomap Embedding Non-linear dimensionality reduction through Isometric Mapping Read more in the :ref:`User Guide <isomap>`. Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold eigen_solver : ['auto'|'arpack'|'dense'] 'auto' : Attempt to choose the most efficient solver for the given problem. 'arpack' : Use Arnoldi decomposition to find the eigenvalues and eigenvectors. 'dense' : Use a direct solver (i.e. LAPACK) for the eigenvalue decomposition. tol : float Convergence tolerance passed to arpack or lobpcg. not used if eigen_solver == 'dense'. max_iter : integer Maximum number of iterations for the arpack solver. not used if eigen_solver == 'dense'. path_method : string ['auto'|'FW'|'D'] Method to use in finding shortest path. 'auto' : attempt to choose the best algorithm automatically. 'FW' : Floyd-Warshall algorithm. 'D' : Dijkstra's algorithm. neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. kernel_pca_ : object `KernelPCA` object used to implement the embedding. training_data_ : array-like, shape (n_samples, n_features) Stores the training data. nbrs_ : sklearn.neighbors.NearestNeighbors instance Stores nearest neighbors instance, including BallTree or KDtree if applicable. dist_matrix_ : array-like, shape (n_samples, n_samples) Stores the geodesic distance matrix of training data. References ---------- .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) """ def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto', tol=0, max_iter=None, path_method='auto', neighbors_algorithm='auto'): self.n_neighbors = n_neighbors self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.path_method = path_method self.neighbors_algorithm = neighbors_algorithm self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors, algorithm=neighbors_algorithm) def _fit_transform(self, X): X = check_array(X) self.nbrs_.fit(X) self.training_data_ = self.nbrs_._fit_X self.kernel_pca_ = KernelPCA(n_components=self.n_components, kernel="precomputed", eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter) kng = kneighbors_graph(self.nbrs_, self.n_neighbors, mode='distance') self.dist_matrix_ = graph_shortest_path(kng, method=self.path_method, directed=False) G = self.dist_matrix_ ** 2 G *= -0.5 self.embedding_ = self.kernel_pca_.fit_transform(G) def reconstruction_error(self): """Compute the reconstruction error for the embedding. 
Returns ------- reconstruction_error : float Notes ------- The cost function of an isomap embedding is ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` Where D is the matrix of distances for the input data X, D_fit is the matrix of distances for the output embedding X_fit, and K is the isomap kernel: ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` """ G = -0.5 * self.dist_matrix_ ** 2 G_center = KernelCenterer().fit_transform(G) evals = self.kernel_pca_.lambdas_ return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0] def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, precomputed tree, or NearestNeighbors object. Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X: {array-like, sparse matrix, BallTree, KDTree} Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new: array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X: array-like, shape (n_samples, n_features) Returns ------- X_new: array-like, shape (n_samples, n_components) """ X = check_array(X) distances, indices = self.nbrs_.kneighbors(X, return_distance=True) #Create the graph of shortest distances from X to self.training_data_ # via the nearest neighbors of X. #This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: G_X = np.zeros((X.shape[0], self.training_data_.shape[0])) for i in range(X.shape[0]): G_X[i] = np.min((self.dist_matrix_[indices[i]] + distances[i][:, None]), 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
bsd-3-clause
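A short usage sketch for the estimator defined in the record above, exercising only the public scikit-learn API it implements (fit_transform and reconstruction_error) on a toy swiss-roll dataset; the sample size and neighbor count are arbitrary choices.

import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import Isomap

# A 3-D swiss roll is intrinsically 2-D, so Isomap should unroll it.
X, _ = make_swiss_roll(n_samples=500, random_state=0)

# n_neighbors sets the k-NN graph used to approximate geodesic distances.
embedding = Isomap(n_neighbors=10, n_components=2)
X_2d = embedding.fit_transform(X)

print(X_2d.shape)                        # (500, 2)
print(embedding.reconstruction_error())  # Frobenius-norm error of the embedding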
luis-rr/nest-simulator
pynest/examples/intrinsic_currents_spiking.py
13
5954
# -*- coding: utf-8 -*- # # intrinsic_currents_spiking.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. # ''' Intrinsic currents spiking -------------------------- This example illustrates a neuron receiving spiking input through several different receptors (AMPA, NMDA, GABA_A, GABA_B), provoking spike output. The model, `ht_neuron`, also has intrinsic currents (I_NaP, I_KNa, I_T, and I_h). It is a slightly simplified implementation of neuron model proposed in Hill and Tononi (2005) **Modeling Sleep and Wakefulness in the Thalamocortical System** *J Neurophysiol* 93:1671 http://dx.doi.org/10.1152/jn.00915.2004. The neuron is bombarded with spike trains from four Poisson generators, which are connected to the AMPA, NMDA, GABA_A, and GABA_B receptors, respectively. See also: intrinsic_currents_subthreshold.py ''' ''' We imported all necessary modules for simulation, analysis and plotting. ''' import nest import numpy as np import matplotlib.pyplot as plt ''' Additionally, we set the verbosity using `set_verbosity` to suppress info messages. We also reset the kernel to be sure to start with a clean NEST. ''' nest.set_verbosity("M_WARNING") nest.ResetKernel() ''' We define the simulation parameters: - The rate of the input spike trains - The weights of the different receptors (names must match receptor types) - The time to simulate Note that all parameter values should be doubles, since NEST expects doubles. ''' rate_in = 100. w_recep = {'AMPA': 30., 'NMDA': 30., 'GABA_A': 5., 'GABA_B': 10.} t_sim = 250. num_recep = len(w_recep) ''' We create - one neuron instance - one Poisson generator instance for each synapse type - one multimeter to record from the neuron: - membrane potential - threshold potential - synaptic conductances - intrinsic currents See `intrinsic_currents_subthreshold.py` for more details on `multimeter` configuration. ''' nrn = nest.Create('ht_neuron') p_gens = nest.Create('poisson_generator', 4, params={'rate': rate_in}) mm = nest.Create('multimeter', params={'interval': 0.1, 'record_from': ['V_m', 'theta', 'g_AMPA', 'g_NMDA', 'g_GABA_A', 'g_GABA_B', 'I_NaP', 'I_KNa', 'I_T', 'I_h']}) ''' We now connect each Poisson generator with the neuron through a different receptor type. First, we need to obtain the numerical codes for the receptor types from the model. The `receptor_types` entry of the default dictionary for the `ht_neuron` model is a dictionary mapping receptor names to codes. In the loop, we use Python's tuple unpacking mechanism to unpack dictionary entries from our w_recep dictionary. Note that we need to pack the ``pg`` variable into a list before passing it to `Connect`, because iterating over the `p_gens` list makes `pg` a "naked" GID. 
''' receptors = nest.GetDefaults('ht_neuron')['receptor_types'] for pg, (rec_name, rec_wgt) in zip(p_gens, w_recep.items()): nest.Connect([pg], nrn, syn_spec={'receptor_type': receptors[rec_name], 'weight': rec_wgt}) ''' We then connect the multimeter. Note that the multimeter is connected to the neuron, not the other way around. ''' nest.Connect(mm, nrn) ''' We are now ready to simulate. ''' nest.Simulate(t_sim) ''' We now fetch the data recorded by the multimeter. The data are returned as a dictionary with entry ``'times'`` containing timestamps for all recorded data, plus one entry per recorded quantity. All data is contained in the ``'events'`` entry of the status dictionary returned by the multimeter. Because all NEST functions return arrays, we need to pick out element ``0`` from the result of `GetStatus`. ''' data = nest.GetStatus(mm)[0]['events'] t = data['times'] ''' The following function turns a name such as I_NaP into proper TeX code $I_{\mathrm{NaP}}$ for a pretty label. ''' def texify_name(name): return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_')) ''' The next step is to plot the results. We create a new figure, and add one subplot each for membrane and threshold potential, synaptic conductances, and intrinsic currents. ''' fig = plt.figure() Vax = fig.add_subplot(311) Vax.plot(t, data['V_m'], 'b', lw=2, label=r'$V_m$') Vax.plot(t, data['theta'], 'g', lw=2, label=r'$\Theta$') Vax.set_ylabel('Potential [mV]') try: Vax.legend(fontsize='small') except TypeError: Vax.legend() # work-around for older Matplotlib versions Vax.set_title('ht_neuron driven by Poisson processes') Gax = fig.add_subplot(312) for gname in ('g_AMPA', 'g_NMDA', 'g_GABA_A', 'g_GABA_B'): Gax.plot(t, data[gname], lw=2, label=texify_name(gname)) try: Gax.legend(fontsize='small') except TypeError: Gax.legend() # work-around for older Matplotlib versions Gax.set_ylabel('Conductance [nS]') Iax = fig.add_subplot(313) for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'), ('I_NaP', 'crimson'), ('I_KNa', 'aqua')): Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname)) try: Iax.legend(fontsize='small') except TypeError: Iax.legend() # work-around for older Matplotlib versions Iax.set_ylabel('Current [pA]') Iax.set_xlabel('Time [ms]')
gpl-2.0
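The prose in the record above explains the one NEST-specific step of the example: receptor names must be mapped to numeric codes through the model defaults and then passed as receptor_type inside syn_spec. Below is a compact sketch of just that pattern, using only calls that appear in the record itself; the rate and weight values are illustrative, and NEST 2.x-style connections are assumed.

import nest

nest.ResetKernel()

# One Hill-Tononi neuron and one Poisson source (rate is illustrative).
nrn = nest.Create('ht_neuron')
pg = nest.Create('poisson_generator', params={'rate': 100.})

# Map receptor names ('AMPA', 'NMDA', ...) to the numeric codes NEST expects.
receptors = nest.GetDefaults('ht_neuron')['receptor_types']

# Route the spike train onto the AMPA receptor (weight is illustrative).
nest.Connect(pg, nrn,
             syn_spec={'receptor_type': receptors['AMPA'], 'weight': 30.})

nest.Simulate(250.)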
jblackburne/scikit-learn
sklearn/neighbors/tests/test_dist_metrics.py
38
6118
import itertools import pickle import numpy as np from numpy.testing import assert_array_almost_equal import scipy from scipy.spatial.distance import cdist from sklearn.neighbors.dist_metrics import DistanceMetric from nose import SkipTest def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def cmp_version(version1, version2): version1 = tuple(map(int, version1.split('.')[:2])) version2 = tuple(map(int, version2.split('.')[:2])) if version1 < version2: return -1 elif version1 > version2: return 1 else: return 0 class TestMetrics: def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5, rseed=0, dtype=np.float64): np.random.seed(rseed) self.X1 = np.random.random((n1, d)).astype(dtype) self.X2 = np.random.random((n2, d)).astype(dtype) # make boolean arrays: ones and zeros self.X1_bool = self.X1.round(0) self.X2_bool = self.X2.round(0) V = np.random.random((d, d)) VI = np.dot(V, V.T) self.metrics = {'euclidean': {}, 'cityblock': {}, 'minkowski': dict(p=(1, 1.5, 2, 3)), 'chebyshev': {}, 'seuclidean': dict(V=(np.random.random(d),)), 'wminkowski': dict(p=(1, 1.5, 3), w=(np.random.random(d),)), 'mahalanobis': dict(VI=(VI,)), 'hamming': {}, 'canberra': {}, 'braycurtis': {}} self.bool_metrics = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def test_cdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X2, metric, **kwargs) yield self.check_cdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X2_bool, metric) yield self.check_cdist_bool, metric, D_true def check_cdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1, self.X2) assert_array_almost_equal(D12, D_true) def check_cdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool, self.X2_bool) assert_array_almost_equal(D12, D_true) def test_pdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X1, metric, **kwargs) yield self.check_pdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X1_bool, metric) yield self.check_pdist_bool, metric, D_true def check_pdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1) assert_array_almost_equal(D12, D_true) def check_pdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool) assert_array_almost_equal(D12, D_true) def test_pickle(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) yield self.check_pickle, metric, kwargs for metric in self.bool_metrics: yield self.check_pickle_bool, metric def check_pickle_bool(self, metric): dm = DistanceMetric.get_metric(metric) D1 = dm.pairwise(self.X1_bool) dm2 = pickle.loads(pickle.dumps(dm)) D2 = dm2.pairwise(self.X1_bool) assert_array_almost_equal(D1, D2) def check_pickle(self, 
metric, kwargs): dm = DistanceMetric.get_metric(metric, **kwargs) D1 = dm.pairwise(self.X1) dm2 = pickle.loads(pickle.dumps(dm)) D2 = dm2.pairwise(self.X1) assert_array_almost_equal(D1, D2) def test_haversine_metric(): def haversine_slow(x1, x2): return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2 + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2)) X = np.random.random((10, 2)) haversine = DistanceMetric.get_metric("haversine") D1 = haversine.pairwise(X) D2 = np.zeros_like(D1) for i, x1 in enumerate(X): for j, x2 in enumerate(X): D2[i, j] = haversine_slow(x1, x2) assert_array_almost_equal(D1, D2) assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * D2) ** 2) def test_pyfunc_metric(): X = np.random.random((10, 3)) euclidean = DistanceMetric.get_metric("euclidean") pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2) # Check if both callable metric and predefined metric initialized # DistanceMetric object is picklable euclidean_pkl = pickle.loads(pickle.dumps(euclidean)) pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc)) D1 = euclidean.pairwise(X) D2 = pyfunc.pairwise(X) D1_pkl = euclidean_pkl.pairwise(X) D2_pkl = pyfunc_pkl.pairwise(X) assert_array_almost_equal(D1, D2) assert_array_almost_equal(D1_pkl, D2_pkl)
bsd-3-clause
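The test module above validates DistanceMetric pairwise distances against scipy's cdist. The sketch below isolates that check, assuming the import location used by scikit-learn of this vintage (sklearn.neighbors; newer releases expose the class elsewhere) and an arbitrary metric/parameter choice.

import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.spatial.distance import cdist
from sklearn.neighbors import DistanceMetric

rng = np.random.RandomState(0)
X1 = rng.random_sample((20, 4))
X2 = rng.random_sample((25, 4))

# Same pattern as the tests above: build a metric, compute pairwise
# distances, and compare against scipy's reference implementation.
dm = DistanceMetric.get_metric('minkowski', p=3)
D12 = dm.pairwise(X1, X2)
assert_array_almost_equal(D12, cdist(X1, X2, 'minkowski', p=3))
print(D12.shape)  # (20, 25)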
exepulveda/swfc
python/plot_fig11-15.py
1
3458
import numpy as np import math from collections import Counter import matplotlib as mpl mpl.use('agg') from case_study_bm import attributes,setup_case_study_ore,setup_distances import matplotlib.pyplot as plt from cluster_utils import relabel if __name__ == "__main__": NC = 3 kmeans = np.int32(np.loadtxt('../results/final_bm_clusters_kmeans_%d.csv'%NC,delimiter=",")) pca = np.int32(np.loadtxt('../results/final_bm_clusters_pca_%d.csv'%NC,delimiter=",")) #fcew = np.loadtxt('../results/final_2d_clusters_fcew_4.csv',delimiter=",") #fc = np.loadtxt('../results/final_2d_clusters_fc_4.csv',delimiter=",") #sfcew = np.loadtxt('../results/final_2d_clusters_sfcew_4.csv',delimiter=",") #swfc_no_target = np.loadtxt('../results/bm_clusters_swfc_3_no_target.csv',delimiter=",")[:,-1] wfc = np.int32(np.loadtxt('../results/bm_clusters_wfc_%d.csv'%NC,delimiter=",")[:,-1]) swfc = np.int32(np.loadtxt('../results/final_bm_clusters_swfc_%d.csv'%NC,delimiter=",")) locations,data,min_values,max_values,scale,var_types,categories = setup_case_study_ore() N,ND = data.shape variables_include = set(['RockType','Fe','Fe_Rec','Ap','Mgt']) print(kmeans.shape,pca.shape,N,ND) #names = ['kmeans','pca','fcew','sfcew','fc','sfc'] names = ['kmeans','pca','WFC','SWFC'] labels = ["All"] + ["C"+str(k+1) for k in range(NC)] cluster_target = swfc clay_color = ['r','b','g','c','m','y'] #for i,cluster in enumerate([kmeans,pca,fcew,sfcew,fc,sfc]): for i,cluster in enumerate([kmeans,pca,wfc,swfc]): adjust_cluster = relabel(cluster_target,cluster,NC) for var in range(ND): if attributes[var] in variables_include: fig, ax = plt.subplots(figsize=(6, 6)) #ax.set_ylim([mins[v],maxs[v]]) if var_types[var] == 3: counter = Counter(data[:,var]) mostc = counter.most_common() cats = len(mostc) width = 0.4 ind = np.arange(NC+1) counts = np.zeros((cats,NC+1)) for k,vc in mostc: k = int(math.floor(k)) counts[k,0] = vc for c in range(NC): indices = np.where(np.int8(adjust_cluster)==c)[0] counter = Counter(data[indices,var]) for k,vc in counter.most_common(): k = int(math.floor(k)) counts[k,c+1] = vc sumc = np.zeros(NC+1) for k in range(cats): ax.bar(ind, counts[k,:], width, bottom=sumc,color=clay_color[k]) sumc += counts[k,:] ax.set_xticks(ind+width/2) ax.set_xticklabels(labels) ax.set_xlim(-width,ind[-1]+2*width) else: d = [data[:,var]] for c in range(NC): d += [data[adjust_cluster==c,var]] bx = ax.boxplot(d,labels=labels,showmeans=True) plt.savefig("../figures/case_bm/fig11-{var}-bm-{cm}.pdf".format(var=attributes[var],cm=names[i]),bbox_inches='tight') plt.close('all')
gpl-3.0
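The script above compares clusterings by drawing, for each continuous attribute, a box plot of the full sample followed by one box per cluster. Below is a stripped-down sketch of that per-cluster box-plot layout; the attribute values, cluster labels, and output path are hypothetical stand-ins.

import numpy as np
import matplotlib.pyplot as plt

NC = 3
rng = np.random.RandomState(0)

# Hypothetical stand-ins for one continuous attribute and its cluster labels.
values = rng.normal(loc=50.0, scale=10.0, size=300)
clusters = rng.randint(0, NC, size=300)

# Same layout as the record above: the whole sample first, then one box per cluster.
data = [values] + [values[clusters == c] for c in range(NC)]
labels = ['All'] + ['C{}'.format(c + 1) for c in range(NC)]

fig, ax = plt.subplots(figsize=(6, 6))
ax.boxplot(data, labels=labels, showmeans=True)
ax.set_ylabel('attribute value')
plt.savefig('cluster_boxplot.pdf', bbox_inches='tight')
plt.close(fig)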
lancezlin/ml_template_py
lib/python2.7/site-packages/pandas/io/tests/test_excel.py
7
87414
# pylint: disable=E1101 from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems from datetime import datetime, date, time import sys import os from distutils.version import LooseVersion import warnings import operator import functools import nose from numpy import nan import numpy as np import pandas as pd from pandas import DataFrame, Index, MultiIndex from pandas.io.parsers import read_csv from pandas.io.excel import ( ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer, _Openpyxl20Writer, _Openpyxl22Writer, register_writer, _XlsxWriter ) from pandas.io.common import URLError from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf from pandas.core.config import set_option, get_option import pandas.util.testing as tm def _skip_if_no_xlrd(): try: import xlrd ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2])) if ver < (0, 9): raise nose.SkipTest('xlrd < 0.9, skipping') except ImportError: raise nose.SkipTest('xlrd not installed, skipping') def _skip_if_no_xlwt(): try: import xlwt # NOQA except ImportError: raise nose.SkipTest('xlwt not installed, skipping') def _skip_if_no_openpyxl(): try: import openpyxl # NOQA except ImportError: raise nose.SkipTest('openpyxl not installed, skipping') def _skip_if_no_xlsxwriter(): try: import xlsxwriter # NOQA except ImportError: raise nose.SkipTest('xlsxwriter not installed, skipping') def _skip_if_no_excelsuite(): _skip_if_no_xlrd() _skip_if_no_xlwt() _skip_if_no_openpyxl() def _skip_if_no_boto(): try: import boto # NOQA except ImportError: raise nose.SkipTest('boto not installed, skipping') _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() _frame = DataFrame(_seriesd)[:10] _frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10] _tsframe = tm.makeTimeDataFrame()[:5] _mixed_frame = _frame.copy() _mixed_frame['foo'] = 'bar' class SharedItems(object): def setUp(self): self.dirpath = tm.get_data_path() self.frame = _frame.copy() self.frame2 = _frame2.copy() self.tsframe = _tsframe.copy() self.mixed_frame = _mixed_frame.copy() def get_csv_refdf(self, basename): """ Obtain the reference data from read_csv with the Python engine. Test data path is defined by pandas.util.testing.get_data_path() Parameters ---------- basename : str File base name, excluding file extension. Returns ------- dfref : DataFrame """ pref = os.path.join(self.dirpath, basename + '.csv') dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python') return dfref def get_excelfile(self, basename): """ Return test data ExcelFile instance. Test data path is defined by pandas.util.testing.get_data_path() Parameters ---------- basename : str File base name, excluding file extension. Returns ------- excel : io.excel.ExcelFile """ return ExcelFile(os.path.join(self.dirpath, basename + self.ext)) def get_exceldf(self, basename, *args, **kwds): """ Return test data DataFrame. Test data path is defined by pandas.util.testing.get_data_path() Parameters ---------- basename : str File base name, excluding file extension. Returns ------- df : DataFrame """ pth = os.path.join(self.dirpath, basename + self.ext) return read_excel(pth, *args, **kwds) class ReadingTestsBase(SharedItems): # This is based on ExcelWriterBase # # Base class for test cases to run with different Excel readers. # To add a reader test, define the following: # 1. A check_skip function that skips your tests if your reader isn't # installed. # 2. Add a property ext, which is the file extension that your reader # reades from. (needs to start with '.' 
so it's a valid path) # 3. Add a property engine_name, which is the name of the reader class. # For the reader this is not used for anything at the moment. def setUp(self): self.check_skip() super(ReadingTestsBase, self).setUp() def test_parse_cols_int(self): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['A', 'B', 'C']) df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols=3) df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, parse_cols=3) # TODO add index to xls file) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) def test_parse_cols_list(self): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['B', 'C']) df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols=[0, 2, 3]) df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, parse_cols=[0, 2, 3]) # TODO add index to xls file) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) def test_parse_cols_str(self): dfref = self.get_csv_refdf('test1') df1 = dfref.reindex(columns=['A', 'B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols='A:D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, parse_cols='A:D') # TODO add index to xls, read xls ignores index name ? tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols='A,C,D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, parse_cols='A,C,D') # TODO add index to xls file tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols='A,C:D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, parse_cols='A,C:D') tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) def test_excel_stop_iterator(self): parsed = self.get_exceldf('test2', 'Sheet1') expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) tm.assert_frame_equal(parsed, expected) def test_excel_cell_error_na(self): parsed = self.get_exceldf('test3', 'Sheet1') expected = DataFrame([[np.nan]], columns=['Test']) tm.assert_frame_equal(parsed, expected) def test_excel_passes_na(self): excel = self.get_excelfile('test4') parsed = read_excel(excel, 'Sheet1', keep_default_na=False, na_values=['apple']) expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) parsed = read_excel(excel, 'Sheet1', keep_default_na=True, na_values=['apple']) expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) # 13967 excel = self.get_excelfile('test5') parsed = read_excel(excel, 'Sheet1', keep_default_na=False, na_values=['apple']) expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) parsed = read_excel(excel, 'Sheet1', keep_default_na=True, na_values=['apple']) expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) def test_excel_table_sheet_by_index(self): excel = self.get_excelfile('test1') dfref = self.get_csv_refdf('test1') df1 = 
read_excel(excel, 0, index_col=0) df2 = read_excel(excel, 1, skiprows=[1], index_col=0) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) df1 = excel.parse(0, index_col=0) df2 = excel.parse(1, skiprows=[1], index_col=0) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) df3 = read_excel(excel, 0, index_col=0, skipfooter=1) df4 = read_excel(excel, 0, index_col=0, skip_footer=1) tm.assert_frame_equal(df3, df1.ix[:-1]) tm.assert_frame_equal(df3, df4) df3 = excel.parse(0, index_col=0, skipfooter=1) df4 = excel.parse(0, index_col=0, skip_footer=1) tm.assert_frame_equal(df3, df1.ix[:-1]) tm.assert_frame_equal(df3, df4) import xlrd with tm.assertRaises(xlrd.XLRDError): read_excel(excel, 'asdf') def test_excel_table(self): dfref = self.get_csv_refdf('test1') df1 = self.get_exceldf('test1', 'Sheet1', index_col=0) df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0) # TODO add index to file tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) df3 = self.get_exceldf('test1', 'Sheet1', index_col=0, skipfooter=1) df4 = self.get_exceldf('test1', 'Sheet1', index_col=0, skip_footer=1) tm.assert_frame_equal(df3, df1.ix[:-1]) tm.assert_frame_equal(df3, df4) def test_reader_special_dtypes(self): expected = DataFrame.from_items([ ("IntCol", [1, 2, -3, 4, 0]), ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]), ("BoolCol", [True, False, True, True, False]), ("StrCol", [1, 2, 3, 4, 5]), # GH5394 - this is why convert_float isn't vectorized ("Str2Col", ["a", 3, "c", "d", "e"]), ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31), datetime(1905, 1, 1), datetime(2013, 12, 14), datetime(2015, 3, 14)]) ]) basename = 'test_types' # should read in correctly and infer types actual = self.get_exceldf(basename, 'Sheet1') tm.assert_frame_equal(actual, expected) # if not coercing number, then int comes in as float float_expected = expected.copy() float_expected["IntCol"] = float_expected["IntCol"].astype(float) float_expected.loc[1, "Str2Col"] = 3.0 actual = self.get_exceldf(basename, 'Sheet1', convert_float=False) tm.assert_frame_equal(actual, float_expected) # check setting Index (assuming xls and xlsx are the same here) for icol, name in enumerate(expected.columns): actual = self.get_exceldf(basename, 'Sheet1', index_col=icol) exp = expected.set_index(name) tm.assert_frame_equal(actual, exp) # convert_float and converters should be different but both accepted expected["StrCol"] = expected["StrCol"].apply(str) actual = self.get_exceldf( basename, 'Sheet1', converters={"StrCol": str}) tm.assert_frame_equal(actual, expected) no_convert_float = float_expected.copy() no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) actual = self.get_exceldf(basename, 'Sheet1', convert_float=False, converters={"StrCol": str}) tm.assert_frame_equal(actual, no_convert_float) # GH8212 - support for converters and missing values def test_reader_converters(self): basename = 'test_converters' expected = DataFrame.from_items([ ("IntCol", [1, 2, -3, -1000, 0]), ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]), ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']), ("StrCol", ['1', np.nan, '3', '4', '5']), ]) converters = {'IntCol': lambda x: int(x) if x != '' else -1000, 'FloatCol': lambda x: 10 * x if x else np.nan, 2: lambda x: 'Found' if x != '' else 'Not found', 3: lambda x: str(x) if x else '', } # should read in correctly and 
set types of single cells (not array # dtypes) actual = self.get_exceldf(basename, 'Sheet1', converters=converters) tm.assert_frame_equal(actual, expected) def test_reading_all_sheets(self): # Test reading all sheetnames by setting sheetname to None, # Ensure a dict is returned. # See PR #9450 basename = 'test_multisheet' dfs = self.get_exceldf(basename, sheetname=None) expected_keys = ['Alpha', 'Beta', 'Charlie'] tm.assert_contains_all(expected_keys, dfs.keys()) def test_reading_multiple_specific_sheets(self): # Test reading specific sheetnames by specifying a mixed list # of integers and strings, and confirm that duplicated sheet # references (positions/names) are removed properly. # Ensure a dict is returned # See PR #9450 basename = 'test_multisheet' # Explicitly request duplicates. Only the set should be returned. expected_keys = [2, 'Charlie', 'Charlie'] dfs = self.get_exceldf(basename, sheetname=expected_keys) expected_keys = list(set(expected_keys)) tm.assert_contains_all(expected_keys, dfs.keys()) assert len(expected_keys) == len(dfs.keys()) def test_reading_all_sheets_with_blank(self): # Test reading all sheetnames by setting sheetname to None, # In the case where some sheets are blank. # Issue #11711 basename = 'blank_with_header' dfs = self.get_exceldf(basename, sheetname=None) expected_keys = ['Sheet1', 'Sheet2', 'Sheet3'] tm.assert_contains_all(expected_keys, dfs.keys()) # GH6403 def test_read_excel_blank(self): actual = self.get_exceldf('blank', 'Sheet1') tm.assert_frame_equal(actual, DataFrame()) def test_read_excel_blank_with_header(self): expected = DataFrame(columns=['col_1', 'col_2']) actual = self.get_exceldf('blank_with_header', 'Sheet1') tm.assert_frame_equal(actual, expected) # GH 12292 : error when read one empty column from excel file def test_read_one_empty_col_no_header(self): df = pd.DataFrame( [["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]] ) with ensure_clean(self.ext) as path: df.to_excel(path, 'no_header', index=False, header=False) actual_header_none = read_excel( path, 'no_header', parse_cols=[0], header=None ) actual_header_zero = read_excel( path, 'no_header', parse_cols=[0], header=0 ) expected = DataFrame() tm.assert_frame_equal(actual_header_none, expected) tm.assert_frame_equal(actual_header_zero, expected) def test_read_one_empty_col_with_header(self): _skip_if_no_xlwt() _skip_if_no_openpyxl() df = pd.DataFrame( [["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]] ) with ensure_clean(self.ext) as path: df.to_excel(path, 'with_header', index=False, header=True) actual_header_none = read_excel( path, 'with_header', parse_cols=[0], header=None ) actual_header_zero = read_excel( path, 'with_header', parse_cols=[0], header=0 ) expected_header_none = DataFrame(pd.Series([0], dtype='int64')) tm.assert_frame_equal(actual_header_none, expected_header_none) expected_header_zero = DataFrame(columns=[0], dtype='int64') tm.assert_frame_equal(actual_header_zero, expected_header_zero) def test_set_column_names_in_parameter(self): # GH 12870 : pass down column names associated with # keyword argument names refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'], [3, 'baz']], columns=['a', 'b']) with ensure_clean(self.ext) as pth: with ExcelWriter(pth) as writer: refdf.to_excel(writer, 'Data_no_head', header=False, index=False) refdf.to_excel(writer, 'Data_with_head', index=False) refdf.columns = ['A', 'B'] with ExcelFile(pth) as reader: xlsdf_no_head = read_excel(reader, 'Data_no_head', header=None, names=['A', 'B']) xlsdf_with_head = read_excel(reader, 
'Data_with_head', index_col=None, names=['A', 'B']) tm.assert_frame_equal(xlsdf_no_head, refdf) tm.assert_frame_equal(xlsdf_with_head, refdf) def test_date_conversion_overflow(self): # GH 10001 : pandas.ExcelFile ignore parse_dates=False expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'], [pd.Timestamp('2016-03-16'), 'Jack Black'], [1e+20, 'Timothy Brown']], columns=['DateColWithBigInt', 'StringCol']) result = self.get_exceldf('testdateoverflow') tm.assert_frame_equal(result, expected) class XlrdTests(ReadingTestsBase): """ This is the base class for the xlrd tests, and 3 different file formats are supported: xls, xlsx, xlsm """ def test_excel_read_buffer(self): pth = os.path.join(self.dirpath, 'test1' + self.ext) expected = read_excel(pth, 'Sheet1', index_col=0) with open(pth, 'rb') as f: actual = read_excel(f, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) with open(pth, 'rb') as f: xls = ExcelFile(f) actual = read_excel(xls, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) def test_read_xlrd_Book(self): _skip_if_no_xlwt() import xlrd df = self.frame with ensure_clean('.xls') as pth: df.to_excel(pth, "SheetA") book = xlrd.open_workbook(pth) with ExcelFile(book, engine="xlrd") as xl: result = read_excel(xl, "SheetA") tm.assert_frame_equal(df, result) result = read_excel(book, sheetname="SheetA", engine="xlrd") tm.assert_frame_equal(df, result) @tm.network def test_read_from_http_url(self): url = ('https://raw.github.com/pandas-dev/pandas/master/' 'pandas/io/tests/data/test1' + self.ext) url_table = read_excel(url) local_table = self.get_exceldf('test1') tm.assert_frame_equal(url_table, local_table) @tm.network(check_before_test=True) def test_read_from_s3_url(self): _skip_if_no_boto() url = ('s3://pandas-test/test1' + self.ext) url_table = read_excel(url) local_table = self.get_exceldf('test1') tm.assert_frame_equal(url_table, local_table) @tm.slow def test_read_from_file_url(self): # FILE if sys.version_info[:2] < (2, 6): raise nose.SkipTest("file:// not supported with Python < 2.6") localtable = os.path.join(self.dirpath, 'test1' + self.ext) local_table = read_excel(localtable) try: url_table = read_excel('file://localhost/' + localtable) except URLError: # fails on some systems import platform raise nose.SkipTest("failing on %s" % ' '.join(platform.uname()).strip()) tm.assert_frame_equal(url_table, local_table) def test_read_from_pathlib_path(self): # GH12655 tm._skip_if_no_pathlib() from pathlib import Path str_path = os.path.join(self.dirpath, 'test1' + self.ext) expected = read_excel(str_path, 'Sheet1', index_col=0) path_obj = Path(self.dirpath, 'test1' + self.ext) actual = read_excel(path_obj, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) def test_read_from_py_localpath(self): # GH12655 tm._skip_if_no_localpath() from py.path import local as LocalPath str_path = os.path.join(self.dirpath, 'test1' + self.ext) expected = read_excel(str_path, 'Sheet1', index_col=0) abs_dir = os.path.abspath(self.dirpath) path_obj = LocalPath(abs_dir).join('test1' + self.ext) actual = read_excel(path_obj, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) def test_reader_closes_file(self): pth = os.path.join(self.dirpath, 'test1' + self.ext) f = open(pth, 'rb') with ExcelFile(f) as xlsx: # parses okay read_excel(xlsx, 'Sheet1', index_col=0) self.assertTrue(f.closed) def test_creating_and_reading_multiple_sheets(self): # Test reading multiple sheets, from a runtime created excel file # with multiple sheets. 
# See PR #9450 _skip_if_no_xlwt() _skip_if_no_openpyxl() def tdf(sheetname): d, i = [11, 22, 33], [1, 2, 3] return DataFrame(d, i, columns=[sheetname]) sheets = ['AAA', 'BBB', 'CCC'] dfs = [tdf(s) for s in sheets] dfs = dict(zip(sheets, dfs)) with ensure_clean(self.ext) as pth: with ExcelWriter(pth) as ew: for sheetname, df in iteritems(dfs): df.to_excel(ew, sheetname) dfs_returned = read_excel(pth, sheetname=sheets) for s in sheets: tm.assert_frame_equal(dfs[s], dfs_returned[s]) def test_reader_seconds(self): # Test reading times with and without milliseconds. GH5945. import xlrd if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"): # Xlrd >= 0.9.3 can handle Excel milliseconds. expected = DataFrame.from_items([("Time", [time(1, 2, 3), time(2, 45, 56, 100000), time(4, 29, 49, 200000), time(6, 13, 42, 300000), time(7, 57, 35, 400000), time(9, 41, 28, 500000), time(11, 25, 21, 600000), time(13, 9, 14, 700000), time(14, 53, 7, 800000), time(16, 37, 0, 900000), time(18, 20, 54)])]) else: # Xlrd < 0.9.3 rounds Excel milliseconds. expected = DataFrame.from_items([("Time", [time(1, 2, 3), time(2, 45, 56), time(4, 29, 49), time(6, 13, 42), time(7, 57, 35), time(9, 41, 29), time(11, 25, 22), time(13, 9, 15), time(14, 53, 8), time(16, 37, 1), time(18, 20, 54)])]) actual = self.get_exceldf('times_1900', 'Sheet1') tm.assert_frame_equal(actual, expected) actual = self.get_exceldf('times_1904', 'Sheet1') tm.assert_frame_equal(actual, expected) def test_read_excel_multiindex(self): # GH 4679 mi = MultiIndex.from_product([['foo', 'bar'], ['a', 'b']]) mi_file = os.path.join(self.dirpath, 'testmultiindex' + self.ext) expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], [2, 3.5, pd.Timestamp('2015-01-02'), False], [3, 4.5, pd.Timestamp('2015-01-03'), False], [4, 5.5, pd.Timestamp('2015-01-04'), True]], columns=mi) actual = read_excel(mi_file, 'mi_column', header=[0, 1]) tm.assert_frame_equal(actual, expected) actual = read_excel(mi_file, 'mi_column', header=[0, 1], index_col=0) tm.assert_frame_equal(actual, expected) expected.columns = ['a', 'b', 'c', 'd'] expected.index = mi actual = read_excel(mi_file, 'mi_index', index_col=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) expected.columns = mi actual = read_excel(mi_file, 'both', index_col=[0, 1], header=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) expected.index = mi.set_names(['ilvl1', 'ilvl2']) expected.columns = ['a', 'b', 'c', 'd'] actual = read_excel(mi_file, 'mi_index_name', index_col=[0, 1]) tm.assert_frame_equal(actual, expected) expected.index = list(range(4)) expected.columns = mi.set_names(['c1', 'c2']) actual = read_excel(mi_file, 'mi_column_name', header=[0, 1], index_col=0) tm.assert_frame_equal(actual, expected) # Issue #11317 expected.columns = mi.set_levels( [1, 2], level=1).set_names(['c1', 'c2']) actual = read_excel(mi_file, 'name_with_int', index_col=0, header=[0, 1]) tm.assert_frame_equal(actual, expected) expected.columns = mi.set_names(['c1', 'c2']) expected.index = mi.set_names(['ilvl1', 'ilvl2']) actual = read_excel(mi_file, 'both_name', index_col=[0, 1], header=[0, 1]) tm.assert_frame_equal(actual, expected) actual = read_excel(mi_file, 'both_name', index_col=[0, 1], header=[0, 1]) tm.assert_frame_equal(actual, expected) actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0, 1], header=[0, 1], skiprows=2) tm.assert_frame_equal(actual, expected) def test_read_excel_multiindex_empty_level(self): # GH 12453 _skip_if_no_xlsxwriter() with ensure_clean('.xlsx') as 
path: df = DataFrame({ ('Zero', ''): {0: 0}, ('One', 'x'): {0: 1}, ('Two', 'X'): {0: 3}, ('Two', 'Y'): {0: 7} }) expected = DataFrame({ ('Zero', 'Unnamed: 3_level_1'): {0: 0}, ('One', u'x'): {0: 1}, ('Two', u'X'): {0: 3}, ('Two', u'Y'): {0: 7} }) df.to_excel(path) actual = pd.read_excel(path, header=[0, 1]) tm.assert_frame_equal(actual, expected) df = pd.DataFrame({ ('Beg', ''): {0: 0}, ('Middle', 'x'): {0: 1}, ('Tail', 'X'): {0: 3}, ('Tail', 'Y'): {0: 7} }) expected = pd.DataFrame({ ('Beg', 'Unnamed: 0_level_1'): {0: 0}, ('Middle', u'x'): {0: 1}, ('Tail', u'X'): {0: 3}, ('Tail', u'Y'): {0: 7} }) df.to_excel(path) actual = pd.read_excel(path, header=[0, 1]) tm.assert_frame_equal(actual, expected) def test_excel_multindex_roundtrip(self): # GH 4679 _skip_if_no_xlsxwriter() with ensure_clean('.xlsx') as pth: for c_idx_names in [True, False]: for r_idx_names in [True, False]: for c_idx_levels in [1, 3]: for r_idx_levels in [1, 3]: # column index name can't be serialized unless # MultiIndex if (c_idx_levels == 1 and c_idx_names): continue # empty name case current read in as unamed levels, # not Nones check_names = True if not r_idx_names and r_idx_levels > 1: check_names = False df = mkdf(5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels) df.to_excel(pth) act = pd.read_excel( pth, index_col=list(range(r_idx_levels)), header=list(range(c_idx_levels))) tm.assert_frame_equal( df, act, check_names=check_names) df.iloc[0, :] = np.nan df.to_excel(pth) act = pd.read_excel( pth, index_col=list(range(r_idx_levels)), header=list(range(c_idx_levels))) tm.assert_frame_equal( df, act, check_names=check_names) df.iloc[-1, :] = np.nan df.to_excel(pth) act = pd.read_excel( pth, index_col=list(range(r_idx_levels)), header=list(range(c_idx_levels))) tm.assert_frame_equal( df, act, check_names=check_names) def test_excel_oldindex_format(self): # GH 4679 data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'], ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'], ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'], ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'], ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']]) columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4'] mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'], ['R_l1_g0', 'R_l1_g1', 'R_l1_g2', 'R_l1_g3', 'R_l1_g4']], labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], names=['R0', 'R1']) si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'], name='R0') in_file = os.path.join( self.dirpath, 'test_index_name_pre17' + self.ext) expected = pd.DataFrame(data, index=si, columns=columns) with tm.assert_produces_warning(FutureWarning): actual = pd.read_excel( in_file, 'single_names', has_index_names=True) tm.assert_frame_equal(actual, expected) expected.index.name = None actual = pd.read_excel(in_file, 'single_no_names') tm.assert_frame_equal(actual, expected) with tm.assert_produces_warning(FutureWarning): actual = pd.read_excel( in_file, 'single_no_names', has_index_names=False) tm.assert_frame_equal(actual, expected) expected.index = mi with tm.assert_produces_warning(FutureWarning): actual = pd.read_excel( in_file, 'multi_names', has_index_names=True) tm.assert_frame_equal(actual, expected) expected.index.names = [None, None] actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) with tm.assert_produces_warning(FutureWarning): actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1], has_index_names=False) tm.assert_frame_equal(actual, expected, 
check_names=False) def test_read_excel_bool_header_arg(self): # GH 6114 for arg in [True, False]: with tm.assertRaises(TypeError): pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), header=arg) def test_read_excel_chunksize(self): # GH 8011 with tm.assertRaises(NotImplementedError): pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), chunksize=100) def test_read_excel_parse_dates(self): # GH 11544 with tm.assertRaises(NotImplementedError): pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), parse_dates=True) def test_read_excel_date_parser(self): # GH 11544 with tm.assertRaises(NotImplementedError): dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S') pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), date_parser=dateparse) def test_read_excel_skiprows_list(self): # GH 4903 actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext), 'skiprows_list', skiprows=[0, 2]) expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], [2, 3.5, pd.Timestamp('2015-01-02'), False], [3, 4.5, pd.Timestamp('2015-01-03'), False], [4, 5.5, pd.Timestamp('2015-01-04'), True]], columns=['a', 'b', 'c', 'd']) tm.assert_frame_equal(actual, expected) actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext), 'skiprows_list', skiprows=np.array([0, 2])) tm.assert_frame_equal(actual, expected) def test_read_excel_squeeze(self): # GH 12157 f = os.path.join(self.dirpath, 'test_squeeze' + self.ext) actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True) expected = pd.Series([2, 3, 4], [4, 5, 6], name='b') expected.index.name = 'a' tm.assert_series_equal(actual, expected) actual = pd.read_excel(f, 'two_columns', squeeze=True) expected = pd.DataFrame({'a': [4, 5, 6], 'b': [2, 3, 4]}) tm.assert_frame_equal(actual, expected) actual = pd.read_excel(f, 'one_column', squeeze=True) expected = pd.Series([1, 2, 3], name='a') tm.assert_series_equal(actual, expected) class XlsReaderTests(XlrdTests, tm.TestCase): ext = '.xls' engine_name = 'xlrd' check_skip = staticmethod(_skip_if_no_xlrd) class XlsxReaderTests(XlrdTests, tm.TestCase): ext = '.xlsx' engine_name = 'xlrd' check_skip = staticmethod(_skip_if_no_xlrd) class XlsmReaderTests(XlrdTests, tm.TestCase): ext = '.xlsm' engine_name = 'xlrd' check_skip = staticmethod(_skip_if_no_xlrd) class ExcelWriterBase(SharedItems): # Base class for test cases to run with different Excel writers. # To add a writer test, define the following: # 1. A check_skip function that skips your tests if your writer isn't # installed. # 2. Add a property ext, which is the file extension that your writer # writes to. (needs to start with '.' so it's a valid path) # 3. Add a property engine_name, which is the name of the writer class. # Test with MultiIndex and Hierarchical Rows as merged cells. 
merge_cells = True def setUp(self): self.check_skip() super(ExcelWriterBase, self).setUp() self.option_name = 'io.excel.%s.writer' % self.ext.strip('.') self.prev_engine = get_option(self.option_name) set_option(self.option_name, self.engine_name) def tearDown(self): set_option(self.option_name, self.prev_engine) def test_excel_sheet_by_name_raise(self): _skip_if_no_xlrd() import xlrd with ensure_clean(self.ext) as pth: gt = DataFrame(np.random.randn(10, 2)) gt.to_excel(pth) xl = ExcelFile(pth) df = read_excel(xl, 0) tm.assert_frame_equal(gt, df) with tm.assertRaises(xlrd.XLRDError): read_excel(xl, '0') def test_excelwriter_contextmanager(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as pth: with ExcelWriter(pth) as writer: self.frame.to_excel(writer, 'Data1') self.frame2.to_excel(writer, 'Data2') with ExcelFile(pth) as reader: found_df = read_excel(reader, 'Data1') found_df2 = read_excel(reader, 'Data2') tm.assert_frame_equal(found_df, self.frame) tm.assert_frame_equal(found_df2, self.frame2) def test_roundtrip(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', columns=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # test roundtrip self.frame.to_excel(path, 'test1') recons = read_excel(path, 'test1', index_col=0) tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', index=False) recons = read_excel(path, 'test1', index_col=None) recons.index = self.frame.index tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', na_rep='NA') recons = read_excel(path, 'test1', index_col=0, na_values=['NA']) tm.assert_frame_equal(self.frame, recons) # GH 3611 self.frame.to_excel(path, 'test1', na_rep='88') recons = read_excel(path, 'test1', index_col=0, na_values=['88']) tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', na_rep='88') recons = read_excel(path, 'test1', index_col=0, na_values=[88, 88.0]) tm.assert_frame_equal(self.frame, recons) # GH 6573 self.frame.to_excel(path, 'Sheet1') recons = read_excel(path, index_col=0) tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, '0') recons = read_excel(path, index_col=0) tm.assert_frame_equal(self.frame, recons) def test_mixed(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.mixed_frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=0) tm.assert_frame_equal(self.mixed_frame, recons) def test_tsframe(self): _skip_if_no_xlrd() df = tm.makeTimeDataFrame()[:5] with ensure_clean(self.ext) as path: df.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1') tm.assert_frame_equal(df, recons) def test_basics_with_nan(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', columns=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) def test_int_types(self): _skip_if_no_xlrd() for np_type in (np.int8, np.int16, np.int32, np.int64): with ensure_clean(self.ext) as path: # Test np.int values read come back as int (rather than float # which is Excel's format). 
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1') int_frame = frame.astype(np.int64) tm.assert_frame_equal(int_frame, recons) recons2 = read_excel(path, 'test1') tm.assert_frame_equal(int_frame, recons2) # test with convert_float=False comes back as float float_frame = frame.astype(float) recons = read_excel(path, 'test1', convert_float=False) tm.assert_frame_equal(recons, float_frame, check_index_type=False, check_column_type=False) def test_float_types(self): _skip_if_no_xlrd() for np_type in (np.float16, np.float32, np.float64): with ensure_clean(self.ext) as path: # Test np.float values read come back as float. frame = DataFrame(np.random.random_sample(10), dtype=np_type) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1').astype(np_type) tm.assert_frame_equal(frame, recons, check_dtype=False) def test_bool_types(self): _skip_if_no_xlrd() for np_type in (np.bool8, np.bool_): with ensure_clean(self.ext) as path: # Test np.bool values read come back as float. frame = (DataFrame([1, 0, True, False], dtype=np_type)) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1').astype(np_type) tm.assert_frame_equal(frame, recons) def test_inf_roundtrip(self): _skip_if_no_xlrd() frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) with ensure_clean(self.ext) as path: frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = read_excel(reader, 'test1') tm.assert_frame_equal(frame, recons) def test_sheets(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', columns=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # Test writing to separate sheets writer = ExcelWriter(path) self.frame.to_excel(writer, 'test1') self.tsframe.to_excel(writer, 'test2') writer.save() reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=0) tm.assert_frame_equal(self.frame, recons) recons = read_excel(reader, 'test2', index_col=0) tm.assert_frame_equal(self.tsframe, recons) self.assertEqual(2, len(reader.sheet_names)) self.assertEqual('test1', reader.sheet_names[0]) self.assertEqual('test2', reader.sheet_names[1]) def test_colaliases(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', columns=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # column aliases col_aliases = Index(['AA', 'X', 'Y', 'Z']) self.frame2.to_excel(path, 'test1', header=col_aliases) reader = ExcelFile(path) rs = read_excel(reader, 'test1', index_col=0) xp = self.frame2.copy() xp.columns = col_aliases tm.assert_frame_equal(xp, rs) def test_roundtrip_indexlabels(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', columns=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # test index_label frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label=['test'], merge_cells=self.merge_cells) reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=0, ).astype(np.int64) frame.index.names = ['test'] 
self.assertEqual(frame.index.names, recons.index.names) frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label=['test', 'dummy', 'dummy2'], merge_cells=self.merge_cells) reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=0, ).astype(np.int64) frame.index.names = ['test'] self.assertEqual(frame.index.names, recons.index.names) frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label='test', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=0, ).astype(np.int64) frame.index.names = ['test'] tm.assert_frame_equal(frame, recons.astype(bool)) with ensure_clean(self.ext) as path: self.frame.to_excel(path, 'test1', columns=['A', 'B', 'C', 'D'], index=False, merge_cells=self.merge_cells) # take 'A' and 'B' as indexes (same row as cols 'C', 'D') df = self.frame.copy() df = df.set_index(['A', 'B']) reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=[0, 1]) tm.assert_frame_equal(df, recons, check_less_precise=True) def test_excel_roundtrip_indexname(self): _skip_if_no_xlrd() df = DataFrame(np.random.randn(10, 4)) df.index.name = 'foo' with ensure_clean(self.ext) as path: df.to_excel(path, merge_cells=self.merge_cells) xf = ExcelFile(path) result = read_excel(xf, xf.sheet_names[0], index_col=0) tm.assert_frame_equal(result, df) self.assertEqual(result.index.name, 'foo') def test_excel_roundtrip_datetime(self): _skip_if_no_xlrd() # datetime.date, not sure what to test here exactly tsf = self.tsframe.copy() with ensure_clean(self.ext) as path: tsf.index = [x.date() for x in self.tsframe.index] tsf.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = read_excel(reader, 'test1') tm.assert_frame_equal(self.tsframe, recons) # GH4133 - excel output format strings def test_excel_date_datetime_format(self): _skip_if_no_xlrd() df = DataFrame([[date(2014, 1, 31), date(1999, 9, 24)], [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)]], index=['DATE', 'DATETIME'], columns=['X', 'Y']) df_expected = DataFrame([[datetime(2014, 1, 31), datetime(1999, 9, 24)], [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)]], index=['DATE', 'DATETIME'], columns=['X', 'Y']) with ensure_clean(self.ext) as filename1: with ensure_clean(self.ext) as filename2: writer1 = ExcelWriter(filename1) writer2 = ExcelWriter(filename2, date_format='DD.MM.YYYY', datetime_format='DD.MM.YYYY HH-MM-SS') df.to_excel(writer1, 'test1') df.to_excel(writer2, 'test1') writer1.close() writer2.close() reader1 = ExcelFile(filename1) reader2 = ExcelFile(filename2) rs1 = read_excel(reader1, 'test1', index_col=None) rs2 = read_excel(reader2, 'test1', index_col=None) tm.assert_frame_equal(rs1, rs2) # since the reader returns a datetime object for dates, we need # to use df_expected to check the result tm.assert_frame_equal(rs2, df_expected) def test_to_excel_periodindex(self): _skip_if_no_xlrd() frame = self.tsframe xp = frame.resample('M', kind='period').mean() with ensure_clean(self.ext) as path: xp.to_excel(path, 'sht1') reader = ExcelFile(path) rs = read_excel(reader, 'sht1', index_col=0) tm.assert_frame_equal(xp, rs.to_period('M')) def test_to_excel_multiindex(self): _skip_if_no_xlrd() frame = self.frame arrays = np.arange(len(frame.index) * 2).reshape(2, -1) new_index = MultiIndex.from_arrays(arrays, names=['first', 'second']) frame.index = new_index with ensure_clean(self.ext) as path: frame.to_excel(path, 'test1', 
header=False) frame.to_excel(path, 'test1', columns=['A', 'B']) # round trip frame.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) df = read_excel(reader, 'test1', index_col=[0, 1], parse_dates=False) tm.assert_frame_equal(frame, df) # GH13511 def test_to_excel_multiindex_nan_label(self): _skip_if_no_xlrd() frame = pd.DataFrame({'A': [None, 2, 3], 'B': [10, 20, 30], 'C': np.random.sample(3)}) frame = frame.set_index(['A', 'B']) with ensure_clean(self.ext) as path: frame.to_excel(path, merge_cells=self.merge_cells) df = read_excel(path, index_col=[0, 1]) tm.assert_frame_equal(frame, df) # Test for Issue 11328. If column indices are integers, make # sure they are handled correctly for either setting of # merge_cells def test_to_excel_multiindex_cols(self): _skip_if_no_xlrd() frame = self.frame arrays = np.arange(len(frame.index) * 2).reshape(2, -1) new_index = MultiIndex.from_arrays(arrays, names=['first', 'second']) frame.index = new_index new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)]) frame.columns = new_cols_index header = [0, 1] if not self.merge_cells: header = 0 with ensure_clean(self.ext) as path: # round trip frame.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) df = read_excel(reader, 'test1', header=header, index_col=[0, 1], parse_dates=False) if not self.merge_cells: fm = frame.columns.format(sparsify=False, adjoin=False, names=False) frame.columns = [".".join(map(str, q)) for q in zip(*fm)] tm.assert_frame_equal(frame, df) def test_to_excel_multiindex_dates(self): _skip_if_no_xlrd() # try multiindex with dates tsframe = self.tsframe.copy() new_index = [tsframe.index, np.arange(len(tsframe.index))] tsframe.index = MultiIndex.from_arrays(new_index) with ensure_clean(self.ext) as path: tsframe.index.names = ['time', 'foo'] tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = read_excel(reader, 'test1', index_col=[0, 1]) tm.assert_frame_equal(tsframe, recons) self.assertEqual(recons.index.names, ('time', 'foo')) def test_to_excel_multiindex_no_write_index(self): _skip_if_no_xlrd() # Test writing and re-reading a MI witout the index. GH 5616. # Initial non-MI frame. frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]}) # Add a MI. frame2 = frame1.copy() multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)]) frame2.index = multi_index with ensure_clean(self.ext) as path: # Write out to Excel without the index. frame2.to_excel(path, 'test1', index=False) # Read it back in. reader = ExcelFile(path) frame3 = read_excel(reader, 'test1') # Test that it is the same as the initial frame. tm.assert_frame_equal(frame1, frame3) def test_to_excel_float_format(self): _skip_if_no_xlrd() df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) with ensure_clean(self.ext) as filename: df.to_excel(filename, 'test1', float_format='%.2f') reader = ExcelFile(filename) rs = read_excel(reader, 'test1', index_col=None) xp = DataFrame([[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) def test_to_excel_output_encoding(self): _skip_if_no_xlrd() # avoid mixed inferred_type df = DataFrame([[u'\u0192', u'\u0193', u'\u0194'], [u'\u0195', u'\u0196', u'\u0197']], index=[u'A\u0192', u'B'], columns=[u'X\u0193', u'Y', u'Z']) with ensure_clean('__tmp_to_excel_float_format__.' 
+ self.ext)\ as filename: df.to_excel(filename, sheet_name='TestSheet', encoding='utf8') result = read_excel(filename, 'TestSheet', encoding='utf8') tm.assert_frame_equal(result, df) def test_to_excel_unicode_filename(self): _skip_if_no_xlrd() with ensure_clean(u('\u0192u.') + self.ext) as filename: try: f = open(filename, 'wb') except UnicodeEncodeError: raise nose.SkipTest('no unicode file names on this system') else: f.close() df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) df.to_excel(filename, 'test1', float_format='%.2f') reader = ExcelFile(filename) rs = read_excel(reader, 'test1', index_col=None) xp = DataFrame([[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) # def test_to_excel_header_styling_xls(self): # import StringIO # s = StringIO( # """Date,ticker,type,value # 2001-01-01,x,close,12.2 # 2001-01-01,x,open ,12.1 # 2001-01-01,y,close,12.2 # 2001-01-01,y,open ,12.1 # 2001-02-01,x,close,12.2 # 2001-02-01,x,open ,12.1 # 2001-02-01,y,close,12.2 # 2001-02-01,y,open ,12.1 # 2001-03-01,x,close,12.2 # 2001-03-01,x,open ,12.1 # 2001-03-01,y,close,12.2 # 2001-03-01,y,open ,12.1""") # df = read_csv(s, parse_dates=["Date"]) # pdf = df.pivot_table(values="value", rows=["ticker"], # cols=["Date", "type"]) # try: # import xlwt # import xlrd # except ImportError: # raise nose.SkipTest # filename = '__tmp_to_excel_header_styling_xls__.xls' # pdf.to_excel(filename, 'test1') # wbk = xlrd.open_workbook(filename, # formatting_info=True) # self.assertEqual(["test1"], wbk.sheet_names()) # ws = wbk.sheet_by_name('test1') # self.assertEqual([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)], # ws.merged_cells) # for i in range(0, 2): # for j in range(0, 7): # xfx = ws.cell_xf_index(0, 0) # cell_xf = wbk.xf_list[xfx] # font = wbk.font_list # self.assertEqual(1, font[cell_xf.font_index].bold) # self.assertEqual(1, cell_xf.border.top_line_style) # self.assertEqual(1, cell_xf.border.right_line_style) # self.assertEqual(1, cell_xf.border.bottom_line_style) # self.assertEqual(1, cell_xf.border.left_line_style) # self.assertEqual(2, cell_xf.alignment.hor_align) # os.remove(filename) # def test_to_excel_header_styling_xlsx(self): # import StringIO # s = StringIO( # """Date,ticker,type,value # 2001-01-01,x,close,12.2 # 2001-01-01,x,open ,12.1 # 2001-01-01,y,close,12.2 # 2001-01-01,y,open ,12.1 # 2001-02-01,x,close,12.2 # 2001-02-01,x,open ,12.1 # 2001-02-01,y,close,12.2 # 2001-02-01,y,open ,12.1 # 2001-03-01,x,close,12.2 # 2001-03-01,x,open ,12.1 # 2001-03-01,y,close,12.2 # 2001-03-01,y,open ,12.1""") # df = read_csv(s, parse_dates=["Date"]) # pdf = df.pivot_table(values="value", rows=["ticker"], # cols=["Date", "type"]) # try: # import openpyxl # from openpyxl.cell import get_column_letter # except ImportError: # raise nose.SkipTest # if openpyxl.__version__ < '1.6.1': # raise nose.SkipTest # # test xlsx_styling # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx' # pdf.to_excel(filename, 'test1') # wbk = openpyxl.load_workbook(filename) # self.assertEqual(["test1"], wbk.get_sheet_names()) # ws = wbk.get_sheet_by_name('test1') # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))] # xlsaddrs += ["A%s" % i for i in range(1, 6)] # xlsaddrs += ["B1", "D1", "F1"] # for xlsaddr in xlsaddrs: # cell = ws.cell(xlsaddr) # self.assertTrue(cell.style.font.bold) # self.assertEqual(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.top.border_style) # 
self.assertEqual(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.right.border_style) # self.assertEqual(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.bottom.border_style) # self.assertEqual(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.left.border_style) # self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER, # cell.style.alignment.horizontal) # mergedcells_addrs = ["C1", "E1", "G1"] # for maddr in mergedcells_addrs: # self.assertTrue(ws.cell(maddr).merged) # os.remove(filename) def test_excel_010_hemstring(self): _skip_if_no_xlrd() if self.merge_cells: raise nose.SkipTest('Skip tests for merged MI format.') from pandas.util.testing import makeCustomDataframe as mkdf # ensure limited functionality in 0.10 # override of #2370 until sorted out in 0.11 def roundtrip(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) xf = ExcelFile(path) res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) return res nrows = 5 ncols = 3 for use_headers in (True, False): for i in range(1, 4): # row multindex upto nlevel=3 for j in range(1, 4): # col "" df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) # this if will be removed once multi column excel writing # is implemented for now fixing #9794 if j > 1: with tm.assertRaises(NotImplementedError): res = roundtrip(df, use_headers, index=False) else: res = roundtrip(df, use_headers) if use_headers: self.assertEqual(res.shape, (nrows, ncols + i)) else: # first row taken as columns self.assertEqual(res.shape, (nrows - 1, ncols + i)) # no nans for r in range(len(res.index)): for c in range(len(res.columns)): self.assertTrue(res.ix[r, c] is not np.nan) res = roundtrip(DataFrame([0])) self.assertEqual(res.shape, (1, 1)) self.assertTrue(res.ix[0, 0] is not np.nan) res = roundtrip(DataFrame([0]), False, None) self.assertEqual(res.shape, (1, 2)) self.assertTrue(res.ix[0, 0] is not np.nan) def test_excel_010_hemstring_raises_NotImplementedError(self): # This test was failing only for j>1 and header=False, # So I reproduced a simple test. 
_skip_if_no_xlrd() if self.merge_cells: raise nose.SkipTest('Skip tests for merged MI format.') from pandas.util.testing import makeCustomDataframe as mkdf # ensure limited functionality in 0.10 # override of #2370 until sorted out in 0.11 def roundtrip2(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) xf = ExcelFile(path) res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) return res nrows = 5 ncols = 3 j = 2 i = 1 df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) with tm.assertRaises(NotImplementedError): roundtrip2(df, header=False, index=False) def test_duplicated_columns(self): # Test for issue #5235 _skip_if_no_xlrd() with ensure_clean(self.ext) as path: write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) colnames = ['A', 'B', 'B'] write_frame.columns = colnames write_frame.to_excel(path, 'test1') read_frame = read_excel(path, 'test1') read_frame.columns = colnames tm.assert_frame_equal(write_frame, read_frame) # 11007 / #10970 write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=['A', 'B', 'A', 'B']) write_frame.to_excel(path, 'test1') read_frame = read_excel(path, 'test1') read_frame.columns = ['A', 'B', 'A', 'B'] tm.assert_frame_equal(write_frame, read_frame) # 10982 write_frame.to_excel(path, 'test1', index=False, header=False) read_frame = read_excel(path, 'test1', header=None) write_frame.columns = [0, 1, 2, 3] tm.assert_frame_equal(write_frame, read_frame) def test_swapped_columns(self): # Test for issue #5427. _skip_if_no_xlrd() with ensure_clean(self.ext) as path: write_frame = DataFrame({'A': [1, 1, 1], 'B': [2, 2, 2]}) write_frame.to_excel(path, 'test1', columns=['B', 'A']) read_frame = read_excel(path, 'test1', header=0) tm.assert_series_equal(write_frame['A'], read_frame['A']) tm.assert_series_equal(write_frame['B'], read_frame['B']) def test_invalid_columns(self): # 10982 _skip_if_no_xlrd() with ensure_clean(self.ext) as path: write_frame = DataFrame({'A': [1, 1, 1], 'B': [2, 2, 2]}) write_frame.to_excel(path, 'test1', columns=['B', 'C']) expected = write_frame.loc[:, ['B', 'C']] read_frame = read_excel(path, 'test1') tm.assert_frame_equal(expected, read_frame) with tm.assertRaises(KeyError): write_frame.to_excel(path, 'test1', columns=['C', 'D']) def test_datetimes(self): # Test writing and reading datetimes. For issue #9139. 
(xref #9185) _skip_if_no_xlrd() datetimes = [datetime(2013, 1, 13, 1, 2, 3), datetime(2013, 1, 13, 2, 45, 56), datetime(2013, 1, 13, 4, 29, 49), datetime(2013, 1, 13, 6, 13, 42), datetime(2013, 1, 13, 7, 57, 35), datetime(2013, 1, 13, 9, 41, 28), datetime(2013, 1, 13, 11, 25, 21), datetime(2013, 1, 13, 13, 9, 14), datetime(2013, 1, 13, 14, 53, 7), datetime(2013, 1, 13, 16, 37, 0), datetime(2013, 1, 13, 18, 20, 52)] with ensure_clean(self.ext) as path: write_frame = DataFrame.from_items([('A', datetimes)]) write_frame.to_excel(path, 'Sheet1') read_frame = read_excel(path, 'Sheet1', header=0) tm.assert_series_equal(write_frame['A'], read_frame['A']) # GH7074 def test_bytes_io(self): _skip_if_no_xlrd() bio = BytesIO() df = DataFrame(np.random.randn(10, 2)) writer = ExcelWriter(bio) df.to_excel(writer) writer.save() bio.seek(0) reread_df = read_excel(bio) tm.assert_frame_equal(df, reread_df) # GH8188 def test_write_lists_dict(self): _skip_if_no_xlrd() df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}], 'numeric': [1, 2, 3.0], 'str': ['apple', 'banana', 'cherry']}) expected = df.copy() expected.mixed = expected.mixed.apply(str) expected.numeric = expected.numeric.astype('int64') with ensure_clean(self.ext) as path: df.to_excel(path, 'Sheet1') read = read_excel(path, 'Sheet1', header=0) tm.assert_frame_equal(read, expected) # GH13347 def test_true_and_false_value_options(self): df = pd.DataFrame([['foo', 'bar']], columns=['col1', 'col2']) expected = df.replace({'foo': True, 'bar': False}) with ensure_clean(self.ext) as path: df.to_excel(path) read_frame = read_excel(path, true_values=['foo'], false_values=['bar']) tm.assert_frame_equal(read_frame, expected) def raise_wrapper(major_ver): def versioned_raise_wrapper(orig_method): @functools.wraps(orig_method) def wrapped(self, *args, **kwargs): _skip_if_no_openpyxl() if openpyxl_compat.is_compat(major_ver=major_ver): orig_method(self, *args, **kwargs) else: msg = (r'Installed openpyxl is not supported at this ' r'time\. 
Use.+') with tm.assertRaisesRegexp(ValueError, msg): orig_method(self, *args, **kwargs) return wrapped return versioned_raise_wrapper def raise_on_incompat_version(major_ver): def versioned_raise_on_incompat_version(cls): methods = filter(operator.methodcaller( 'startswith', 'test_'), dir(cls)) for method in methods: setattr(cls, method, raise_wrapper( major_ver)(getattr(cls, method))) return cls return versioned_raise_on_incompat_version @raise_on_incompat_version(1) class OpenpyxlTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl1' check_skip = staticmethod(lambda *args, **kwargs: None) def test_to_excel_styleconverter(self): _skip_if_no_openpyxl() if not openpyxl_compat.is_compat(major_ver=1): raise nose.SkipTest('incompatiable openpyxl version') import openpyxl hstyle = {"font": {"bold": True}, "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle) self.assertTrue(xlsx_style.font.bold) self.assertEqual(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.top.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.right.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.bottom.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.left.border_style) self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER, xlsx_style.alignment.horizontal) self.assertEqual(openpyxl.style.Alignment.VERTICAL_TOP, xlsx_style.alignment.vertical) def skip_openpyxl_gt21(cls): """Skip a TestCase instance if openpyxl >= 2.2""" @classmethod def setUpClass(cls): _skip_if_no_openpyxl() import openpyxl ver = openpyxl.__version__ if (not (LooseVersion(ver) >= LooseVersion('2.0.0') and LooseVersion(ver) < LooseVersion('2.2.0'))): raise nose.SkipTest("openpyxl %s >= 2.2" % str(ver)) cls.setUpClass = setUpClass return cls @raise_on_incompat_version(2) @skip_openpyxl_gt21 class Openpyxl20Tests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl20' check_skip = staticmethod(lambda *args, **kwargs: None) def test_to_excel_styleconverter(self): import openpyxl from openpyxl import styles hstyle = { "font": { "color": '00FF0000', "bold": True, }, "borders": { "top": "thin", "right": "thin", "bottom": "thin", "left": "thin", }, "alignment": { "horizontal": "center", "vertical": "top", }, "fill": { "patternType": 'solid', 'fgColor': { 'rgb': '006666FF', 'tint': 0.3, }, }, "number_format": { "format_code": "0.00" }, "protection": { "locked": True, "hidden": False, }, } font_color = styles.Color('00FF0000') font = styles.Font(bold=True, color=font_color) side = styles.Side(style=styles.borders.BORDER_THIN) border = styles.Border(top=side, right=side, bottom=side, left=side) alignment = styles.Alignment(horizontal='center', vertical='top') fill_color = styles.Color(rgb='006666FF', tint=0.3) fill = styles.PatternFill(patternType='solid', fgColor=fill_color) # ahh openpyxl API changes ver = openpyxl.__version__ if ver >= LooseVersion('2.0.0') and ver < LooseVersion('2.1.0'): number_format = styles.NumberFormat(format_code='0.00') else: number_format = '0.00' # XXX: Only works with openpyxl-2.1.0 protection = styles.Protection(locked=True, hidden=False) kw = _Openpyxl20Writer._convert_to_style_kwargs(hstyle) self.assertEqual(kw['font'], font) self.assertEqual(kw['border'], border) self.assertEqual(kw['alignment'], alignment) self.assertEqual(kw['fill'], fill) 
self.assertEqual(kw['number_format'], number_format) self.assertEqual(kw['protection'], protection) def test_write_cells_merge_styled(self): from pandas.formats.format import ExcelCell from openpyxl import styles sheet_name = 'merge_styled' sty_b1 = {'font': {'color': '00FF0000'}} sty_a2 = {'font': {'color': '0000FF00'}} initial_cells = [ ExcelCell(col=1, row=0, val=42, style=sty_b1), ExcelCell(col=0, row=1, val=99, style=sty_a2), ] sty_merged = {'font': {'color': '000000FF', 'bold': True}} sty_kwargs = _Openpyxl20Writer._convert_to_style_kwargs(sty_merged) openpyxl_sty_merged = styles.Style(**sty_kwargs) merge_cells = [ ExcelCell(col=0, row=0, val='pandas', mergestart=1, mergeend=1, style=sty_merged), ] with ensure_clean('.xlsx') as path: writer = _Openpyxl20Writer(path) writer.write_cells(initial_cells, sheet_name=sheet_name) writer.write_cells(merge_cells, sheet_name=sheet_name) wks = writer.sheets[sheet_name] xcell_b1 = wks.cell('B1') xcell_a2 = wks.cell('A2') self.assertEqual(xcell_b1.style, openpyxl_sty_merged) self.assertEqual(xcell_a2.style, openpyxl_sty_merged) def skip_openpyxl_lt22(cls): """Skip a TestCase instance if openpyxl < 2.2""" @classmethod def setUpClass(cls): _skip_if_no_openpyxl() import openpyxl ver = openpyxl.__version__ if LooseVersion(ver) < LooseVersion('2.2.0'): raise nose.SkipTest("openpyxl %s < 2.2" % str(ver)) cls.setUpClass = setUpClass return cls @raise_on_incompat_version(2) @skip_openpyxl_lt22 class Openpyxl22Tests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl22' check_skip = staticmethod(lambda *args, **kwargs: None) def test_to_excel_styleconverter(self): from openpyxl import styles hstyle = { "font": { "color": '00FF0000', "bold": True, }, "borders": { "top": "thin", "right": "thin", "bottom": "thin", "left": "thin", }, "alignment": { "horizontal": "center", "vertical": "top", }, "fill": { "patternType": 'solid', 'fgColor': { 'rgb': '006666FF', 'tint': 0.3, }, }, "number_format": { "format_code": "0.00" }, "protection": { "locked": True, "hidden": False, }, } font_color = styles.Color('00FF0000') font = styles.Font(bold=True, color=font_color) side = styles.Side(style=styles.borders.BORDER_THIN) border = styles.Border(top=side, right=side, bottom=side, left=side) alignment = styles.Alignment(horizontal='center', vertical='top') fill_color = styles.Color(rgb='006666FF', tint=0.3) fill = styles.PatternFill(patternType='solid', fgColor=fill_color) number_format = '0.00' protection = styles.Protection(locked=True, hidden=False) kw = _Openpyxl22Writer._convert_to_style_kwargs(hstyle) self.assertEqual(kw['font'], font) self.assertEqual(kw['border'], border) self.assertEqual(kw['alignment'], alignment) self.assertEqual(kw['fill'], fill) self.assertEqual(kw['number_format'], number_format) self.assertEqual(kw['protection'], protection) def test_write_cells_merge_styled(self): if not openpyxl_compat.is_compat(major_ver=2): raise nose.SkipTest('incompatiable openpyxl version') from pandas.formats.format import ExcelCell sheet_name = 'merge_styled' sty_b1 = {'font': {'color': '00FF0000'}} sty_a2 = {'font': {'color': '0000FF00'}} initial_cells = [ ExcelCell(col=1, row=0, val=42, style=sty_b1), ExcelCell(col=0, row=1, val=99, style=sty_a2), ] sty_merged = {'font': {'color': '000000FF', 'bold': True}} sty_kwargs = _Openpyxl22Writer._convert_to_style_kwargs(sty_merged) openpyxl_sty_merged = sty_kwargs['font'] merge_cells = [ ExcelCell(col=0, row=0, val='pandas', mergestart=1, mergeend=1, style=sty_merged), ] with ensure_clean('.xlsx') as path: 
writer = _Openpyxl22Writer(path) writer.write_cells(initial_cells, sheet_name=sheet_name) writer.write_cells(merge_cells, sheet_name=sheet_name) wks = writer.sheets[sheet_name] xcell_b1 = wks.cell('B1') xcell_a2 = wks.cell('A2') self.assertEqual(xcell_b1.font, openpyxl_sty_merged) self.assertEqual(xcell_a2.font, openpyxl_sty_merged) class XlwtTests(ExcelWriterBase, tm.TestCase): ext = '.xls' engine_name = 'xlwt' check_skip = staticmethod(_skip_if_no_xlwt) def test_excel_raise_error_on_multiindex_columns_and_no_index(self): _skip_if_no_xlwt() # MultiIndex as columns is not yet implemented 9794 cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = DataFrame(np.random.randn(10, 3), columns=cols) with tm.assertRaises(NotImplementedError): with ensure_clean(self.ext) as path: df.to_excel(path, index=False) def test_excel_multiindex_columns_and_index_true(self): _skip_if_no_xlwt() cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = pd.DataFrame(np.random.randn(10, 3), columns=cols) with ensure_clean(self.ext) as path: df.to_excel(path, index=True) def test_excel_multiindex_index(self): _skip_if_no_xlwt() # MultiIndex as index works so assert no error #9794 cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = DataFrame(np.random.randn(3, 10), index=cols) with ensure_clean(self.ext) as path: df.to_excel(path, index=False) def test_to_excel_styleconverter(self): _skip_if_no_xlwt() import xlwt hstyle = {"font": {"bold": True}, "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} xls_style = _XlwtWriter._convert_to_style(hstyle) self.assertTrue(xls_style.font.bold) self.assertEqual(xlwt.Borders.THIN, xls_style.borders.top) self.assertEqual(xlwt.Borders.THIN, xls_style.borders.right) self.assertEqual(xlwt.Borders.THIN, xls_style.borders.bottom) self.assertEqual(xlwt.Borders.THIN, xls_style.borders.left) self.assertEqual(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz) self.assertEqual(xlwt.Alignment.VERT_TOP, xls_style.alignment.vert) class XlsxWriterTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'xlsxwriter' check_skip = staticmethod(_skip_if_no_xlsxwriter) def test_column_format(self): # Test that column formats are applied to cells. Test for issue #9167. # Applicable to xlsxwriter only. _skip_if_no_xlsxwriter() with warnings.catch_warnings(): # Ignore the openpyxl lxml warning. warnings.simplefilter("ignore") _skip_if_no_openpyxl() import openpyxl with ensure_clean(self.ext) as path: frame = DataFrame({'A': [123456, 123456], 'B': [123456, 123456]}) writer = ExcelWriter(path) frame.to_excel(writer) # Add a number format to col B and ensure it is applied to cells. num_format = '#,##0' write_workbook = writer.book write_worksheet = write_workbook.worksheets()[0] col_format = write_workbook.add_format({'num_format': num_format}) write_worksheet.set_column('B:B', None, col_format) writer.save() read_workbook = openpyxl.load_workbook(path) read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1') # Get the number format from the cell. This method is backward # compatible with older versions of openpyxl. 
cell = read_worksheet.cell('B2') try: read_num_format = cell.number_format except: read_num_format = cell.style.number_format._format_code self.assertEqual(read_num_format, num_format) class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' check_skip = staticmethod(_skip_if_no_openpyxl) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. merge_cells = False class XlwtTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xls' engine_name = 'xlwt' check_skip = staticmethod(_skip_if_no_xlwt) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. merge_cells = False class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'xlsxwriter' check_skip = staticmethod(_skip_if_no_xlsxwriter) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. merge_cells = False class ExcelWriterEngineTests(tm.TestCase): def test_ExcelWriter_dispatch(self): with tm.assertRaisesRegexp(ValueError, 'No engine'): ExcelWriter('nothing') try: import xlsxwriter # noqa writer_klass = _XlsxWriter except ImportError: _skip_if_no_openpyxl() if not openpyxl_compat.is_compat(major_ver=1): raise nose.SkipTest('incompatible openpyxl version') writer_klass = _Openpyxl1Writer with ensure_clean('.xlsx') as path: writer = ExcelWriter(path) tm.assertIsInstance(writer, writer_klass) _skip_if_no_xlwt() with ensure_clean('.xls') as path: writer = ExcelWriter(path) tm.assertIsInstance(writer, _XlwtWriter) def test_register_writer(self): # some awkward mocking to test out dispatch and such actually works called_save = [] called_write_cells = [] class DummyClass(ExcelWriter): called_save = False called_write_cells = False supported_extensions = ['test', 'xlsx', 'xls'] engine = 'dummy' def save(self): called_save.append(True) def write_cells(self, *args, **kwargs): called_write_cells.append(True) def check_called(func): func() self.assertTrue(len(called_save) >= 1) self.assertTrue(len(called_write_cells) >= 1) del called_save[:] del called_write_cells[:] with pd.option_context('io.excel.xlsx.writer', 'dummy'): register_writer(DummyClass) writer = ExcelWriter('something.test') tm.assertIsInstance(writer, DummyClass) df = tm.makeCustomDataframe(1, 1) panel = tm.makePanel() func = lambda: df.to_excel('something.test') check_called(func) check_called(lambda: panel.to_excel('something.test')) check_called(lambda: df.to_excel('something.xlsx')) check_called(lambda: df.to_excel('something.xls', engine='dummy')) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
mit
madphysicist/numpy
numpy/lib/tests/test_type_check.py
17
15119
import numpy as np from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises ) from numpy.lib.type_check import ( common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close ) def assert_all(x): assert_(np.all(x), x) class TestCommonType: def test_basic(self): ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) assert_(common_type(ai32) == np.float64) assert_(common_type(af16) == np.float16) assert_(common_type(af32) == np.float32) assert_(common_type(af64) == np.float64) assert_(common_type(acs) == np.csingle) assert_(common_type(acd) == np.cdouble) class TestMintypecode: def test_default_1(self): for itype in '1bcsuwil': assert_equal(mintypecode(itype), 'd') assert_equal(mintypecode('f'), 'f') assert_equal(mintypecode('d'), 'd') assert_equal(mintypecode('F'), 'F') assert_equal(mintypecode('D'), 'D') def test_default_2(self): for itype in '1bcsuwil': assert_equal(mintypecode(itype+'f'), 'f') assert_equal(mintypecode(itype+'d'), 'd') assert_equal(mintypecode(itype+'F'), 'F') assert_equal(mintypecode(itype+'D'), 'D') assert_equal(mintypecode('ff'), 'f') assert_equal(mintypecode('fd'), 'd') assert_equal(mintypecode('fF'), 'F') assert_equal(mintypecode('fD'), 'D') assert_equal(mintypecode('df'), 'd') assert_equal(mintypecode('dd'), 'd') #assert_equal(mintypecode('dF',savespace=1),'F') assert_equal(mintypecode('dF'), 'D') assert_equal(mintypecode('dD'), 'D') assert_equal(mintypecode('Ff'), 'F') #assert_equal(mintypecode('Fd',savespace=1),'F') assert_equal(mintypecode('Fd'), 'D') assert_equal(mintypecode('FF'), 'F') assert_equal(mintypecode('FD'), 'D') assert_equal(mintypecode('Df'), 'D') assert_equal(mintypecode('Dd'), 'D') assert_equal(mintypecode('DF'), 'D') assert_equal(mintypecode('DD'), 'D') def test_default_3(self): assert_equal(mintypecode('fdF'), 'D') #assert_equal(mintypecode('fdF',savespace=1),'F') assert_equal(mintypecode('fdD'), 'D') assert_equal(mintypecode('fFD'), 'D') assert_equal(mintypecode('dFD'), 'D') assert_equal(mintypecode('ifd'), 'd') assert_equal(mintypecode('ifF'), 'F') assert_equal(mintypecode('ifD'), 'D') assert_equal(mintypecode('idF'), 'D') #assert_equal(mintypecode('idF',savespace=1),'F') assert_equal(mintypecode('idD'), 'D') class TestIsscalar: def test_basic(self): assert_(np.isscalar(3)) assert_(not np.isscalar([3])) assert_(not np.isscalar((3,))) assert_(np.isscalar(3j)) assert_(np.isscalar(4.0)) class TestReal: def test_real(self): y = np.random.rand(10,) assert_array_equal(y, np.real(y)) y = np.array(1) out = np.real(y) assert_array_equal(y, out) assert_(isinstance(out, np.ndarray)) y = 1 out = np.real(y) assert_equal(y, out) assert_(not isinstance(out, np.ndarray)) def test_cmplx(self): y = np.random.rand(10,)+1j*np.random.rand(10,) assert_array_equal(y.real, np.real(y)) y = np.array(1 + 1j) out = np.real(y) assert_array_equal(y.real, out) assert_(isinstance(out, np.ndarray)) y = 1 + 1j out = np.real(y) assert_equal(1.0, out) assert_(not isinstance(out, np.ndarray)) class TestImag: def test_real(self): y = np.random.rand(10,) assert_array_equal(0, np.imag(y)) y = np.array(1) out = np.imag(y) assert_array_equal(0, out) assert_(isinstance(out, np.ndarray)) y = 1 out = np.imag(y) 
assert_equal(0, out) assert_(not isinstance(out, np.ndarray)) def test_cmplx(self): y = np.random.rand(10,)+1j*np.random.rand(10,) assert_array_equal(y.imag, np.imag(y)) y = np.array(1 + 1j) out = np.imag(y) assert_array_equal(y.imag, out) assert_(isinstance(out, np.ndarray)) y = 1 + 1j out = np.imag(y) assert_equal(1.0, out) assert_(not isinstance(out, np.ndarray)) class TestIscomplex: def test_fail(self): z = np.array([-1, 0, 1]) res = iscomplex(z) assert_(not np.sometrue(res, axis=0)) def test_pass(self): z = np.array([-1j, 1, 0]) res = iscomplex(z) assert_array_equal(res, [1, 0, 0]) class TestIsreal: def test_pass(self): z = np.array([-1, 0, 1j]) res = isreal(z) assert_array_equal(res, [1, 1, 0]) def test_fail(self): z = np.array([-1j, 1, 0]) res = isreal(z) assert_array_equal(res, [0, 1, 1]) class TestIscomplexobj: def test_basic(self): z = np.array([-1, 0, 1]) assert_(not iscomplexobj(z)) z = np.array([-1j, 0, -1]) assert_(iscomplexobj(z)) def test_scalar(self): assert_(not iscomplexobj(1.0)) assert_(iscomplexobj(1+0j)) def test_list(self): assert_(iscomplexobj([3, 1+0j, True])) assert_(not iscomplexobj([3, 1, True])) def test_duck(self): class DummyComplexArray: @property def dtype(self): return np.dtype(complex) dummy = DummyComplexArray() assert_(iscomplexobj(dummy)) def test_pandas_duck(self): # This tests a custom np.dtype duck-typed class, such as used by pandas # (pandas.core.dtypes) class PdComplex(np.complex128): pass class PdDtype: name = 'category' names = None type = PdComplex kind = 'c' str = '<c16' base = np.dtype('complex128') class DummyPd: @property def dtype(self): return PdDtype dummy = DummyPd() assert_(iscomplexobj(dummy)) def test_custom_dtype_duck(self): class MyArray(list): @property def dtype(self): return complex a = MyArray([1+0j, 2+0j, 3+0j]) assert_(iscomplexobj(a)) class TestIsrealobj: def test_basic(self): z = np.array([-1, 0, 1]) assert_(isrealobj(z)) z = np.array([-1j, 0, -1]) assert_(not isrealobj(z)) class TestIsnan: def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isnan(z) == 0 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore'): assert_all(np.isnan(np.array((1.,))/0.) == 0) def test_neginf(self): with np.errstate(divide='ignore'): assert_all(np.isnan(np.array((-1.,))/0.) == 0) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isnan(np.array((0.,))/0.) == 1) def test_integer(self): assert_all(np.isnan(1) == 0) def test_complex(self): assert_all(np.isnan(1+1j) == 0) def test_complex1(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isnan(np.array(0+0j)/0.) == 1) class TestIsfinite: # Fixme, wrong place, isfinite now ufunc def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isfinite(z) == 1 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((1.,))/0.) == 0) def test_neginf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((-1.,))/0.) == 0) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((0.,))/0.) == 0) def test_integer(self): assert_all(np.isfinite(1) == 1) def test_complex(self): assert_all(np.isfinite(1+1j) == 1) def test_complex1(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array(1+1j)/0.) 
== 0) class TestIsinf: # Fixme, wrong place, isinf now ufunc def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isinf(z) == 0 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((1.,))/0.) == 1) def test_posinf_scalar(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array(1.,)/0.) == 1) def test_neginf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((-1.,))/0.) == 1) def test_neginf_scalar(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array(-1.)/0.) == 1) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((0.,))/0.) == 0) class TestIsposinf: def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = isposinf(np.array((-1., 0, 1))/0.) assert_(vals[0] == 0) assert_(vals[1] == 0) assert_(vals[2] == 1) class TestIsneginf: def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = isneginf(np.array((-1., 0, 1))/0.) assert_(vals[0] == 1) assert_(vals[1] == 0) assert_(vals[2] == 0) class TestNanToNum: def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = nan_to_num(np.array((-1., 0, 1))/0.) assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) # perform the same tests but with nan, posinf and neginf keywords with np.errstate(divide='ignore', invalid='ignore'): vals = nan_to_num(np.array((-1., 0, 1))/0., nan=10, posinf=20, neginf=30) assert_equal(vals, [30, 10, 20]) assert_all(np.isfinite(vals[[0, 2]])) assert_equal(type(vals), np.ndarray) # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): vals = np.array((-1., 0, 1))/0. result = nan_to_num(vals, copy=False) assert_(result is vals) assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): vals = np.array((-1., 0, 1))/0. 
result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) assert_(result is vals) assert_equal(vals, [30, 10, 20]) assert_all(np.isfinite(vals[[0, 2]])) assert_equal(type(vals), np.ndarray) def test_array(self): vals = nan_to_num([1]) assert_array_equal(vals, np.array([1], int)) assert_equal(type(vals), np.ndarray) vals = nan_to_num([1], nan=10, posinf=20, neginf=30) assert_array_equal(vals, np.array([1], int)) assert_equal(type(vals), np.ndarray) def test_integer(self): vals = nan_to_num(1) assert_all(vals == 1) assert_equal(type(vals), np.int_) vals = nan_to_num(1, nan=10, posinf=20, neginf=30) assert_all(vals == 1) assert_equal(type(vals), np.int_) def test_float(self): vals = nan_to_num(1.0) assert_all(vals == 1.0) assert_equal(type(vals), np.float_) vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) assert_all(vals == 1.1) assert_equal(type(vals), np.float_) def test_complex_good(self): vals = nan_to_num(1+1j) assert_all(vals == 1+1j) assert_equal(type(vals), np.complex_) vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30) assert_all(vals == 1+1j) assert_equal(type(vals), np.complex_) def test_complex_bad(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j v += np.array(0+1.j)/0. vals = nan_to_num(v) # !! This is actually (unexpectedly) zero assert_all(np.isfinite(vals)) assert_equal(type(vals), np.complex_) def test_complex_bad2(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j v += np.array(-1+1.j)/0. vals = nan_to_num(v) assert_all(np.isfinite(vals)) assert_equal(type(vals), np.complex_) # Fixme #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) # !! This is actually (unexpectedly) positive # !! inf. Comment out for now, and see if it # !! changes #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) def test_do_not_rewrite_previous_keyword(self): # This is done to test that when, for instance, nan=np.inf then these # values are not rewritten by posinf keyword to the posinf value. with np.errstate(divide='ignore', invalid='ignore'): vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) assert_all(np.isfinite(vals[[0, 2]])) assert_all(vals[0] < -1e10) assert_equal(vals[[1, 2]], [np.inf, 999]) assert_equal(type(vals), np.ndarray) class TestRealIfClose: def test_basic(self): a = np.random.rand(10) b = real_if_close(a+1e-15j) assert_all(isrealobj(b)) assert_array_equal(a, b) b = real_if_close(a+1e-7j) assert_all(iscomplexobj(b)) b = real_if_close(a+1e-7j, tol=1e-6) assert_all(isrealobj(b)) class TestArrayConversion: def test_asfarray(self): a = asfarray(np.array([1, 2, 3])) assert_equal(a.__class__, np.ndarray) assert_(np.issubdtype(a.dtype, np.floating)) # previously this would infer dtypes from arrays, unlike every single # other numpy function assert_raises(TypeError, asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
bsd-3-clause
quaquel/EMAworkbench
docs/source/pyplots/basicMultiplotDensity.py
1
1058
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_scatter, pairs_density, pairs_lines
from expWorkbench.util import load_results

#load the data
experiments, results = load_results(r'../../../src/analysis/1000 flu cases.cPickle',
                                    zipped=False)

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'deceased population region 1':
        newResults[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1)

        # we want the time at which the maximum occurred
        # the code here is a bit obscure, I don't know why the transpose
        # of value is needed. This however does produce the appropriate results
        logicalIndex = value.T==np.max(value, axis=1)
        newResults['time of max'] = time[logicalIndex.T]

pairs_density((experiments, newResults))
plt.show()
bsd-3-clause
saskartt/P4UL
pyRaster/extractDomainFromTile.py
1
7475
#!/usr/bin/env python import sys import argparse import numpy as np from mapTools import * from utilities import filesFromList, writeLog from plotTools import addImagePlot, addContourf, addScatterPlot import matplotlib.pyplot as plt ''' Description: Author: Mikko Auvinen [email protected] University of Helsinki & Finnish Meteorological Institute ''' #==========================================================# parser = argparse.ArgumentParser(prog='extractDomainFromTile.py') parser.add_argument("-f", "--filename",type=str, help="Name of raster data file.") parser.add_argument("-fo", "--fileOut",type=str, help="Name of output Palm mesh file.",\ default="PalmTopo") parser.add_argument("-iP","--iPivot", help="Local pixel ids [N,E] for the pivot in the raster file.",\ type=int,nargs=2,required=True) parser.add_argument("-N","--NxG", help="Number of points [Nx, Ny] in the 2D Palm grid.",\ type=int,nargs=2, default=[ 2048 , 1024]) parser.add_argument("-dx","--dxG", help="Resolution [dx, dy] of the 2D Palm grid.",\ type=float,nargs=2, default=[ 2. , 2.]) parser.add_argument("-r","--rLx", type=float,nargs=2, default=[ 0.9, 0.5],\ help="Pivot location [rLx, rLy] as ratio of Lx & Ly of grid domain (top left origo).") parser.add_argument("-wd", "--windDir", type=float,default=270.0,\ help="Wind direction (deg) --> Rotation angle around the pivot point. North wind = 0deg") parser.add_argument("-nr", "--noRotation", action="store_true",default=False,\ help="Do not rotate the grid.") parser.add_argument("-s", "--scale",type=float,\ help="Scale factor for the output. Default=1.", default=1.) parser.add_argument("-p", "--printOn", help="Print the resulting raster data.",\ action="store_true", default=False) parser.add_argument("-pp", "--printOnly", help="Only print the resulting data. Don't save.",\ action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Print intermediates onto the screen.",\ action="store_true", default=False) args = parser.parse_args() writeLog( parser, args, args.printOnly ) #==========================================================# # Renaming the argument variables for brevity and clarity: filename = args.filename fileOut = args.fileOut NxG = args.NxG iPv = args.iPivot dxG = args.dxG rLx = args.rLx windDir = args.windDir noRotation= args.noRotation verbose = args.verbose printOn = args.printOn printOnly = args.printOnly # Read in the underlying topography data and obtain the pivot coordinates. dataOnly = False Rdict= readNumpyZTileForMesh( filename ) R = Rdict['R'] nY = Rdict['rowCoords'] eX = Rdict['colCoords'] if( verbose ): print(' [N] coords = {}...{}, [E] coords = {}...{}'\ .format(nY[0], nY[-1], eX[0], eX[-1] )) Rdims = np.array(np.shape(R)) # Retain information about rotation try: gridRot = Rdict['gridRot'] except: gridRot = 0 ROrig = Rdict['GlobOrig'] dPx = entry2Int( Rdict['dPx'] ) if( verbose ): print(' dPx = {} '.format(dPx)) # Pivot coordinates pY = nY[iPv[0]]; pX = eX[iPv[1]] print(' Origo in the Topography data: [N,E] = [{}, {}]'.format(ROrig[0],ROrig[1])) print(' Pivot Coords in Topography data: [N,E] = [{}, {}]'.format(pY,pX)) #NY, EX = np.meshgrid(nY,eX) ''' Create Palm grid which obeys the X,Y-coordinate layout. This might cause confusion so let's proceed carefully. NOTE: Even though the data is saved as raster array, the data points are now cell centers. ''' xbegin = dxG[0]/2. # First cell-centers. ybegin = dxG[1]/2. xend = NxG[0]*dxG[0] - xbegin # Last cell-center. 
yend = NxG[1]*dxG[1] - ybegin XgridCoords = np.linspace(xbegin,xend, NxG[0]) YgridCoords = np.linspace(ybegin,yend, NxG[1]) #Xg, Yg = np.meshgrid( XgridCoords, YgridCoords ) # Location of the pivot (indecies and coords) in the Palm grid. Not going into negative indices. iPGx = np.maximum(int(rLx[0]*NxG[0])-1,0) iPGy = np.maximum(int((1-rLx[1])*NxG[1])-1,0) pXG = XgridCoords[iPGx] pYG = YgridCoords[iPGy] if( verbose ): print ' Palm grid pivot indices: iPGx = {}, iPGy = {}'.format( iPGx, iPGy ) print ' Palm grid pivot coords: pXG = {}, pYG = {}'.format( pXG, pYG ) ''' From palm coordinates to underlying topography coordinates. We use the pivot point which is known for both systems. ''' dXT = pX - pXG dYT = pY - pYG #dXT = (pX - ROrig[1]) - pXG #dYT = (pY - ROrig[0]) - pYG XT = XgridCoords + dXT YT = YgridCoords + dYT if(verbose): print(' Coordinate transform: dXT = {}, dYT = {}'.format( dXT, dYT )) print(' Transformed coords: XT = {}...{}, YT = {}...{}'\ .format(np.min(XT), np.max(XT),np.min(YT), np.max(YT))) ''' Rotate the new coordinates according to the wind direction: Coordinate transformations for counterclockwise rotation. ''' # NOTE: At the pivot point XTR = pX XTM, YTM = np.meshgrid( XT, YT ) if (noRotation): theta = 0. else: theta = 270. - windDir if( theta != 0.): XTRM,YTRM = rotateGridAroundPivot(XTM,YTM, pX, pY,theta, deg=True) else: print(' No rotation! ') XTRM = XTM.copy(); YTRM = YTM.copy() ''' Bottom Left : XTRM[0,0], YTRM[0,0] Top Left : XTRM[-1,0], YTRM[-1,0] Bottom Right : XTRM[0,-1], YTRM[0,-1] Top Right : XTRM[-1,-1], YTRM[-1,-1]) ''' XT = None; YT = None XTM = None; YTM = None ''' Using the known transformed coordinates, we can extract the pixel values at those locations and copy them to the Palm grid. The grid arrays origo is located at the bottom left, which makes things a bit confusing here. ''' Irow = ((ROrig[0]-YTRM)/dPx ).astype(int) Jcol = ((XTRM-ROrig[1])/dPx ).astype(int) # Make sure the indecies don't run beyond the allowable bounds. if (np.amin(Irow) < 0 or np.amin(Jcol) < 0): # Warn the user about streching edges. print("WARNING: Domain out of raster data bounds! Streching edge cells to fill the domain.") Irow = np.maximum(Irow, 0); Jcol = np.maximum(Jcol, 0) Irow = np.minimum(Irow, Rdims[0]-1); Jcol = np.minimum(Jcol, Rdims[1]-1) #print " np.shape(Irow) = {}, Irow = {} ".format(np.shape(Irow) ,Irow[::4,::4]) #print " Jcol = {} ".format(Jcol[::4,::4] ) Xdims = np.array( np.shape(XTRM) ) PR = np.zeros( Xdims , float) PR[::-1,:] = R[Irow,Jcol] # The row order must be reversed. R = None ''' Reset the top left origo utilizing the NON-rotated coordinates. This allows the relative position of different raster maps (with identical coord. rotation) to be determined easily. ''' theta2 = gridRot/(np.pi/180.) XTRM,YTRM = rotateGridAroundPivot(XTRM,YTRM, ROrig[1], ROrig[0],theta2, deg=True) PROrig = np.array([ YTRM[-1,0], XTRM[-1,0] ]) # Reset top left origo print(' Top left origo coords. (cell centers!): [N,E] = {}'.format(PROrig)) rotation = (theta+theta2)*(np.pi/180.) print((theta+theta2)*(np.pi/180.)) # Retain unused keys from original raster PRdict = Rdict.copy() Rdict = None PRdict['R'] = PR PRdict['GlobOrig'] = PROrig PRdict['gridRot'] = rotation PRdict['dPx'] = np.array([dxG[0],dxG[1]]) if( not args.printOnly ): saveTileAsNumpyZ( fileOut, PRdict) # Print the raster map, first, in a coordinate system where x-axis is aligned with the windDir # and, second, in its original orientation. 
if( printOn or printOnly ): figDims = 13.*(Xdims[::-1].astype(float)/np.max(Xdims)) fig = plt.figure(num=1, figsize=figDims) fig = addImagePlot( fig, PR, args.fileOut ) CO = addContourf( XTRM, YTRM, PR[::-1,:], " Z(X,Y) ", "PALM DOMAIN ON MAP" ) plt.show() XTRM = None; YTRM = None PR = None; PRdict = None
mit
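The script above rotates its grid coordinates around a pivot with rotateGridAroundPivot, which presumably comes from its star-imported mapTools helpers and is not shown in this record. As a rough sketch of what such a counterclockwise rotation around a pivot point does, assuming the same (X, Y, pivotX, pivotY, angle-in-degrees) interface as the call sites above, here is a minimal NumPy version; the function name and grid values are illustrative only:

import numpy as np

def rotate_around_pivot(X, Y, px, py, theta_deg):
    # Counterclockwise rotation of coordinate arrays (X, Y) by theta_deg
    # degrees around the pivot point (px, py).
    theta = np.deg2rad(theta_deg)
    c, s = np.cos(theta), np.sin(theta)
    Xr = px + c * (X - px) - s * (Y - py)
    Yr = py + s * (X - px) + c * (Y - py)
    return Xr, Yr

# Example: rotate a small grid 45 degrees around its center point.
x = np.linspace(0., 10., 6)
y = np.linspace(0., 10., 6)
XM, YM = np.meshgrid(x, y)
XR, YR = rotate_around_pivot(XM, YM, 5., 5., 45.)
print(XR[0, 0], YR[0, 0])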
raymondxyang/tensorflow
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
137
2219
# encoding: utf-8 # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Categorical tests.""" # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS from tensorflow.contrib.learn.python.learn.preprocessing import categorical from tensorflow.python.platform import test class CategoricalTest(test.TestCase): """Categorical tests.""" def testSingleCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor(min_frequency=1) x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"], [1], ["0"], [np.nan], [3]]) self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]]) def testSingleCategoricalProcessorPandasSingleDF(self): if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top cat_processor = categorical.CategoricalProcessor() data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]}) x = list(cat_processor.fit_transform(data)) self.assertAllEqual(list(x), [[1], [2], [1]]) def testMultiCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor( min_frequency=0, share=False) x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]]) self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]]) if __name__ == "__main__": test.main()
apache-2.0
jseabold/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <[email protected]> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
eickenberg/scikit-learn
sklearn/tests/test_lda.py
22
1521
import numpy as np from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from .. import lda # Data is just 6 separable points in the plane X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) y = np.array([1, 1, 1, 2, 2, 2]) y3 = np.array([1, 1, 2, 2, 3, 3]) # Degenerate data with 1 feature (still should be separable) X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]]) def test_lda_predict(): """ LDA classification. This checks that LDA implements fit and predict and returns correct values for a simple toy dataset. """ clf = lda.LDA() y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y) # Assure that it works with 1D data y_pred1 = clf.fit(X1, y).predict(X1) assert_array_equal(y_pred1, y) # Test probas estimates y_proba_pred1 = clf.predict_proba(X1) assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y) y_log_proba_pred1 = clf.predict_log_proba(X1) assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8) # Primarily test for commit 2f34950 -- "reuse" of priors y_pred3 = clf.fit(X, y3).predict(X) # LDA shouldn't be able to separate those assert_true(np.any(y_pred3 != y3)) def test_lda_transform(): clf = lda.LDA() X_transformed = clf.fit(X, y).transform(X) assert_equal(X_transformed.shape[1], 1)
bsd-3-clause
yyjiang/scikit-learn
benchmarks/bench_plot_ward.py
290
1260
""" Benchmark scikit-learn's Ward implement compared to SciPy's """ import time import numpy as np from scipy.cluster import hierarchy import pylab as pl from sklearn.cluster import AgglomerativeClustering ward = AgglomerativeClustering(n_clusters=3, linkage='ward') n_samples = np.logspace(.5, 3, 9) n_features = np.logspace(1, 3.5, 7) N_samples, N_features = np.meshgrid(n_samples, n_features) scikits_time = np.zeros(N_samples.shape) scipy_time = np.zeros(N_samples.shape) for i, n in enumerate(n_samples): for j, p in enumerate(n_features): X = np.random.normal(size=(n, p)) t0 = time.time() ward.fit(X) scikits_time[j, i] = time.time() - t0 t0 = time.time() hierarchy.ward(X) scipy_time[j, i] = time.time() - t0 ratio = scikits_time / scipy_time pl.figure("scikit-learn Ward's method benchmark results") pl.imshow(np.log(ratio), aspect='auto', origin="lower") pl.colorbar() pl.contour(ratio, levels=[1, ], colors='k') pl.yticks(range(len(n_features)), n_features.astype(np.int)) pl.ylabel('N features') pl.xticks(range(len(n_samples)), n_samples.astype(np.int)) pl.xlabel('N samples') pl.title("Scikit's time, in units of scipy time (log)") pl.show()
bsd-3-clause
vshtanko/scikit-learn
examples/cluster/plot_mini_batch_kmeans.py
265
4081
""" ==================================================================== Comparison of the K-Means and MiniBatchKMeans clustering algorithms ==================================================================== We want to compare the performance of the MiniBatchKMeans and KMeans: the MiniBatchKMeans is faster, but gives slightly different results (see :ref:`mini_batch_kmeans`). We will cluster a set of data, first with KMeans and then with MiniBatchKMeans, and plot the results. We will also plot the points that are labelled differently between the two algorithms. """ print(__doc__) import time import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import MiniBatchKMeans, KMeans from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.datasets.samples_generator import make_blobs ############################################################################## # Generate sample data np.random.seed(0) batch_size = 45 centers = [[1, 1], [-1, -1], [1, -1]] n_clusters = len(centers) X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7) ############################################################################## # Compute clustering with Means k_means = KMeans(init='k-means++', n_clusters=3, n_init=10) t0 = time.time() k_means.fit(X) t_batch = time.time() - t0 k_means_labels = k_means.labels_ k_means_cluster_centers = k_means.cluster_centers_ k_means_labels_unique = np.unique(k_means_labels) ############################################################################## # Compute clustering with MiniBatchKMeans mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size, n_init=10, max_no_improvement=10, verbose=0) t0 = time.time() mbk.fit(X) t_mini_batch = time.time() - t0 mbk_means_labels = mbk.labels_ mbk_means_cluster_centers = mbk.cluster_centers_ mbk_means_labels_unique = np.unique(mbk_means_labels) ############################################################################## # Plot result fig = plt.figure(figsize=(8, 3)) fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9) colors = ['#4EACC5', '#FF9C34', '#4E9A06'] # We want to have the same colors for the same cluster from the # MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per # closest one. 
order = pairwise_distances_argmin(k_means_cluster_centers, mbk_means_cluster_centers) # KMeans ax = fig.add_subplot(1, 3, 1) for k, col in zip(range(n_clusters), colors): my_members = k_means_labels == k cluster_center = k_means_cluster_centers[k] ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.') ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) ax.set_title('KMeans') ax.set_xticks(()) ax.set_yticks(()) plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % ( t_batch, k_means.inertia_)) # MiniBatchKMeans ax = fig.add_subplot(1, 3, 2) for k, col in zip(range(n_clusters), colors): my_members = mbk_means_labels == order[k] cluster_center = mbk_means_cluster_centers[order[k]] ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.') ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) ax.set_title('MiniBatchKMeans') ax.set_xticks(()) ax.set_yticks(()) plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (t_mini_batch, mbk.inertia_)) # Initialise the different array to all False different = (mbk_means_labels == 4) ax = fig.add_subplot(1, 3, 3) for l in range(n_clusters): different += ((k_means_labels == k) != (mbk_means_labels == order[k])) identic = np.logical_not(different) ax.plot(X[identic, 0], X[identic, 1], 'w', markerfacecolor='#bbbbbb', marker='.') ax.plot(X[different, 0], X[different, 1], 'w', markerfacecolor='m', marker='.') ax.set_title('Difference') ax.set_xticks(()) ax.set_yticks(()) plt.show()
bsd-3-clause
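The plotting section of the example above pairs each KMeans center with its nearest MiniBatchKMeans center using pairwise_distances_argmin, so that matching clusters share a color. A tiny illustration of just that pairing step, with made-up toy centers rather than the example's fitted ones:

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin

# Toy centers (made up): both models found the same three clusters but
# numbered them differently.
k_means_centers = np.array([[1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]])
mbk_centers = np.array([[-1.1, -0.9], [0.9, -1.1], [1.1, 0.9]])

# order[k] is the index of the MiniBatchKMeans center closest to KMeans
# center k, so cluster k and cluster order[k] can be drawn in the same color.
order = pairwise_distances_argmin(k_means_centers, mbk_centers)
print(order)  # [2 0 1] for these toy centers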
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFactor/fetcher.py
2
42360
# /usr/bin/env python3 """ 网络查询接口: 1. 个股查询 - QA_fetch_get_individual_financial: 查询个股指定时间段指定财务报表指定报告类型数据 2. 截面查询 - QA_fetch_get_crosssection_financial: 查询指定报告期指定报表指定报告类型数据 本地查询接口: 1. 截面查询 - QA_fetch_crosssection_financial 2. 高级查询 - QA_fetch_financial_adv """ import datetime import time from typing import List, Tuple, Union import pandas as pd import pymongo import tushare as ts from QUANTAXIS.QAFactor.utils import QA_fmt_code, QA_fmt_code_list from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_list from QUANTAXIS.QAFetch.QATushare import get_pro from QUANTAXIS.QAUtil import (DATABASE, QASETTING, QA_util_date_int2str, QA_util_date_stamp, QA_util_get_pre_trade_date, QA_util_get_real_date, QA_util_log_info, QA_util_to_json_from_pandas) REPORT_DATE_TAILS = ["0331", "0630", "0930", "1231"] SHEET_TYPE = ["income", "balancesheet", "cashflow"] REPORT_TYPE = ['1', '2', '3', '4', '5', '11'] def QA_fetch_get_individual_financial( code: str, start: Union[str, datetime.datetime, pd.Timestamp] = None, end: Union[str, datetime.datetime, pd.Timestamp] = None, report_date: Union[str, datetime.datetime] = None, sheet_type: str = "income", report_type: Union[int, str] = 1, fields: Union[str, Tuple, List] = None, wait_seconds: int = 61, max_trial: int = 3) -> pd.DataFrame: """个股财务报表网络查询接口,注意,这里的 start 与 end 是针对 report_date 进行范围查询 Args: code (str): 股票代码 start (Union[str, datetime.datetime, pd.Timestamp], optional): 查询起始时间,默认为 None end (Union[str, datetime.datetime, pd.Timestamp], optional): 查询结束时间,默认为 None report_date (Union[str, datetime.datetime], optional): 报告期. 默认为 None,如果使用了 report_date, 则 start 与 end 参数不再起作用 sheet_type (str, optional): 报表类型,默认为 "income" 类型 (利润表 "income"| 资产负债表 "balancesheet"| 现金流量表 "cashflow"| 业绩预告 "forecast"| 业绩快报 "express") report_type (Union[int, str], optional): 报告类型. 默认为 1。 (1 合并报表 上市公司最新报表(默认)| 2 单季合并 单一季度的合并报表 | 3 调整单季合并表 调整后的单季合并报表(如果有) | 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据 | 6 母公司报表 该公司母公司的财务报表数据 | 7 母公司单季表 母公司的单季度表 | 8 母公司调整单季表 母公司调整后的单季表 | 9 母公司调整表 该公司母公司的本年度公布上年同期的财务报表数据 | 10 母公司调整前报表 母公司调整之前的原始财务报表数据 | 11 调整前合并报表 调整之前合并报表原数据 | 12 母公司调整前报表 母公司报表发生变更前保留的原数据) fields (Union[str, Tuple, List], optional): 指定数据范围,如果设置为 None,则返回所有数据. 默认为 None. wait_seconds (int, optional): 等待重试时间. 默认为 61 秒. max_trial (int, optional): 最大重试次数. 默认为 3. 
Returns: pd.DataFrame: 返回指定个股时间范围内指定类型的报表数据 """ def _get_individual_financial(code, report_date, report_type, sheet_type, fields, wait_seconds, trial_count): nonlocal pro, max_trial if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") try: if not fields: df = eval( f"pro.{sheet_type}(ts_code='{code}', period='{report_date}', report_type={report_type})") else: df = eval( f"pro.{sheet_type}(ts_code='{code}', period='{report_date}', report_type={report_type}, fields={fields})") return df.rename(columns={"ts_code": "code", "end_date": "report_date"}) except Exception as e: print(e) time.sleep(wait_seconds) _get_individual_financial( code, report_date, report_type, sheet_type, fields, wait_seconds, trial_count+1) pro = get_pro() report_type = int(report_type) if (not start) and (not end) and (not report_date): raise ValueError( "[QRY_DATES ERROR]\tparam 'start', 'end' and 'report_date' should not be none at the same time!") if isinstance(fields, str): fields = sorted(list(set([fields, "ts_code", "end_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) if report_date: report_date = pd.Timestamp(report_date) year = report_date.year report_date_lists = [ pd.Timestamp(str(year) + report_date_tail) for report_date_tail in REPORT_DATE_TAILS] if report_date not in report_date_lists: raise ValueError("[REPORT_DATE ERROR]") if sheet_type not in ["income", "balancesheet", "cashflow", "forecast", "express"]: raise ValueError("[SHEET_TYPE ERROR]") if report_type not in range(1, 13): raise ValueError("[REPORT_TYPE ERROR]") report_dates = [report_date] else: start = pd.Timestamp(start) start_year = start.year end = pd.Timestamp(end) end_year = end.year origin_year_ranges = pd.date_range( str(start_year), str(end_year+1), freq='Y').map(str).str.slice(0, 4).tolist() origin_report_ranges = pd.Series([ pd.Timestamp(year + report_date_tail) for year in origin_year_ranges for report_date_tail in REPORT_DATE_TAILS]) report_dates = origin_report_ranges.loc[( origin_report_ranges >= start) & (origin_report_ranges <= end)] df = pd.DataFrame() for report_date in report_dates: df = df.append(_get_individual_financial( code=QA_fmt_code(code, "ts"), report_date=report_date.strftime("%Y%m%d"), report_type=report_type, sheet_type=sheet_type, fields=fields, wait_seconds=wait_seconds, trial_count=0)) df.code = QA_fmt_code_list(df.code) return df.reset_index(drop=True) def QA_fetch_get_crosssection_financial( report_date: Union[str, datetime.datetime, pd.Timestamp], report_type: Union[int, str] = 1, sheet_type: str = "income", fields: Union[str, Tuple, List] = None, wait_seconds: int = 61, max_trial: int = 3) -> pd.DataFrame: """截面财务报表网络查询接口 Args: report_date (Union[str, datetime.datetime, pd.Timestamp]): 报告期 report_type (Union[int, str], optional): 报告类型,默认值为 1. (1 合并报表 上市公司最新报表(默认)| 2 单季合并 单一季度的合并报表 | 3 调整单季合并表 调整后的单季合并报表(如果有) | 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据 | 6 母公司报表 该公司母公司的财务报表数据 | 7 母公司单季表 母公司的单季度表 | 8 母公司调整单季表 母公司调整后的单季表 | 9 母公司调整表 该公司母公司的本年度公布上年同期的财务报表数据 | 10 母公司调整前报表 母公司调整之前的原始财务报表数据 | 11 调整前合并报表 调整之前合并报表原数据 | 12 母公司调整前报表 母公司报表发生变更前保留的原数据) sheet_type (str, optional): 报表类型,默认为 "income". (利润表 "income"| 资产负债表 "balancesheet"| 现金流量表 "cashflow"| 业绩预告 "forecast"| 业绩快报 "express") fields (Union[str, List], optional): 数据范围,默认为 None,返回所有数据. wait_seconds (int, optional): 查询超时时间, 默认为 61. max_trial (int, optional): 查询最大尝试次数, 默认为 3. 
Returns: pd.DataFrame: 指定报告期的指定财务报表数据 """ def _get_crosssection_financial(report_date, report_type, sheet_type, fields, wait_seconds, trial_count): nonlocal pro, max_trial if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") try: if not fields: print( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type})") df = eval( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type})") else: df = eval( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type}, fields={fields})") if df.empty: return df df.ts_code = QA_fmt_code_list(df.ts_code) return df.rename(columns={"ts_code": "code", "end_date": "report_date"}).sort_values(by=['ann_date', 'f_ann_date']) except Exception as e: print(e) time.sleep(wait_seconds) _get_crosssection_financial( report_date, report_type, sheet_type, fields, wait_seconds, trial_count + 1) # Tushare 账号配置 pro = get_pro() # 设置标准报告期格式 report_date = pd.Timestamp(report_date) report_type = int(report_type) year = report_date.year std_report_dates = [ str(year) + report_date_tail for report_date_tail in REPORT_DATE_TAILS] # Tushare 接口支持的日期格式 if report_date.strftime("%Y%m%d") not in std_report_dates: raise ValueError("[REPORT_DATE ERROR]") # fields 格式化处理 if isinstance(fields, str): fields = sorted(list(set([fields, "ts_code", "end_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) # 目前支持利润表,资产负债表和现金流量表 if sheet_type not in SHEET_TYPE: raise ValueError("[SHEET_TYPE ERROR]") if report_type not in range(1, 13): raise ValueError("[REPORT_TYTPE ERROR]") return _get_crosssection_financial( report_date=report_date.strftime("%Y%m%d"), report_type=report_type, sheet_type=sheet_type, fields=fields, wait_seconds=wait_seconds, trial_count=0) # FIXME: Add Fetch Get Method of Daily Basic def QA_fetch_get_daily_basic( code: Union[str, List, Tuple] = None, trade_date: Union[str, pd.Timestamp, datetime.datetime] = None, fields: Union[str, List, Tuple] = None, wait_seconds: int = 61, max_trial: int = 3 ) -> pd.DataFrame: """ 从网络获取市场指定交易日重要基本面指标,用于选股分析和报表展示 Args: code(Union[str, List, Tuple], optional): 指定股票代码,默认为 None,即对应交易日的全市场股票 trade_date(Union[str, pd.Timestamp, datetime.datetime], optional): 指定交易日期, 默认为 None, 即距离当前 日期最近的交易日 fields(Union[str, List, Tuple], optional): 默认为 None,如果指定为某一单个 str,默认返回 DataFrame 包括 交易日等附加信息 wait_seconds (int, optional): 查询超时时间, 默认为 61. max_trial (int, optional): 查询最大尝试次数, 默认为 3. 
Returns: pd.DataFrame: 指定交易日指定范围指定标的的每日基本面指标信息 """ def _fetch_get_daily_basic(trade_date, fields, trial_count): nonlocal pro, max_trial try: if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") if not trade_date: trade_date = QA_util_get_pre_trade_date( datetime.date.today(), 1).replace("-", "") else: trade_date = pd.Timestamp(trade_date).strftime("%Y%m%d") if not fields: qry = f"pro.daily_basic(trade_date={trade_date})" else: if isinstance(fields, str): fields = list(set([fields] + ["ts_code", "trade_date"])) fields = ",".join(fields) qry = "pro.daily_basic(trade_date={trade_date}, fields={fields})" df = eval(qry) if df is None: raise ValueError("[ERROR]") return df except: time.sleep(61) _fetch_get_daily_basic( trade_date, fields, trial_count+1 ) pro = get_pro() df = _fetch_get_daily_basic( trade_date=trade_date, fields=fields, trial_count=0) if df.empty: return df else: df = df.rename(columns={"ts_code": "code"}) df.code = QA_fmt_code_list(df.code) df = df.set_index("code") if not code: return df if isinstance(code, str): code = (code,) # exclude code which not in rtn dataframe filter_idx = df.index.intersection(code) return df.loc[filter_idx] def QA_fetch_crosssection_financial( report_date: Union[str, datetime.datetime, pd.Timestamp], report_type: Union[int, str] = 1, sheet_type: str = "income", fields: Union[str, Tuple, List] = None) -> pd.DataFrame: """本地查询截面财务数据接口 Args: report_date (Union[str, datetime.datetime, pd.Timestamp]): 报告期 report_type (Union[int, str], optional): 报告类型,默认为 1. (1 合并报表 上市公司最新报表(默认)| 2 单季合并 单一季度的合并报表 | 3 调整单季合并表 调整后的单季合并报表(如果有) | 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据 | 11 调整前合并报表 调整之前合并报表原数据) sheet_type (str, optional): 报表类型,默认为 "income". fields (Union[str, Tuple, List], optional): 子段,默认为 None,返回所有字段. Returns: pd.DataFrame: 指定报告期指定报表数据 """ if isinstance(fields, str): fields = sorted(list(set([fields, "code", "report_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) coll = eval(f"DATABASE.{sheet_type}") report_date = pd.Timestamp(report_date).strftime("%Y%m%d") cursor = coll.find( { "report_date": report_date, "report_type": str(report_type) } ) res = pd.DataFrame([item for item in cursor]) if res.empty: return pd.DataFrame() res.report_date = pd.to_datetime(res.report_date, utc=False) if not fields: return res.drop(columns="_id") return res.drop(columns="_id")[fields] def QA_fetch_financial_adv( code: Union[str, Tuple, List] = None, start: Union[str, datetime.datetime, pd.Timestamp] = None, end: Union[str, datetime.datetime, pd.Timestamp] = None, report_date: Union[str, datetime.datetime, pd.Timestamp] = None, report_type: Union[int, str] = None, sheet_type: str = "income", fields: Union[str, Tuple, List] = None) -> pd.DataFrame: """本地获取指定股票或者指定股票列表,指定时间范围或者报告期,指定报告类型的指定财务报表数据 Args: code (Union[str, Tuple, List], optional): 指定股票代码或列表,默认为 None, 全市场股票 start (Union[str, datetime.datetime, pd.Timestamp], optional): 起始时间 end (Union[str, datetime.datetime, pd.Timestamp], optional): 结束时间 report_date (Union[str, datetime.datetime, pd.Timestamp], optional): 报告期 report_type (Union[int, str], optional): 报告类型,默认为 1. (1 合并报表 上市公司最新报表(默认)| 2 单季合并 单一季度的合并报表 | 3 调整单季合并表 调整后的单季合并报表(如果有) | 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据 | 11 调整前合并报表 调整之前合并报表原数据) sheet_type (str, optional): 报表类型,默认为 "income". fields (List, optional): 字段,默认为 None,返回所有字段. 
Returns: pd.DataFrame: 指定条件的本地报表数据 """ if (not start) and (not end) and (not report_date): raise ValueError( "[DATE ERROR]\t 'start', 'end' 与 'report_date' 不能同时为 None") if isinstance(code, str): code = (code,) if not report_type: report_type = ("1", "2", "4", "5", "11") if isinstance(report_type, int) or isinstance(report_type, str): report_type = (str(report_type), ) else: report_type = list(map(str, report_type)) coll = eval(f"DATABASE.{sheet_type}") qry = {} if not report_date: if not end: end = datetime.date.today() start = pd.Timestamp(start) end = pd.Timestamp(end) start_date_stamp = QA_util_date_stamp(start) end_date_stamp = QA_util_date_stamp(end) if not code: qry = { "f_ann_date_stamp": { "$gte": start_date_stamp, "$lte": end_date_stamp }, "report_type": { "$in": report_type } } else: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gte": start_date_stamp, "$lte": end_date_stamp }, "report_type": { "$in": report_type } } else: report_date_stamp = QA_util_date_stamp(report_date) if not code: qry = { "report_date_stamp": report_date_stamp, "report_type": { "$in": report_type } } else: qry = { "code": { "$in": code }, "report_date_stamp": report_date_stamp, "report_type": { "$in": report_type } } if isinstance(fields, str): fields = list( set([fields, "code", "ann_date", "report_date", "f_ann_date"])) elif fields: fields = list( set(list(fields) + ["code", "ann_date", "report_date", "f_ann_date"])) cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.ASCENDING), ("f_ann_date_stamp", pymongo.ASCENDING)]) if fields: df = pd.DataFrame(cursor).drop(columns="_id")[fields].set_index("code") df.report_date = pd.to_datetime(df.report_date, utc=False) df.ann_date = pd.to_datetime(df.ann_date, utc=False) df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) else: df = pd.DataFrame(cursor).drop(columns="_id").set_index("code") df.report_date = pd.to_datetime(df.report_date, utc=False) df.ann_date = pd.to_datetime(df.ann_date, utc=False) df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) return df def QA_fetch_last_financial( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None, report_label: Union[int, str] = None, report_type: Union[int, str, List, Tuple] = None, sheet_type: str = "income", fields: Union[str, List, Tuple] = None) -> pd.DataFrame: """获取距离指定日期 (cursor_date) 最近的原始数据 (不包含在 cursor_date 发布的财务数据), 当同时输入 cursor_date 与 report_date 时,以 report_date 作为查询标准 注意: 这里的 report_type 仅支持 (1,4, 5) 三种类型,以避免混淆合并数据和单季数据等 说明: 柳工 (000528) 在 2018 年 8 月 30 日发布半年报,之后在 2018 年 9 月 29 日发布修正报告, - 如果输入的 cursor_date 为 2018-08-31, 那么获取到的就是原始半年报,对应 report_type == 5 - 如果输入的 cursor_date 为 2018-09-30,那么获取到的就是最新合并报表,对应 report_type == 1 - 如果对应的 cursor_date 为 2019-08-31,需要获取 2018 年半年报,那么就返回柳工在 2019 年 8 月 29 日发布的上年同期基准,对应 report_type == 4 Args: code (Union[str, List, Tuple], optional): 股票代码或股票列表,默认为 None, 查询所有股票 cursor_date (Union[str, datetime.datetime, pd.Timestamp]): 查询截面日期 (一般指调仓日), 默认为 None report_label (Union[str, int], optional): 指定报表类型,这里的类型分类为一季报,半年报,三季报,年报, 默认为 None,即选择距离 cursor_date 最近的报表类型 report_type (Union[str, List, Tuple], optional): [description]. 报表类型,默认为 None. 即距离 cursor_date 最近的财报,不指定类型,避免引入未来数据 (1 合并报表 上市公司最新报表(默认)| 2 单季合并报表 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据) sheet_type (str, optional): 报表类型,默认为 "income". 
fields (Union[str, List, Tuple], optional): 字段, 默认为 None, 返回所有字段 Returns: pd.DataFrame: 复合条件的财务数据 """ def _trans_financial_type(x): if x.empty: return x if sheet_type == "balancesheet": # 资产负债表属于时点信息,直接返回 return x else: if x.iloc[0].report_date[4:] in ['0331', '1231']: # 一季报而言,单季合并与普通合并没有区别,直接返回 # 年报而言,不存在单季概念 return x.iloc[0] if x.iloc[0].report_type in ['1', '4', '5']: return x.iloc[0] if x.iloc[0].report_type == '2': # 尝试查找同一报告期报告类型为 '1' 或 '4' 的报表数据 # try: # if (x.shape[0] > 1) & (x.iloc[1].report_date == x.iloc[0].report_date) & (x.iloc[1].report_type in ['1', '4']): # return x.iloc[1] # except: # return pd.Series() # 尝试直接利用单季数据进行拼接 cursor_x = x.loc[x.report_date.map(str).str.slice( 0, 4) == x.iloc[0].report_date[:4]] cursor_x = cursor_x.drop_duplicates(subset = ['report_date'], keep='first') cursor_x = cursor_x.loc[cursor_x.report_date <= x.iloc[0].report_date] cursor_x = cursor_x.fillna(0) non_numeric_columns = sorted(["f_ann_date", "f_ann_date_stamp", "ann_date", "ann_date_stamp", "report_date", "report_date_stamp", "update_flag", "report_type", "code", "report_label"]) columns = sorted(list(set(cursor_x.columns) - set(non_numeric_columns))) rtn_se = cursor_x[columns].sum(axis=0) rtn_se = rtn_se.append(cursor_x[non_numeric_columns].iloc[0]) return rtn_se if isinstance(code, str): code = (code,) if not report_type: report_type = ["1", "2", "4", "5"] else: if isinstance(report_type, int): report_type = str(report_type) if isinstance(report_type, str): if report_type not in ["1", "4", "5"]: raise ValueError("[REPORT_TYPE ERROR]") report_type = (report_type,) else: report_type = list(set(report_type) & set('1', '2', '4', '5')) if sheet_type not in SHEET_TYPE: raise ValueError(f"[SHEET_TYPE ERROR]") if report_label: report_label = str(report_label) if isinstance(fields, str): fields = list( set([fields, "code", "ann_date", "report_date", "f_ann_date", "report_type"])) elif fields: fields = list( set(fields + ["code", "ann_date", "report_date", "f_ann_date", "report_type"])) coll = eval(f"DATABASE.{sheet_type}") if (not code) and (not report_label): # 为了加快检索速度,从当前日期往前至多回溯一季度,实际调仓时,仅考虑当前能拿到的最新数据,调仓周期一般以月, 季为单位, # 最长一般为年报,而修正报表如果超过 1 个季度,基本上怼调仓没有影响,这里以 1 年作为回溯基准 qry = { "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }} cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() if not report_label: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": {"$in": report_type}} cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return 
df.groupby("code").apply(_trans_financial_type).unstack() if not code: qry = { "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }, "report_label": report_label } cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() else: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }, "report_label": report_label } cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") # df.report_date = pd.to_datetime(df.report_date, utc=False) # df.ann_date = pd.to_datetime(df.ann_date, utc=False) # df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() def QA_fetch_stock_basic( code: Union[str, List, Tuple] = None, status: Union[str, List, Tuple] = 'L') -> pd.DataFrame: """获取股票基本信息 Args: code (Union[str, List, Tuple], optional): 股票代码或列表,默认为 None,获取全部股票 status (Union[str, List, Tuple], optional): 股票状态, 默认为 'L', 即仍在上市的股票,如果为 None, 则返回所有状态股票 Returns: pd.DataFrame: 股票基本信息 """ coll = DATABASE.stock_basic if isinstance(code, str): code = (code,) if isinstance(status, str): status = (status,) qry = {} if not status: if not code: qry = {} else: qry = { "code": { "$in": code } } else: if not code: qry = { "status": { "$in": status } } else: qry = { "code": { "$in": code }, "status": { "$in": status } } cursor = coll.find(qry) res = pd.DataFrame(cursor) if res.empty: return res else: res.list_date = pd.to_datetime(res.list_date, utc=False) return res.drop(columns="_id").set_index("code") def QA_fetch_stock_name( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None ) -> pd.DataFrame: """获取股票历史曾用名 Args: code (Union[str, List, Tuple], optional): 股票代码或列表,默认为 None,查询所有股票. 
cursor (Union[str, datetime.datetime, pd.Timestamp], optional): 截止时间,股票名称距离 cursor_date 最近的名字 Returns: pd.DataFrame: 股票历史曾用名 """ coll = DATABASE.namechange if isinstance(code, str): code = [code] qry = {} if not code: if not cursor_date: qry = {} else: qry = { "start_date_stamp": { "$lte": QA_util_date_stamp(cursor_date) }, "end_date_stamp": { "$gte": QA_util_date_stamp(cursor_date) } } else: if not cursor_date: qry = { "code": { "$in": code } } else: qry = { "code": { "$in": code }, "start_date_stamp": { "$lte": QA_util_date_stamp(cursor_date) }, "end_date_stamp": { "$gte": QA_util_date_stamp(cursor_date) } } cursor = coll.find(qry) res = pd.DataFrame(cursor) if res.empty: return res else: res.start_date = pd.to_datetime(res.start_date, utc=False) res.end_date = pd.to_datetime(res.end_date, utc=False) return res.drop(columns="_id").set_index("code").sort_values(by="start_date_stamp").drop_duplicates(keep="last").sort_index() def QA_fetch_industry_adv( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime] = None, start: Union[str, datetime.datetime] = None, end: Union[str, datetime.datetime] = None, levels: Union[str, List, Tuple] = None, src: str = "sw" ) -> pd.DataFrame: """本地获取指定股票或股票列表的行业 Args: code (Union[str, List, Tuple], optional): 股票代码或列表,默认为 None, 查询所有股票代码. cursor_date (Union[str, datetime.datetime], optional): 一般指调仓日,此时不需要再设置 start 与 end start(Union[str, datetime.datetime], optional): 起始时间,默认为 None. end(Union[str, datetime.datetime], optional): 截止时间, 默认为 None. levels (Union[str, List, Tuple], optional): [description]. 对应行业分级级别,默认为 None,查询所有行业分级数据 src (str, optional): 分级来源,默认为 "sw"(目前仅支持申万行业分类). Returns: pd.DataFrame: 行业信息 """ coll = DATABASE.industry if not code: code = QA_fetch_stock_list().index.tolist() if isinstance(code, str): code = [code] if isinstance(levels, str): levels = [levels, ] if not levels: levels = ["l1", "l2", "l3"] levels = list(map(lambda x: x.lower(), levels)) df_tmp = pd.DataFrame() if not cursor_date: if not start: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower() } else: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower(), "in_date_stamp": { "$lte": QA_util_date_stamp(pd.Timestamp(start).strftime("%Y-%m-%d")) } } if coll.count_documents(filter=qry) < 1: print("找不到对应行业数据") return pd.DataFrame() cursor = coll.find(qry) df_tmp = pd.DataFrame(cursor).drop(columns="_id") if end: df_tmp = df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp( pd.Timestamp(end).strftime("%Y-%m-%d"))] else: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower(), "in_date_stamp": { "$lte": QA_util_date_stamp(pd.Timestamp(cursor_date).strftime("%Y-%m-%d")) } } if coll.count_documents(filter=qry) < 1: print("找不到对应行业数据") return pd.DataFrame() cursor = coll.find(qry) df_tmp = pd.DataFrame(cursor).drop(columns="_id") df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp( pd.Timestamp(cursor_date).strftime("%Y-%m-%d"))] df_tmp.in_date = pd.to_datetime(df_tmp.in_date, utc=False) df_tmp.out_date = pd.to_datetime(df_tmp.out_date, utc=False) return df_tmp.drop(columns=["in_date_stamp", "out_date_stamp"]) def QA_fetch_daily_basic( code: Union[str, List, Tuple] = None, start: Union[str, pd.Timestamp, datetime.datetime] = None, end: Union[str, pd.Timestamp, datetime.datetime] = None, cursor_date: Union[str, pd.Timestamp, datetime.datetime] = None, fields: Union[str, Tuple, List]= None ) -> pd.DataFrame: """获取全部股票每日重要的基本面指标,可用于选股分析、报表展示等 Args: code (Union[str, List, Tuple], optional): 
指定股票代码或列表, 默认为 None,获取全市场 start (Union[str, pd.Timestamp, datetime.datetime], optional): 起始日期,默认为 None end (Union[str, pd.Timestamp, datetime.datetime], optional): 结束日期,默认为 None cursor_date (Union[str, pd.Timestamp, datetime.datetime], optional): 指定日期,与 start 和 end 冲突,只能选择 cursor_date 或者 start, end fields (Union[str, Tuple, List], optional): 指定 fields Returns: pd.DataFrame: 以日期,股票名为 Multiindex 的基本信息 """ if isinstance(code, str): code = (code,) if not code: if (not start) and (not cursor_date): raise ValueError( "[ERROR]\tstart and end and cursor_date cannot all be none!") if not cursor_date: if not end: end_stamp = QA_util_date_stamp(datetime.date.today()) else: end_stamp = QA_util_date_stamp(end) start_stamp = QA_util_date_stamp(start) qry = { "trade_date_stamp": { "$gte": start_stamp, "$lte": end_stamp } } else: real_trade_date = QA_util_get_real_date(cursor_date) trade_date_stamp = QA_util_date_stamp(real_trade_date) qry = { "trade_date_stamp": trade_date_stamp } else: if (not start) and (not cursor_date): raise ValueError( "[ERROR]\tstart and end and cursor_date cannot all be none!") if not cursor_date: if not end: end_stamp = QA_util_date_stamp(datetime.date.today()) else: end_stamp = QA_util_date_stamp(end) start_stamp = QA_util_date_stamp(start) qry = { "code": { "$in": code }, "trade_date_stamp": { "$gte": start_stamp, "$lte": end_stamp } } else: real_trade_date = QA_util_get_real_date(cursor_date) trade_date_stamp = QA_util_date_stamp(real_trade_date) qry = { "code": { "$in": code }, "trade_date_stamp": trade_date_stamp } coll = DATABASE.daily_basic cursor = coll.find(qry) df = pd.DataFrame(cursor) if df.empty: return df df = df.rename(columns={"trade_date": "date"}).drop( columns="_id") df.date = pd.to_datetime(df.date, utc=False) df = df.set_index(["date", "code"]).sort_index() if not fields: return df return df[fields] if __name__ == "__main__": # print(QA_fetch_get_individual_financial( # "000001", "2020-01-01", "2020-12-31")) # print(QA_fetch_get_individual_financial( # "000001", report_date="2020-03-31", fields="basic_eps")) # print(QA_fetch_get_crosssection_financial('2020-03-31')) # print(QA_fetch_crosssection_financial("2020-03-31", fields="basic_eps")) # df = QA_fetch_financial_adv(start="2018-06-30", end="2018-09-30") # print(df.loc['000528', ["report_date", "f_ann_date", # "ann_date", "basic_eps", "report_type", "update_flag", "report_label"]]) # print(df) # print(QA_fetch_stock_basic(status="D")) # 最近财务数据获取测试 # print(QA_fetch_last_financial( # code="000596", cursor_date="2020-10-08")) # print(QA_fetch_last_financial( # code=QA_fetch_stock_list().index.tolist(), cursor_date="2020-10-08")) # print(QA_fetch_last_financial( # code = '000001', cursor_date = '2020-10-08' # )) code = QA_fetch_stock_list().index.tolist() cursor_date = '2020-10-08' df_origin = QA_fetch_last_financial(code = code, cursor_date = cursor_date, sheet_type = "balancesheet") # print(QA_fetch_last_financial( # cursor_date="2018-08-31")) # print(QA_fetch_last_financial( # cursor_date="2018-08-31", code=["000528"], fields=["report_date", "ann_date", "f_ann_date", "update_flag"])) # print(QA_fetch_financial_adv( # cursor_date="2018-08-31")) # 股票基本信息获取测试 # print(QA_fetch_stock_basic("000001")) # print(QA_fetch_stock_basic(status=["P", "D"])) # 行业获取测试 # print(QA_fetch_industry_adv(start="1998-01-01", end="2020-12-02").head()) # print(QA_fetch_industry_adv(["000001", "600000"], # start="1998-01-01", end="2020-12-02")) # print(QA_fetch_industry_adv( # ["000001", "600000"], cursor_date="2020-12-02")) # 
print(QA_fetch_stock_name( # code=['000001', '000002'], cursor_date="20081009")) # print(QA_fetch_daily_basic(cursor_date="2018-01-01"))
mit
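The QA_fetch_last_financial docstring above walks through 柳工 (000528): depending on the cursor date you get the original half-year report, the later correction, or the prior-year baseline. A hedged usage sketch of that scenario, assuming a configured QUANTAXIS installation whose local MongoDB financial collections have already been populated (otherwise the calls return nothing or fail):

# Sketch only: needs a configured QUANTAXIS install whose local MongoDB
# financial collections (income/balancesheet/cashflow) are already saved.
from QUANTAXIS.QAFactor.fetcher import QA_fetch_last_financial

# Just after the original 2018 half-year report of 000528 was published (2018-08-30):
df_original = QA_fetch_last_financial(code="000528", cursor_date="2018-08-31",
                                      fields=["report_date", "f_ann_date", "basic_eps"])

# After the 2018-09-29 correction, the same call picks up the revised figures:
df_revised = QA_fetch_last_financial(code="000528", cursor_date="2018-09-30",
                                     fields=["report_date", "f_ann_date", "basic_eps"])

print(df_original)
print(df_revised)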
peastman/msmbuilder
msmbuilder/lumping/pcca.py
6
4084
from __future__ import print_function, division, absolute_import import numpy as np from ..msm import MarkovStateModel class PCCA(MarkovStateModel): """Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping) microstates into macrostates. Parameters ---------- n_macrostates : int The desired number of macrostates in the lumped model. kwargs : optional Additional keyword arguments to be passed to MarkovStateModel. See msmbuilder.msm.MarkovStateModel for possible options. Notes ----- PCCA is a subclass of MarkovStateModel. However, the MSM properties and attributes on PCCA refer to the MICROSTATE properties--e.g. pcca.transmat_ is the microstate transition matrix. To get the macrostate transition matrix, you must fit a new MarkovStateModel object on the output (assignments) of PCCA(). """ def __init__(self, n_macrostates, pcca_tolerance=1e-5, **kwargs): self.n_macrostates = n_macrostates self.pcca_tolerance = pcca_tolerance super(PCCA, self).__init__(**kwargs) def fit(self, sequences, y=None): """Fit a PCCA lumping model using a sequence of cluster assignments. Parameters ---------- sequences : list(np.ndarray(dtype='int')) List of arrays of cluster assignments y : None Unused, present for sklearn compatibility only. Returns ------- self """ super(PCCA, self).fit(sequences, y=y) self._do_lumping() return self def _do_lumping(self): """Do the PCCA lumping. Notes ------- 1. Iterate over the eigenvectors, starting with the slowest. 2. Calculate the spread of that eigenvector within each existing macrostate. 3. Pick the macrostate with the largest eigenvector spread. 4. Split the macrostate based on the sign of the eigenvector. """ # Extract non-perron eigenvectors right_eigenvectors = self.right_eigenvectors_[:, 1:] assert self.n_states_ > 0 microstate_mapping = np.zeros(self.n_states_, dtype=int) def spread(x): return x.max() - x.min() for i in range(self.n_macrostates - 1): v = right_eigenvectors[:, i] all_spreads = np.array([spread(v[microstate_mapping == k]) for k in range(i + 1)]) state_to_split = np.argmax(all_spreads) inds = ((microstate_mapping == state_to_split) & (v >= self.pcca_tolerance)) microstate_mapping[inds] = i + 1 self.microstate_mapping_ = microstate_mapping def partial_transform(self, sequence, mode='clip'): trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode) if mode == 'clip': return [self.microstate_mapping_[seq] for seq in trimmed_sequence] elif mode == 'fill': def nan_get(x): try: x = int(x) return self.microstate_mapping_[x] except ValueError: return np.nan return np.asarray([nan_get(x) for x in trimmed_sequence]) else: raise ValueError @classmethod def from_msm(cls, msm, n_macrostates): """Create and fit lumped model from pre-existing MSM. Parameters ---------- msm : MarkovStateModel The input microstate msm to use. n_macrostates : int The number of macrostates Returns ------- lumper : cls The fit PCCA(+) object. """ params = msm.get_params() lumper = cls(n_macrostates, **params) lumper.transmat_ = msm.transmat_ lumper.populations_ = msm.populations_ lumper.mapping_ = msm.mapping_ lumper.countsmat_ = msm.countsmat_ lumper.n_states_ = msm.n_states_ lumper._do_lumping() return lumper
lgpl-2.1
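As the PCCA docstring stresses, the MSM attributes on a fitted PCCA object still describe microstates; the macrostate transition matrix comes from fitting a fresh MarkovStateModel on the lumped assignments. A short sketch of that workflow, assuming msmbuilder's usual MSM API; the synthetic trajectories below are placeholders for real microstate assignments:

import numpy as np
from msmbuilder.msm import MarkovStateModel
from msmbuilder.lumping import PCCA

# Synthetic stand-in data: 5 trajectories of microstate assignments over 8 states.
rng = np.random.RandomState(0)
sequences = [rng.randint(8, size=1000) for _ in range(5)]

micro_msm = MarkovStateModel(lag_time=1).fit(sequences)

# Lump the 8 microstates into 3 macrostates, starting from the fitted microstate MSM.
pcca = PCCA.from_msm(micro_msm, n_macrostates=3)

# Map each microstate trajectory to macrostates. PCCA stores the lumping in
# microstate_mapping_; this simple indexing assumes the raw labels already match
# the MSM's internal state indexing (pcca.partial_transform handles the general case).
macro_sequences = [pcca.microstate_mapping_[seq] for seq in sequences]

# pcca.transmat_ still refers to microstates; fit a new MSM on the lumped
# assignments to obtain the macrostate transition matrix.
macro_msm = MarkovStateModel(lag_time=1).fit(macro_sequences)
print(macro_msm.transmat_.shape)  # (3, 3)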
sorgerlab/indra
indra/sources/trrust/processor.py
4
2252
from copy import deepcopy from indra.databases import hgnc_client from indra.statements import Agent, IncreaseAmount, DecreaseAmount, Evidence class TrrustProcessor(object): """Processor to extract INDRA Statements from Trrust data frame. Attributes ---------- df : pandas.DataFrame The Trrust table to process. statements : list[indra.statements.Statement] The list of INDRA Statements extracted from the table. """ def __init__(self, df): self.df = df self.statements = [] def extract_statements(self): """Process the table to extract Statements.""" for _, (tf, target, effect, refs) in self.df.iterrows(): tf_agent = get_grounded_agent(tf) target_agent = get_grounded_agent(target) if effect == 'Activation': stmt_cls = IncreaseAmount elif effect == 'Repression': stmt_cls = DecreaseAmount else: continue pmids = refs.split(';') for pmid in pmids: stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid) self.statements.append(stmt) def make_stmt(stmt_cls, tf_agent, target_agent, pmid): """Return a Statement based on its type, agents, and PMID.""" ev = Evidence(source_api='trrust', pmid=pmid) return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent), evidence=[ev]) def get_grounded_agent(gene_name): """Return a grounded Agent based on an HGNC symbol.""" db_refs = {'TEXT': gene_name} if gene_name in hgnc_map: gene_name = hgnc_map[gene_name] hgnc_id = hgnc_client.get_hgnc_id(gene_name) if not hgnc_id: hgnc_id = hgnc_client.get_current_hgnc_id(gene_name) if hgnc_id: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id and ',' not in up_id: db_refs['UP'] = up_id agent = Agent(gene_name, db_refs=db_refs) return agent hgnc_map = { 'CTGF': 'CCN2', 'CYR61': 'CCN1', 'MKL1': 'MRTFA', 'NOV': 'CCN3', 'RFWD2': 'COP1', 'SALL4A': 'SALL4', 'STAT5': 'STAT5A', 'TRAP': 'ACP5', 'AES': 'TLE5', 'SEPT7': 'SEPTIN7' }
bsd-2-clause
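TrrustProcessor iterates positional (TF, target, effect, references) rows and emits one IncreaseAmount or DecreaseAmount Statement per semicolon-separated PMID, each carrying a single 'trrust' Evidence. A small sketch with a hand-made DataFrame; the rows and PMIDs below are placeholders for illustration, not actual TRRUST entries, and running it requires an INDRA installation:

import pandas as pd
from indra.sources.trrust.processor import TrrustProcessor

# Positional columns: transcription factor, target, effect, PMID references.
df = pd.DataFrame([
    ["TP53", "CDKN1A", "Activation", "12345678;23456789"],
    ["MYC", "CDKN1A", "Repression", "34567890"],
])

tp = TrrustProcessor(df)
tp.extract_statements()
for stmt in tp.statements:
    print(stmt)  # IncreaseAmount/DecreaseAmount Statements, one per PMID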
kastnerkyle/sklearn-theano
sklearn_theano/utils/ports.py
9
5242
import warnings from sklearn.cross_validation import ShuffleSplit from itertools import chain from sklearn.utils import safe_indexing import numpy as np import scipy.sparse as sp # A port of sklearn 0.16 utilities # to avoid validation issues in older sklearn def check_consistent_length(*arrays): """Check that all arrays have consistent first dimensions. Checks whether all objects in arrays have the same shape or length. Parameters ---------- arrays : list or tuple of input objects. Objects that will be checked for consistent length. """ uniques = np.unique([_num_samples(X) for X in arrays if X is not None]) if len(uniques) > 1: raise ValueError("Found arrays with inconsistent numbers of samples: %s" % str(uniques)) def indexable(*iterables): """Make arrays indexable for cross-validation. Checks consistent length, passes through None, and ensures that everything can be indexed by converting sparse matrices to csr and converting non-interable objects to arrays. Parameters ---------- iterables : lists, dataframes, arrays, sparse matrices List of objects to ensure sliceability. """ result = [] for X in iterables: if sp.issparse(X): result.append(X.tocsr()) elif hasattr(X, "__getitem__") or hasattr(X, "iloc"): result.append(X) elif X is None: result.append(X) else: result.append(np.array(X)) check_consistent_length(*result) return result def _num_samples(x): """Return number of samples in array-like x.""" if not hasattr(x, '__len__') and not hasattr(x, 'shape'): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError("Expected sequence or array-like, got %r" % x) return x.shape[0] if hasattr(x, 'shape') else len(x) def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Parameters ---------- *arrays : sequence of arrays or scipy.sparse matrices with same shape[0] Python lists or tuples occurring in arrays are converted to 1D numpy arrays. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Returns ------- splitting : list of arrays, length=2 * len(arrays) List containing train-test split of input array. Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> a, b = np.arange(10).reshape((5, 2)), range(5) >>> a array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(b) [0, 1, 2, 3, 4] >>> a_train, a_test, b_train, b_test = train_test_split( ... a, b, test_size=0.33, random_state=42) ... 
>>> a_train array([[4, 5], [0, 1], [6, 7]]) >>> b_train [2, 0, 3] >>> a_test array([[2, 3], [8, 9]]) >>> b_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) dtype = options.pop('dtype', None) if dtype is not None: warnings.warn("dtype option is ignored and will be removed in 0.18.") force_arrays = options.pop('force_arrays', False) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if force_arrays: warnings.warn("The force_arrays option is deprecated and will be " "removed in sklearn 0.18.", DeprecationWarning) if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays))
bsd-3-clause
giorgiop/scikit-learn
examples/cluster/plot_dbscan.py
346
2479
# -*- coding: utf-8 -*- """ =================================== Demo of DBSCAN clustering algorithm =================================== Finds core samples of high density and expands clusters from them. """ print(__doc__) import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.datasets.samples_generator import make_blobs from sklearn.preprocessing import StandardScaler ############################################################################## # Generate sample data centers = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0) X = StandardScaler().fit_transform(X) ############################################################################## # Compute DBSCAN db = DBSCAN(eps=0.3, min_samples=10).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) print('Estimated number of clusters: %d' % n_clusters_) print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)) print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)) print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)) print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels)) print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels_true, labels)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels)) ############################################################################## # Plot result import matplotlib.pyplot as plt # Black removed and is used for noise instead. unique_labels = set(labels) colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. col = 'k' class_member_mask = (labels == k) xy = X[class_member_mask & core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) xy = X[class_member_mask & ~core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show()
bsd-3-clause
armada-ai/esp-idf
tools/tiny-test-fw/Utility/LineChart.py
3
1681
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import matplotlib # fix can't draw figure with docker matplotlib.use('Agg') import matplotlib.pyplot as plt # candidate colors LINE_STYLE_CANDIDATE = ['b-o', 'r-o', 'k-o', 'm-o', 'c-o', 'g-o', 'y-o', 'b-s', 'r-s', 'k-s', 'm-s', 'c-s', 'g-s', 'y-s'] def draw_line_chart(file_name, title, x_label, y_label, data_list): """ draw line chart and save to file. :param file_name: abs/relative file name to save chart figure :param title: chart title :param x_label: x-axis label :param y_label: y-axis label :param data_list: a list of line data. each line is a dict of ("x-axis": list, "y-axis": list, "label": string) """ plt.figure(figsize=(12, 6)) plt.grid(True) for i, data in enumerate(data_list): plt.plot(data["x-axis"], data["y-axis"], LINE_STYLE_CANDIDATE[i], label=data["label"]) plt.xlabel(x_label) plt.ylabel(y_label) plt.legend(fontsize=12) plt.title(title) plt.tight_layout(pad=3, w_pad=3, h_pad=3) plt.savefig(file_name) plt.close()
apache-2.0
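draw_line_chart expects data_list to be a list of dicts with "x-axis", "y-axis" and "label" keys, as its docstring describes. A minimal usage sketch; the import path is assumed from the repo layout (tools/tiny-test-fw/Utility/LineChart.py), and the file name and throughput numbers are made up:

from Utility.LineChart import draw_line_chart  # import path assumed from the repo layout

sta_tx = {"x-axis": [1, 2, 3, 4], "y-axis": [20.1, 19.8, 18.5, 15.0], "label": "station TX"}
ap_tx = {"x-axis": [1, 2, 3, 4], "y-axis": [21.0, 20.5, 19.2, 16.3], "label": "AP TX"}

# Saves the figure to throughput.png (the module forces the Agg backend,
# so this also works headless, e.g. inside docker).
draw_line_chart("throughput.png", "Throughput vs. attenuation",
                "attenuation (dB)", "throughput (Mbps)",
                [sta_tx, ap_tx])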
r-mart/scikit-learn
sklearn/ensemble/tests/test_partial_dependence.py
365
6996
""" Testing for the partial dependence module. """ import numpy as np from numpy.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import if_matplotlib from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the boston dataset boston = datasets.load_boston() # also load the iris dataset iris = datasets.load_iris() def test_partial_dependence_classifier(): # Test partial dependence for classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5) # only 4 grid points instead of 5 because only 4 unique X[:,0] vals assert pdp.shape == (1, 4) assert axes[0].shape[0] == 4 # now with our own grid X_ = np.asarray(X) grid = np.unique(X_[:, 0]) pdp_2, axes = partial_dependence(clf, [0], grid=grid) assert axes is None assert_array_equal(pdp, pdp_2) def test_partial_dependence_multiclass(): # Test partial dependence for multi-class classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 n_classes = clf.n_classes_ pdp, axes = partial_dependence( clf, [0], X=iris.data, grid_resolution=grid_resolution) assert pdp.shape == (n_classes, grid_resolution) assert len(axes) == 1 assert axes[0].shape[0] == grid_resolution def test_partial_dependence_regressor(): # Test partial dependence for regressor clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 pdp, axes = partial_dependence( clf, [0], X=boston.data, grid_resolution=grid_resolution) assert pdp.shape == (1, grid_resolution) assert axes[0].shape[0] == grid_resolution def test_partial_dependecy_input(): # Test input validation of partial dependence. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None) assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, partial_dependence, {}, [0], X=X) # Gradient boosting estimator must be fit assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), [0], X=X) assert_raises(ValueError, partial_dependence, clf, [-1], X=X) assert_raises(ValueError, partial_dependence, clf, [100], X=X) # wrong ndim for grid grid = np.random.rand(10, 2, 1) assert_raises(ValueError, partial_dependence, clf, [0], grid=grid) @if_matplotlib def test_plot_partial_dependence(): # Test partial dependence plot function. 
clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with str features and array feature names fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with list feature_names feature_names = boston.feature_names.tolist() fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) @if_matplotlib def test_plot_partial_dependence_input(): # Test partial dependence plot function input checks. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) # not fitted yet assert_raises(ValueError, plot_partial_dependence, clf, X, [0]) clf.fit(X, y) assert_raises(ValueError, plot_partial_dependence, clf, np.array(X)[:, :0], [0]) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, plot_partial_dependence, {}, X, [0]) # must be larger than -1 assert_raises(ValueError, plot_partial_dependence, clf, X, [-1]) # too large feature value assert_raises(ValueError, plot_partial_dependence, clf, X, [100]) # str feature but no feature_names assert_raises(ValueError, plot_partial_dependence, clf, X, ['foobar']) # not valid features value assert_raises(ValueError, plot_partial_dependence, clf, X, [{'foo': 'bar'}]) @if_matplotlib def test_plot_partial_dependence_multiclass(): # Test partial dependence plot function on multi-class input. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0, grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # now with symbol labels target = iris.target_names[iris.target] clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label='setosa', grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # label not in gbrt.classes_ assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], label='foobar', grid_resolution=grid_resolution) # label not provided assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], grid_resolution=grid_resolution)
bsd-3-clause
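A minimal usage sketch of the legacy sklearn.ensemble.partial_dependence API exercised by the tests above. The calls and the toy fixture mirror the test file; this module only exists in older scikit-learn releases, so treat it as illustrative rather than current API guidance.

# Sketch: compute one-way partial dependence with the legacy API used above.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble.partial_dependence import partial_dependence

X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]

clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)

# pdp has one row per output; axes holds the grid of values used for feature 0.
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
print(pdp.shape, axes[0])

# An explicit grid can be passed instead of X (axes is then None).
grid = np.unique(np.asarray(X)[:, 0])
pdp_2, axes_2 = partial_dependence(clf, [0], grid=grid)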
bnaul/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
4
23219
import pytest import numpy as np import scipy.sparse as sp from scipy import linalg from numpy.testing import assert_array_almost_equal, assert_array_equal from numpy.random import RandomState from sklearn.datasets import make_classification from sklearn.utils.sparsefuncs import (mean_variance_axis, incr_mean_variance_axis, inplace_column_scale, inplace_row_scale, inplace_swap_row, inplace_swap_column, min_max_axis, count_nonzero, csc_median_axis_0) from sklearn.utils.sparsefuncs_fast import (assign_rows_csr, inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2, csr_row_norms) from sklearn.utils._testing import assert_allclose def test_mean_variance_axis0(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 with pytest.raises(TypeError): mean_variance_axis(X_lil, axis=0) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: X_test = X.astype(input_dtype) for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis=0) assert X_means.dtype == output_dtype assert X_vars.dtype == output_dtype assert_array_almost_equal(X_means, np.mean(X_test, axis=0)) assert_array_almost_equal(X_vars, np.var(X_test, axis=0)) def test_mean_variance_axis1(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 with pytest.raises(TypeError): mean_variance_axis(X_lil, axis=1) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: X_test = X.astype(input_dtype) for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis=0) assert X_means.dtype == output_dtype assert X_vars.dtype == output_dtype assert_array_almost_equal(X_means, np.mean(X_test, axis=0)) assert_array_almost_equal(X_vars, np.var(X_test, axis=0)) def test_incr_mean_variance_axis(): for axis in [0, 1]: rng = np.random.RandomState(0) n_features = 50 n_samples = 10 data_chunks = [rng.randint(0, 2, size=n_features) for i in range(n_samples)] # default params for incr_mean_variance last_mean = np.zeros(n_features) last_var = np.zeros_like(last_mean) last_n = np.zeros_like(last_mean, dtype=np.int64) # Test errors X = np.array(data_chunks[0]) X = np.atleast_2d(X) X_lil = sp.lil_matrix(X) X_csr = sp.csr_matrix(X_lil) with pytest.raises(TypeError): incr_mean_variance_axis(X=axis, axis=last_mean, last_mean=last_var, last_var=last_n) with pytest.raises(TypeError): incr_mean_variance_axis(X_lil, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n) # Test _incr_mean_and_var with a 1 row input X_means, X_vars = mean_variance_axis(X_csr, axis) X_means_incr, X_vars_incr, n_incr = \ incr_mean_variance_axis(X_csr, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n) assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, X_vars_incr) # X.shape[axis] picks # samples assert_array_equal(X.shape[axis], n_incr) X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis) 
assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, X_vars_incr) assert_array_equal(X.shape[axis], n_incr) # Test _incremental_mean_and_var with whole data X = np.vstack(data_chunks) X_lil = sp.lil_matrix(X) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) last_mean = last_mean.astype(output_dtype) last_var = last_var.astype(output_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis) X_means_incr, X_vars_incr, n_incr = \ incr_mean_variance_axis(X_sparse, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n) assert X_means_incr.dtype == output_dtype assert X_vars_incr.dtype == output_dtype assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, X_vars_incr) assert_array_equal(X.shape[axis], n_incr) @pytest.mark.parametrize( "X1, X2", [ (sp.random(5, 2, density=0.8, format='csr', random_state=0), sp.random(13, 2, density=0.8, format='csr', random_state=0)), (sp.random(5, 2, density=0.8, format='csr', random_state=0), sp.hstack([sp.csr_matrix(np.full((13, 1), fill_value=np.nan)), sp.random(13, 1, density=0.8, random_state=42)], format="csr")) ] ) def test_incr_mean_variance_axis_equivalence_mean_variance(X1, X2): # non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/16448 # check that computing the incremental mean and variance is equivalent to # computing the mean and variance on the stacked dataset. axis = 0 last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1]) last_n = np.zeros(X1.shape[1], dtype=np.int64) updated_mean, updated_var, updated_n = incr_mean_variance_axis( X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n ) updated_mean, updated_var, updated_n = incr_mean_variance_axis( X2, axis=axis, last_mean=updated_mean, last_var=updated_var, last_n=updated_n ) X = sp.vstack([X1, X2]) assert_allclose(updated_mean, np.nanmean(X.A, axis=axis)) assert_allclose(updated_var, np.nanvar(X.A, axis=axis)) assert_allclose(updated_n, np.count_nonzero(~np.isnan(X.A), axis=0)) def test_incr_mean_variance_no_new_n(): # check the behaviour when we update the variance with an empty matrix axis = 0 X1 = sp.random(5, 1, density=0.8, random_state=0).tocsr() X2 = sp.random(0, 1, density=0.8, random_state=0).tocsr() last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1]) last_n = np.zeros(X1.shape[1], dtype=np.int64) last_mean, last_var, last_n = incr_mean_variance_axis( X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n ) # update statistic with a column which should ignored updated_mean, updated_var, updated_n = incr_mean_variance_axis( X2, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n ) assert_allclose(updated_mean, last_mean) assert_allclose(updated_var, last_var) assert_allclose(updated_n, last_n) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("sparse_constructor", [sp.csc_matrix, sp.csr_matrix]) def test_incr_mean_variance_axis_ignore_nan(axis, sparse_constructor): old_means = np.array([535., 535., 535., 535.]) old_variances = np.array([4225., 4225., 4225., 4225.]) old_sample_count = np.array([2, 2, 2, 2], dtype=np.int64) X = sparse_constructor( np.array([[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]])) X_nan = sparse_constructor( 
np.array([[170, np.nan, 170, 170], [np.nan, 170, 430, 430], [430, 430, np.nan, 300], [300, 300, 300, np.nan]])) # we avoid creating specific data for axis 0 and 1: translating the data is # enough. if axis: X = X.T X_nan = X_nan.T # take a copy of the old statistics since they are modified in place. X_means, X_vars, X_sample_count = incr_mean_variance_axis( X, axis=axis, last_mean=old_means.copy(), last_var=old_variances.copy(), last_n=old_sample_count.copy()) X_nan_means, X_nan_vars, X_nan_sample_count = incr_mean_variance_axis( X_nan, axis=axis, last_mean=old_means.copy(), last_var=old_variances.copy(), last_n=old_sample_count.copy()) assert_allclose(X_nan_means, X_means) assert_allclose(X_nan_vars, X_vars) assert_allclose(X_nan_sample_count, X_sample_count) def test_mean_variance_illegal_axis(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_csr = sp.csr_matrix(X) with pytest.raises(ValueError): mean_variance_axis(X_csr, axis=-3) with pytest.raises(ValueError): mean_variance_axis(X_csr, axis=2) with pytest.raises(ValueError): mean_variance_axis(X_csr, axis=-1) with pytest.raises(ValueError): incr_mean_variance_axis(X_csr, axis=-3, last_mean=None, last_var=None, last_n=None) with pytest.raises(ValueError): incr_mean_variance_axis(X_csr, axis=2, last_mean=None, last_var=None, last_n=None) with pytest.raises(ValueError): incr_mean_variance_axis(X_csr, axis=-1, last_mean=None, last_var=None, last_n=None) def test_densify_rows(): for dtype in (np.float32, np.float64): X = sp.csr_matrix([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype) X_rows = np.array([0, 2, 3], dtype=np.intp) out = np.ones((6, X.shape[1]), dtype=dtype) out_rows = np.array([1, 3, 4], dtype=np.intp) expect = np.ones_like(out) expect[out_rows] = X[X_rows, :].toarray() assign_rows_csr(X, X_rows, out_rows, out) assert_array_equal(out, expect) def test_inplace_column_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(200) XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) with pytest.raises(TypeError): inplace_column_scale(X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) with pytest.raises(TypeError): inplace_column_scale(X.tolil(), scale) def test_inplace_row_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(100) XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) with pytest.raises(TypeError): inplace_column_scale(X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, 
Xr.toarray()) with pytest.raises(TypeError): inplace_column_scale(X.tolil(), scale) def test_inplace_swap_row(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) with pytest.raises(TypeError): inplace_swap_row(X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) with pytest.raises(TypeError): inplace_swap_row(X_csr.tolil()) def test_inplace_swap_column(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) with pytest.raises(TypeError): inplace_swap_column(X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) with pytest.raises(TypeError): inplace_swap_column(X_csr.tolil()) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("axis", [0, 1, None]) @pytest.mark.parametrize("sparse_format", [sp.csr_matrix, sp.csc_matrix]) @pytest.mark.parametrize( "missing_values, min_func, max_func, ignore_nan", [(0, np.min, np.max, False), (np.nan, np.nanmin, np.nanmax, True)] ) @pytest.mark.parametrize("large_indices", [True, False]) def test_min_max(dtype, axis, sparse_format, missing_values, min_func, max_func, ignore_nan, large_indices): X = 
np.array([[0, 3, 0], [2, -1, missing_values], [0, 0, 0], [9, missing_values, 7], [4, 0, 5]], dtype=dtype) X_sparse = sparse_format(X) if large_indices: X_sparse.indices = X_sparse.indices.astype('int64') X_sparse.indptr = X_sparse.indptr.astype('int64') mins_sparse, maxs_sparse = min_max_axis(X_sparse, axis=axis, ignore_nan=ignore_nan) assert_array_equal(mins_sparse, min_func(X, axis=axis)) assert_array_equal(maxs_sparse, max_func(X, axis=axis)) def test_min_max_axis_errors(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) with pytest.raises(TypeError): min_max_axis(X_csr.tolil(), axis=0) with pytest.raises(ValueError): min_max_axis(X_csr, axis=2) with pytest.raises(ValueError): min_max_axis(X_csc, axis=-3) def test_count_nonzero(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) X_nonzero = X != 0 sample_weight = [.5, .2, .3, .1, .1] X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None] for axis in [0, 1, -1, -2, None]: assert_array_almost_equal(count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)) assert_array_almost_equal(count_nonzero(X_csr, axis=axis, sample_weight=sample_weight), X_nonzero_weighted.sum(axis=axis)) with pytest.raises(TypeError): count_nonzero(X_csc) with pytest.raises(ValueError): count_nonzero(X_csr, axis=2) assert (count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype) assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype) # Check dtypes with large sparse matrices too # XXX: test fails on 32bit (Windows/Linux) try: X_csr.indices = X_csr.indices.astype(np.int64) X_csr.indptr = X_csr.indptr.astype(np.int64) assert (count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype) assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype) except TypeError as e: assert ("according to the rule 'safe'" in e.args[0] and np.intp().nbytes < 8), e def test_csc_row_median(): # Test csc_row_median actually calculates the median. # Test that it gives the same output when X is dense. rng = np.random.RandomState(0) X = rng.rand(100, 50) dense_median = np.median(X, axis=0) csc = sp.csc_matrix(X) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test that it gives the same output when X is sparse X = rng.rand(51, 100) X[X < 0.7] = 0.0 ind = rng.randint(0, 50, 10) X[ind] = -X[ind] csc = sp.csc_matrix(X) dense_median = np.median(X, axis=0) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test for toy data. X = [[0, -2], [-1, -1], [1, 0], [2, 1]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5])) X = [[0, -2], [-1, -5], [1, -3]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0., -3])) # Test that it raises an Error for non-csc matrices. 
with pytest.raises(TypeError): csc_median_axis_0(sp.csr_matrix(X)) def test_inplace_normalize(): ones = np.ones((10, 1)) rs = RandomState(10) for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2): for dtype in (np.float64, np.float32): X = rs.randn(10, 5).astype(dtype) X_csr = sp.csr_matrix(X) for index_dtype in [np.int32, np.int64]: # csr_matrix will use int32 indices by default, # up-casting those to int64 when necessary if index_dtype is np.int64: X_csr.indptr = X_csr.indptr.astype(index_dtype) X_csr.indices = X_csr.indices.astype(index_dtype) assert X_csr.indices.dtype == index_dtype assert X_csr.indptr.dtype == index_dtype inplace_csr_row_normalize(X_csr) assert X_csr.dtype == dtype if inplace_csr_row_normalize is inplace_csr_row_normalize_l2: X_csr.data **= 2 assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_csr_row_norms(dtype): # checks that csr_row_norms returns the same output as # scipy.sparse.linalg.norm, and that the dype is the same as X.dtype. X = sp.random(100, 10, format='csr', dtype=dtype, random_state=42) scipy_norms = sp.linalg.norm(X, axis=1)**2 norms = csr_row_norms(X) assert norms.dtype == dtype rtol = 1e-6 if dtype == np.float32 else 1e-7 assert_allclose(norms, scipy_norms, rtol=rtol)
bsd-3-clause
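A short sketch of the incremental pattern these tests exercise: per-column statistics accumulated chunk by chunk with incr_mean_variance_axis should match a single call to mean_variance_axis on the stacked matrix. The call signatures follow the tests above; behavior details may differ across scikit-learn versions.

# Sketch: chunked mean/variance accumulation vs. a one-shot computation.
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis, incr_mean_variance_axis

chunks = [sp.random(20, 4, density=0.5, format="csr", random_state=i)
          for i in range(3)]

last_mean = np.zeros(4)
last_var = np.zeros(4)
last_n = np.zeros(4, dtype=np.int64)
for chunk in chunks:
    last_mean, last_var, last_n = incr_mean_variance_axis(
        chunk, axis=0, last_mean=last_mean, last_var=last_var, last_n=last_n)

# One-shot statistics on the vertically stacked CSR matrix.
full_mean, full_var = mean_variance_axis(sp.vstack(chunks, format="csr"), axis=0)
np.testing.assert_allclose(last_mean, full_mean)
np.testing.assert_allclose(last_var, full_var)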
johnmgregoire/NanoCalorimetry
plotacanalysis_lia_1kHz.py
1
5551
import numpy, h5py, pylab, copy from PnSC_h5io import * from PnSC_math import * from matplotlib.ticker import FuncFormatter def myexpformat(x, pos): for ndigs in range(5): lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-') if eval(lab)==x: return lab return lab ExpTickLabels=FuncFormatter(myexpformat) def lia_ampphaseTEST(x, ptspercyc, ncyclewin=1., returnphase=True, pad=True, phaseshift=0.): npts=numpy.round(ptspercyc*ncyclewin) s=x*sinarr(ptspercyc, x, ph=phaseshift) c=x*sinarr(ptspercyc, x, ph=numpy.pi/2.+phaseshift) amp=(numpy.array([(numpy.abs(numpy.fft.fft(s[i:i+npts])[0]))**2+(numpy.abs(numpy.fft.fft(c[i:i+npts])[0]))**2 for i in numpy.arange(len(x)-npts)])**.5)*2./npts if returnphase: phase=numpy.array([numpy.arctan(numpy.abs(numpy.fft.fft(s[i:i+npts])[0])/numpy.abs(numpy.fft.fft(c[i:i+npts])[0])) for i in numpy.arange(len(x)-npts)]) if pad: amp=numpy.concatenate([amp[:npts//2], amp, amp[-1*(len(x)-len(amp)-npts//2):]]) if returnphase: phase=numpy.concatenate([phase[:npts//2], phase, phase[-1*(len(x)-len(phase)-npts//2):]]) if returnphase: return amp, phase return amp ptspercyc=300. n1wcyc=4 if 0: p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5' f=h5py.File(p,mode='r') z=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplevoltage'][0,:] la=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_voltage'][0,:,:,0] lp=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_voltage'][0,:,:,1] f.close() if 0: p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5' f=h5py.File(p,mode='r') z=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplefilteredvoltage'][0,:] la=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_filteredvoltage'][0,:,:,0] lp=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_filteredvoltage'][0,:,:,1] f.close() if 0: p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5' f=h5py.File(p,mode='r') z=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplecurrent'][0,:] la=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_current'][0,:,:,0] lp=f['Calorimetry/1kHzcrop/analysis/again/LIAharmonics_current'][0,:,:,1] f.close() if 0: p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5' f=h5py.File(p,mode='r') z=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplevoltage'][0,:]/1000. f.close() hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphaseTEST(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) la=ans[:, :, 0] lp=ans[:, :, 1] if 0: p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5' f=h5py.File(p,mode='r') a=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplevoltage'][0,:]/1000. b=f['Calorimetry/1kHzcrop/measurement/HeatProgram/again/samplecurrent'][0,:]/1000. f.close() z=a/b hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphase(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) la=ans[:, :, 0] lp=ans[:, :, 1] if 1: z=numpy.load('C:/Users/JohnnyG/Documents/HarvardWork/ACcal/20110714_Sn_analysis/Aug31kHzanalysis/fake_voltage.npy') hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphase(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) 
la=ans[:, :, 0] lp=ans[:, :, 1] if 0: z=numpy.load('C:/Users/JohnnyG/Documents/HarvardWork/ACcal/20110714_Sn_analysis/Aug31kHzanalysis/fake_current.npy') hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphase(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) la=ans[:, :, 0] lp=ans[:, :, 1] if 0: z=numpy.load('C:/Users/JohnnyG/Documents/HarvardWork/ACcal/20110714_Sn_analysis/Aug31kHzanalysis/fake2_smoothrcurrent.npy') hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphase(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) la=ans[:, :, 0] lp=ans[:, :, 1] def sinarr(nptspercycle, npts, ph=0.): if isinstance(npts, numpy.ndarray): npts=len(npts) return numpy.sin(numpy.arange(npts)*2.*numpy.pi/nptspercycle+ph) if 0: z=3.*sinarr(ptspercyc, 500., .02) hlist=[1, 2, 3] ans=numpy.empty((len(z), len(hlist), 2), dtype='float32') for j, h in enumerate(hlist): ans[:, j, 0], ans[:, j, 1]=lia_ampphase(z, ptspercyc/h, ncyclewin=n1wcyc*h, phaseshift=0.) la=ans[:, :, 0] lp=ans[:, :, 1] if 1: for j in range(3): for i in range(2): ax1=pylab.subplot(3, 2, j*2+1+i) pylab.plot(la[:, j], 'b') pylab.gca().yaxis.set_major_formatter(ExpTickLabels) ax2 = ax1.twinx() ax2.plot(lp[:, j]*180./numpy.pi, 'r', alpha=.4) for tl in ax2.get_yticklabels(): tl.set_color('r') if 1: pylab.show()
bsd-3-clause
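For reference, a compact numpy sketch of the digital lock-in idea behind lia_ampphaseTEST above: project the signal onto in-phase and quadrature references over an integer number of cycles, average, and recover amplitude and phase. This is a simplified single-window illustration, not the script's exact sliding-window and padding scheme.

# Sketch of a single-window digital lock-in amplitude/phase estimate.
import numpy as np

ptspercyc = 300.0            # samples per excitation cycle (as in the script)
ncycles = 4                  # analyze a whole number of cycles
n = int(round(ptspercyc * ncycles))

t = np.arange(n)
true_amp, true_phase = 3.0, 0.2
x = true_amp * np.sin(2 * np.pi * t / ptspercyc + true_phase)

ref_sin = np.sin(2 * np.pi * t / ptspercyc)
ref_cos = np.cos(2 * np.pi * t / ptspercyc)

s = np.mean(x * ref_sin)     # in-phase component   = (A/2) * cos(phi)
c = np.mean(x * ref_cos)     # quadrature component = (A/2) * sin(phi)

amp = 2.0 * np.hypot(s, c)
phase = np.arctan2(c, s)
print(amp, phase)            # ~3.0, ~0.2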
ueshin/apache-spark
python/pyspark/pandas/plot/matplotlib.py
14
30172
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import matplotlib as mat import numpy as np import pandas as pd from matplotlib.axes._base import _process_plot_format from pandas.core.dtypes.inference import is_list_like from pandas.io.formats.printing import pprint_thing from pyspark.pandas.plot import ( TopNPlotBase, SampledPlotBase, HistogramPlotBase, BoxPlotBase, unsupported_function, KdePlotBase, ) if LooseVersion(pd.__version__) < LooseVersion("0.25"): from pandas.plotting._core import ( _all_kinds, BarPlot as PandasBarPlot, BoxPlot as PandasBoxPlot, HistPlot as PandasHistPlot, MPLPlot as PandasMPLPlot, PiePlot as PandasPiePlot, AreaPlot as PandasAreaPlot, LinePlot as PandasLinePlot, BarhPlot as PandasBarhPlot, ScatterPlot as PandasScatterPlot, KdePlot as PandasKdePlot, ) else: from pandas.plotting._matplotlib import ( BarPlot as PandasBarPlot, BoxPlot as PandasBoxPlot, HistPlot as PandasHistPlot, PiePlot as PandasPiePlot, AreaPlot as PandasAreaPlot, LinePlot as PandasLinePlot, BarhPlot as PandasBarhPlot, ScatterPlot as PandasScatterPlot, KdePlot as PandasKdePlot, ) from pandas.plotting._core import PlotAccessor from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot _all_kinds = PlotAccessor._all_kinds class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase): def __init__(self, data, **kwargs): super().__init__(self.get_top_n(data), **kwargs) def _plot(self, ax, x, y, w, start=0, log=False, **kwds): self.set_result_text(ax) return ax.bar(x, y, w, bottom=start, log=log, **kwds) class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase): def boxplot( self, ax, bxpstats, notch=None, sym=None, vert=None, whis=None, positions=None, widths=None, patch_artist=None, bootstrap=None, usermedians=None, conf_intervals=None, meanline=None, showmeans=None, showcaps=None, showbox=None, showfliers=None, boxprops=None, labels=None, flierprops=None, medianprops=None, meanprops=None, capprops=None, whiskerprops=None, manage_ticks=None, # manage_xticks is for compatibility of matplotlib < 3.1.0. 
# Remove this when minimum version is 3.0.0 manage_xticks=None, autorange=False, zorder=None, precision=None, ): def update_dict(dictionary, rc_name, properties): """Loads properties in the dictionary from rc file if not already in the dictionary""" rc_str = "boxplot.{0}.{1}" if dictionary is None: dictionary = dict() for prop_dict in properties: dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)]) return dictionary # Common property dictionaries loading from rc flier_props = [ "color", "marker", "markerfacecolor", "markeredgecolor", "markersize", "linestyle", "linewidth", ] default_props = ["color", "linewidth", "linestyle"] boxprops = update_dict(boxprops, "boxprops", default_props) whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props) capprops = update_dict(capprops, "capprops", default_props) medianprops = update_dict(medianprops, "medianprops", default_props) meanprops = update_dict(meanprops, "meanprops", default_props) flierprops = update_dict(flierprops, "flierprops", flier_props) if patch_artist: boxprops["linestyle"] = "solid" boxprops["edgecolor"] = boxprops.pop("color") # if non-default sym value, put it into the flier dictionary # the logic for providing the default symbol ('b+') now lives # in bxp in the initial value of final_flierprops # handle all of the `sym` related logic here so we only have to pass # on the flierprops dict. if sym is not None: # no-flier case, which should really be done with # 'showfliers=False' but none-the-less deal with it to keep back # compatibility if sym == "": # blow away existing dict and make one for invisible markers flierprops = dict(linestyle="none", marker="", color="none") # turn the fliers off just to be safe showfliers = False # now process the symbol string else: # process the symbol string # discarded linestyle _, marker, color = _process_plot_format(sym) # if we have a marker, use it if marker is not None: flierprops["marker"] = marker # if we have a color, use it if color is not None: # assume that if color is passed in the user want # filled symbol, if the users want more control use # flierprops flierprops["color"] = color flierprops["markerfacecolor"] = color flierprops["markeredgecolor"] = color # replace medians if necessary: if usermedians is not None: if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len( bxpstats ): raise ValueError("usermedians length not compatible with x") else: # reassign medians as necessary for stats, med in zip(bxpstats, usermedians): if med is not None: stats["med"] = med if conf_intervals is not None: if np.shape(conf_intervals)[0] != len(bxpstats): err_mess = "conf_intervals length not compatible with x" raise ValueError(err_mess) else: for stats, ci in zip(bxpstats, conf_intervals): if ci is not None: if len(ci) != 2: raise ValueError("each confidence interval must " "have two values") else: if ci[0] is not None: stats["cilo"] = ci[0] if ci[1] is not None: stats["cihi"] = ci[1] should_manage_ticks = True if manage_xticks is not None: should_manage_ticks = manage_xticks if manage_ticks is not None: should_manage_ticks = manage_ticks if LooseVersion(mat.__version__) < LooseVersion("3.1.0"): extra_args = {"manage_xticks": should_manage_ticks} else: extra_args = {"manage_ticks": should_manage_ticks} artists = ax.bxp( bxpstats, positions=positions, widths=widths, vert=vert, patch_artist=patch_artist, shownotches=notch, showmeans=showmeans, showcaps=showcaps, showbox=showbox, boxprops=boxprops, flierprops=flierprops, 
medianprops=medianprops, meanprops=meanprops, meanline=meanline, showfliers=showfliers, capprops=capprops, whiskerprops=whiskerprops, zorder=zorder, **extra_args, ) return artists def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds): bp = self.boxplot(ax, bxpstats, **kwds) if return_type == "dict": return bp, bp elif return_type == "both": return self.BP(ax=ax, lines=bp), bp else: return ax, bp def _compute_plot_data(self): colname = self.data.name spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label) data = self.data # Updates all props with the rc defaults from matplotlib self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds)) # Gets some important kwds showfliers = self.kwds.get("showfliers", False) whis = self.kwds.get("whis", 1.5) labels = self.kwds.get("labels", [colname]) # This one is pandas-on-Spark specific to control precision for approx_percentile precision = self.kwds.get("precision", 0.01) # # Computes mean, median, Q1 and Q3 with approx_percentile and precision col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision) # # Creates a column to flag rows as outliers or not outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences) # # Computes min and max values of non-outliers - the whiskers whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers) if showfliers: fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0]) else: fliers = [] # Builds bxpstats dict stats = [] item = { "mean": col_stats["mean"], "med": col_stats["med"], "q1": col_stats["q1"], "q3": col_stats["q3"], "whislo": whiskers[0], "whishi": whiskers[1], "fliers": fliers, "label": labels[0], } stats.append(item) self.data = {labels[0]: stats} def _make_plot(self): bxpstats = list(self.data.values())[0] ax = self._get_ax(0) kwds = self.kwds.copy() for stats in bxpstats: if len(stats["fliers"]) > 1000: stats["fliers"] = stats["fliers"][:1000] ax.text( 1, 1, "showing top 1,000 fliers only", size=6, ha="right", va="bottom", transform=ax.transAxes, ) ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds) self.maybe_color_bp(bp) self._return_obj = ret labels = [l for l, _ in self.data.items()] labels = [pprint_thing(l) for l in labels] if not self.use_index: labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) @staticmethod def rc_defaults( notch=None, vert=None, whis=None, patch_artist=None, bootstrap=None, meanline=None, showmeans=None, showcaps=None, showbox=None, showfliers=None, **kwargs ): # Missing arguments default to rcParams. 
if whis is None: whis = mat.rcParams["boxplot.whiskers"] if bootstrap is None: bootstrap = mat.rcParams["boxplot.bootstrap"] if notch is None: notch = mat.rcParams["boxplot.notch"] if vert is None: vert = mat.rcParams["boxplot.vertical"] if patch_artist is None: patch_artist = mat.rcParams["boxplot.patchartist"] if meanline is None: meanline = mat.rcParams["boxplot.meanline"] if showmeans is None: showmeans = mat.rcParams["boxplot.showmeans"] if showcaps is None: showcaps = mat.rcParams["boxplot.showcaps"] if showbox is None: showbox = mat.rcParams["boxplot.showbox"] if showfliers is None: showfliers = mat.rcParams["boxplot.showfliers"] return dict( whis=whis, bootstrap=bootstrap, notch=notch, vert=vert, patch_artist=patch_artist, meanline=meanline, showmeans=showmeans, showcaps=showcaps, showbox=showbox, showfliers=showfliers, ) class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase): def _args_adjust(self): if is_list_like(self.bottom): self.bottom = np.array(self.bottom) def _compute_plot_data(self): self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins) def _make_plot(self): # TODO: this logic is similar with KdePlot. Might have to deduplicate it. # 'num_colors' requires to calculate `shape` which has to count all. # Use 1 for now to save the computation. colors = self._get_colors(num_colors=1) stacking_id = self._get_stacking_id() output_series = HistogramPlotBase.compute_hist(self.data, self.bins) for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series): ax = self._get_ax(i) kwds = self.kwds.copy() label = pprint_thing(label if len(label) > 1 else label[0]) kwds["label"] = label style, kwds = self._apply_style_colors(colors, kwds, i, label) if style is not None: kwds["style"] = style kwds = self._make_plot_keywords(kwds, y) artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds) self._add_legend_handle(artists[0], label, index=i) @classmethod def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds): if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(bins) - 1) base = np.zeros(len(bins) - 1) bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"]) # Since the counts were computed already, we use them as weights and just generate # one entry for each bin n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds) cls._update_stacker(ax, stacking_id, n) return patches class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase): def __init__(self, data, **kwargs): super().__init__(self.get_top_n(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super()._make_plot() class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase): def __init__(self, data, **kwargs): super().__init__(self.get_sampled(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super()._make_plot() class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase): def __init__(self, data, **kwargs): super().__init__(self.get_sampled(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super()._make_plot() class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase): def __init__(self, data, **kwargs): super().__init__(self.get_top_n(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super()._make_plot() class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase): def __init__(self, data, x, y, **kwargs): super().__init__(self.get_top_n(data), x, y, 
**kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super()._make_plot() class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase): def _compute_plot_data(self): self.data = KdePlotBase.prepare_kde_data(self.data) def _make_plot(self): # 'num_colors' requires to calculate `shape` which has to count all. # Use 1 for now to save the computation. colors = self._get_colors(num_colors=1) stacking_id = self._get_stacking_id() sdf = self.data._internal.spark_frame for i, label in enumerate(self.data._internal.column_labels): # 'y' is a Spark DataFrame that selects one column. y = sdf.select(self.data._internal.spark_column_for(label)) ax = self._get_ax(i) kwds = self.kwds.copy() label = pprint_thing(label if len(label) > 1 else label[0]) kwds["label"] = label style, kwds = self._apply_style_colors(colors, kwds, i, label) if style is not None: kwds["style"] = style kwds = self._make_plot_keywords(kwds, y) artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds) self._add_legend_handle(artists[0], label, index=i) def _get_ind(self, y): return KdePlotBase.get_ind(y, self.ind) @classmethod def _plot( cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds ): y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind) lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds) return lines _klasses = [ PandasOnSparkHistPlot, PandasOnSparkBarPlot, PandasOnSparkBoxPlot, PandasOnSparkPiePlot, PandasOnSparkAreaPlot, PandasOnSparkLinePlot, PandasOnSparkBarhPlot, PandasOnSparkScatterPlot, PandasOnSparkKdePlot, ] _plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses} _common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"} _series_kinds = _common_kinds.union(set()) _dataframe_kinds = _common_kinds.union({"scatter", "hexbin"}) _pandas_on_spark_all_kinds = _common_kinds.union(_series_kinds).union(_dataframe_kinds) def plot_pandas_on_spark(data, kind, **kwargs): if kind not in _pandas_on_spark_all_kinds: raise ValueError("{} is not a valid plot kind".format(kind)) from pyspark.pandas import DataFrame, Series if isinstance(data, Series): if kind not in _series_kinds: return unsupported_function(class_name="pd.Series", method_name=kind)() return plot_series(data=data, kind=kind, **kwargs) elif isinstance(data, DataFrame): if kind not in _dataframe_kinds: return unsupported_function(class_name="pd.DataFrame", method_name=kind)() return plot_frame(data=data, kind=kind, **kwargs) def plot_series( data, kind="line", ax=None, # Series unique figsize=None, use_index=True, title=None, grid=None, legend=False, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, # Series unique **kwds ): """ Make plots of Series using matplotlib / pylab. Each plot kind has a corresponding method on the ``Series.plot`` accessor: ``s.plot(kind='line')`` is equivalent to ``s.plot.line()``. Parameters ---------- data : Series kind : str - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot - 'hist' : histogram - 'box' : boxplot - 'kde' : Kernel Density Estimation plot - 'density' : same as 'kde' - 'area' : area plot - 'pie' : pie plot ax : matplotlib axes object If not passed, uses gca() figsize : a tuple (width, height) in inches use_index : boolean, default True Use index as ticks for x axis title : string or list Title to use for the plot. 
If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. grid : boolean, default None (matlab style default) Axis grid lines legend : False/True/'reverse' Place legend on axis subplots style : list or dict matplotlib line style per column logx : boolean, default False Use log scaling on x axis logy : boolean, default False Use log scaling on y axis loglog : boolean, default False Use log scaling on both x and y axes xticks : sequence Values to use for the xticks yticks : sequence Values to use for the yticks xlim : 2-tuple/list ylim : 2-tuple/list rot : int, default None Rotation for ticks (xticks for vertical, yticks for horizontal plots) fontsize : int, default None Font size for xticks and yticks colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. colorbar : boolean, optional If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) table : boolean, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. If a Series or DataFrame is passed, use passed data to draw a table. yerr : DataFrame, Series, array-like, dict and str See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail. xerr : same types as yerr. label : label argument to provide to plot secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right mark_right : boolean, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend **kwds : keywords Options to pass to matplotlib plotting method Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- - See matplotlib documentation online for more on this subject - If `kind` = 'bar' or 'barh', you can specify relative alignments for bar plot layout by `position` keyword. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) """ # function copied from pandas.plotting._core # so it calls modified _plot below import matplotlib.pyplot as plt if ax is None and len(plt.get_fignums()) > 0: with plt.rc_context(): ax = plt.gca() ax = PandasMPLPlot._get_ax_layer(ax) return _plot( data, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, label=label, secondary_y=secondary_y, **kwds, ) def plot_frame( data, x=None, y=None, kind="line", ax=None, subplots=None, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds ): """ Make plots of DataFrames using matplotlib / pylab. Each plot kind has a corresponding method on the ``DataFrame.plot`` accessor: ``psdf.plot(kind='line')`` is equivalent to ``psdf.plot.line()``. 
Parameters ---------- data : DataFrame kind : str - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot - 'hist' : histogram - 'box' : boxplot - 'kde' : Kernel Density Estimation plot - 'density' : same as 'kde' - 'area' : area plot - 'pie' : pie plot - 'scatter' : scatter plot ax : matplotlib axes object If not passed, uses gca() x : label or position, default None y : label, position or list of label, positions, default None Allows plotting of one column versus another. figsize : a tuple (width, height) in inches use_index : boolean, default True Use index as ticks for x axis title : string or list Title to use for the plot. If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. grid : boolean, default None (matlab style default) Axis grid lines legend : False/True/'reverse' Place legend on axis subplots style : list or dict matplotlib line style per column logx : boolean, default False Use log scaling on x axis logy : boolean, default False Use log scaling on y axis loglog : boolean, default False Use log scaling on both x and y axes xticks : sequence Values to use for the xticks yticks : sequence Values to use for the yticks xlim : 2-tuple/list ylim : 2-tuple/list sharex: bool or None, default is None Whether to share x axis or not. sharey: bool, default is False Whether to share y axis or not. rot : int, default None Rotation for ticks (xticks for vertical, yticks for horizontal plots) fontsize : int, default None Font size for xticks and yticks colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. colorbar : boolean, optional If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) table : boolean, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. If a Series or DataFrame is passed, use passed data to draw a table. yerr : DataFrame, Series, array-like, dict and str See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail. xerr : same types as yerr. label : label argument to provide to plot secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right mark_right : boolean, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend sort_columns: bool, default is False When True, will sort values on plots. **kwds : keywords Options to pass to matplotlib plotting method Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- - See matplotlib documentation online for more on this subject - If `kind` = 'bar' or 'barh', you can specify relative alignments for bar plot layout by `position` keyword. From 0 (left/bottom-end) to 1 (right/top-end). 
Default is 0.5 (center) """ return _plot( data, kind=kind, x=x, y=y, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, subplots=subplots, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, sharex=sharex, sharey=sharey, secondary_y=secondary_y, layout=layout, sort_columns=sort_columns, **kwds, ) def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds): from pyspark.pandas import DataFrame # function copied from pandas.plotting._core # and adapted to handle pandas-on-Spark DataFrame and Series kind = kind.lower().strip() kind = {"density": "kde"}.get(kind, kind) if kind in _all_kinds: klass = _plot_klass[kind] else: raise ValueError("%r is not a valid plot kind" % kind) # scatter and hexbin are inherited from PlanePlot which require x and y if kind in ("scatter", "hexbin"): plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds) else: # check data type and do preprocess before applying plot if isinstance(data, DataFrame): if x is not None: data = data.set_index(x) # TODO: check if value of y is plottable if y is not None: data = data[y] plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) plot_obj.generate() plot_obj.draw() return plot_obj.result
apache-2.0
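A brief usage sketch for the matplotlib backend implemented above. It assumes a Spark runtime is available and that pandas-on-Spark's plotting backend option (assumed here to be the "plotting.backend" key) is switched from its default to matplotlib; the option name and default may vary across Spark versions.

# Usage sketch: route pandas-on-Spark plots through the matplotlib backend above.
import matplotlib.pyplot as plt
import pyspark.pandas as ps

ps.set_option("plotting.backend", "matplotlib")   # assumed option key

psdf = ps.DataFrame({"x": list(range(100)), "y": [i % 7 for i in range(100)]})

ax = psdf.plot.line(x="x", y="y")      # dispatched to PandasOnSparkLinePlot
ax = psdf["y"].plot.hist(bins=7)       # dispatched to PandasOnSparkHistPlot
plt.show()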
zyhhhhhhh/cs446-machine-learning
hw3/hw3-code/python/exp4.py
1
1600
import gen
import algorithms
import numpy as np
import matplotlib.pyplot as plt


def adagrad(y, x, r):
    w = np.zeros(len(x[0]) + 1)
    G = np.ones(len(x[0]) + 1)
    error = []
    errcount = 0
    Q = []
    q = 0
    for i in range(len(x)):
        if y[i] * (np.inner(w[0:-1], x[i]) + w[-1]) <= 1:
            errcount += 1
            q += max(0, 1 - y[i] * (np.inner(w[0:-1], x[i]) + w[-1]))
            for j in range(len(x[0])):
                G[j] += (-y[i] * x[i][j]) ** 2
            G[-1] += y[i] ** 2
            x_new = np.append(x[i], 1)
            for j in range(len(w)):
                w[j] += r * y[i] * np.divide(x_new[j], np.power(G[j], 0.5))
        error.append(errcount)
        Q.append(q)
    return w[0:-1], w[-1], error, Q


def test_bonus():
    w1, theta1, error_t, Q = adagrad(np.tile(dy, 50), np.tile(dx, (50, 1)), 1.5)
    index = np.linspace(0, 50 * 10000, num=50, endpoint=False, dtype=int)
    # print(len(index))
    error_plot = [error_t[i] for i in index]
    for i in range(49, 0, -1):
        error_plot[i] = error_plot[i] - error_plot[i - 1]
    plt.plot(np.linspace(1, 50, dtype=int), error_plot, color="blue")
    plt.xlabel("round")
    plt.ylabel("# of mistakes")
    plt.title("AdaGrad mistakes over rounds")
    plt.show()
    plt.figure()
    Q_plot = [Q[i] for i in index]
    for i in range(49, 0, -1):
        Q_plot[i] = Q_plot[i] - Q_plot[i - 1]
    plt.plot(np.linspace(1, 50, dtype=int), Q_plot, color="blue")
    plt.xlabel("round")
    plt.ylabel("hinge loss")
    plt.title("hinge loss over rounds")
    plt.show()


dy, dx = gen.gen(10, 20, 40, 10000, True)
test_bonus()
mit
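The update implemented in adagrad() above, restated: on a margin mistake (y_i * (w.x_i + b) <= 1) the per-coordinate accumulator grows by the squared gradient and the step is scaled by its square root, i.e. G_j += (y_i * x_ij)^2 and w_j += r * y_i * x_ij / sqrt(G_j). A vectorized sketch of a single such step, not a drop-in replacement for the homework code:

# Vectorized restatement of the per-example AdaGrad update used above.
import numpy as np

def adagrad_step(w, b, G_w, G_b, x_i, y_i, r):
    # mistake check on the margin with the augmented bias term
    if y_i * (np.dot(w, x_i) + b) <= 1:
        G_w += (y_i * x_i) ** 2          # per-feature squared-gradient accumulator
        G_b += y_i ** 2                  # bias accumulator
        w += r * y_i * x_i / np.sqrt(G_w)
        b += r * y_i / np.sqrt(G_b)
    return w, b, G_w, G_b

w, b = np.zeros(3), 0.0
G_w, G_b = np.ones(3), 1.0               # accumulators start at 1, as in the script
w, b, G_w, G_b = adagrad_step(w, b, G_w, G_b, np.array([1.0, -1.0, 0.5]), 1, r=1.5)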
google/active-qa
px/environments/docqa.py
1
13115
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wraps the DocQA model for use as an environment. The environment uses a DocQA model to produce an answer from a specified document. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import pandas as pd import pickle import tensorflow as tf from tensorflow import gfile from docqa.data_processing.document_splitter import FirstN from docqa.data_processing.document_splitter import MergeParagraphs from docqa.data_processing.document_splitter import ShallowOpenWebRanker from docqa.data_processing.document_splitter import TopTfIdf from docqa.data_processing.preprocessed_corpus import preprocess_par from docqa.data_processing.qa_training_data import ParagraphAndQuestionDataset from docqa.data_processing.span_data import TokenSpans from docqa.data_processing.text_utils import NltkAndPunctTokenizer from docqa.data_processing.text_utils import NltkPlusStopWords from docqa.dataset import FixedOrderBatcher from docqa.eval.triviaqa_full_document_eval import RecordParagraphSpanPrediction from docqa.evaluator import AysncEvaluatorRunner from docqa.evaluator import EvaluatorRunner from docqa.model_dir import ModelDir from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.build_span_corpus import TriviaQaWebDataset from docqa.triviaqa.build_span_corpus import TriviaQaWikiDataset from docqa.triviaqa.training_data import DocumentParagraphQuestion from docqa.triviaqa.training_data import ExtractMultiParagraphs from docqa.triviaqa.training_data import ExtractMultiParagraphsPerQuestion from docqa.utils import ResourceLoader class DocqaEnvironment(object): """Environment containing the DocQA model. This environment loads a DocQA model and preprocessed data for the chosen datasets. The environment is queried with a pointer to an existing datapoint, which contains preprocessed documents, and a question. DocQA is run using the given question against the documents and the top answer with its score is returned. """ def __init__(self, precomputed_data_path, corpus_dir, model_dir, nltk_dir, async=0, batch_size=32, corpus_name="wiki", debug_mode=False, filter_name=None, load_test=False, max_answer_len=8, max_tokens=400, n_paragraphs=5, n_processes=12, step="latest"): """Constructor loads the DocQA configuration, model and data. Args: precomputed_data_path: Path to the precomputed data stored in a pickle file. corpus_dir: Path to corpus directory. model_dir: Directory containing parameters of a pre-trained DocQA model. nltk_dir: Folder containing the nltk package. async: If greater than 0, run <async> evaluations in parallel. batch_size: Maximum batch size. corpus_name: Name of the corpus: "wiki" or "web". debug_mode: If true, logs additional debug information. filter_name: Type of the filter to select documents. Valid values are: "linear", "tfidf", or "truncate". 
load_test: If True, loads the test set as well. max_answer_len: Maximum number of tokens an answer will have. Truncate if it is longer. max_tokens: Maximum number of tokens per paragraph. n_paragraphs: Maximum number of paragraphs to be retrieved. n_processes: Number of parallel processes to use whe loading the data. step: Which step from the checkpoint the model will be loaded from. When step="latest", the lastest checkpoint in model_dir will be used. """ self.async = async self.debug_mode = debug_mode self.max_tokens = max_tokens self.evaluators = [RecordParagraphSpanPrediction(max_answer_len, True)] self.tokenizer = NltkAndPunctTokenizer(nltk_dir=nltk_dir) datasets = ["train", "dev"] if load_test: datasets.append("test") print("Loading model...") model_dir = ModelDir(model_dir) self.model = model_dir.get_model() print("Loading data...") self.corpus_name = corpus_name self.load_data( precomputed_data_path=precomputed_data_path, corpus_name=corpus_name, corpus_dir=corpus_dir, nltk_dir=nltk_dir, datasets=datasets, filter_name=filter_name, n_paragraphs=n_paragraphs, n_processes=n_processes) print("Setting up model") # Tell the model the batch size (can be None) and vocab to expect. This will # load the needed word vectors and fix the batch size to use when building # the graph / encoding the input. # This step is here to compute the vocabulary. data_flattened = [] for val in self.data.values(): data_flattened.extend(val) temp_data = ParagraphAndQuestionDataset(data_flattened, FixedOrderBatcher(batch_size, True)) self.model.set_inputs([temp_data], ResourceLoader()) if self.async > 0: self.evaluator_runner = AysncEvaluatorRunner(self.evaluators, self.model, self.async) inputs = self.evaluator_runner.dequeue_op else: self.evaluator_runner = EvaluatorRunner(self.evaluators, self.model) inputs = self.model.get_placeholders() input_dict = {p: x for p, x in zip(self.model.get_placeholders(), inputs)} self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) with self.sess.as_default(): pred = self.model.get_predictions_for(input_dict) self.evaluator_runner.set_input(pred) if step is not None: if step == "latest": checkpoint = model_dir.get_latest_checkpoint() else: checkpoint = model_dir.get_checkpoint(int(step)) else: checkpoint = model_dir.get_best_weights() if checkpoint is not None: print("Using best weights") else: print("Using latest checkpoint") checkpoint = model_dir.get_latest_checkpoint() saver = tf.train.Saver() saver.restore(self.sess, checkpoint) tf.get_default_graph().finalize() def GetAnswers(self, questions, document_ids): """Computes an answer for a given question and document id. Runs a DocQA model on a specified datapoint, but using the input question in place of the original. Args: questions: List of strings used to replace the original question. document_ids: A list of strings representing the identifiers for the context documents. Returns: A list Raises: ValueError: If the number of questions and document_ids differ. 
""" if len(questions) != len(document_ids): raise ValueError("Number of questions and document_ids must be equal.") # Prepare questions: data_preprocessed = [] question_ids = [] for question, document_id in zip(questions, document_ids): question_tokenized = self.tokenizer.tokenize_paragraph_flat(question) original_paragraph_questions = self.data[document_id] for rank, original_paragraph_question in enumerate( original_paragraph_questions): if rank == 0: question_ids.append(original_paragraph_question.question_id) new_paragraph_question = DocumentParagraphQuestion( q_id=original_paragraph_question.question_id, doc_id=original_paragraph_question.doc_id, para_range=original_paragraph_question.para_range, question=question_tokenized, context=original_paragraph_question.context, answer=original_paragraph_question.answer, rank=rank) data_preprocessed.append(new_paragraph_question) data_preprocessed = ParagraphAndQuestionDataset( data_preprocessed, FixedOrderBatcher(batch_size=len(questions), truncate_batches=True)) evaluation = self.evaluator_runner.run_evaluators( sess=self.sess, dataset=data_preprocessed, name=self.corpus_name, n_sample=None, feed_dict={}) # create a pandas dataframe that will have the following columns: # question_id, doc_id, n_answers, para_end, para_start, predicted_start, # predicted_end, predicted_score, rank, text_answer, text_em, text_f1 dataframe = pd.DataFrame(evaluation.per_sample) answers = self.best_answers(dataframe) # align questions and answers output = [answers[question_id] for question_id in question_ids] return output def best_answers(self, dataframe): """Return the best answer based on the predicted score. """ answers = {} for question_id, text_answer, predicted_score, text_f1 in dataframe[[ "question_id", "text_answer", "predicted_score", "text_f1" ]].itertuples(index=False): if question_id not in answers: answers[question_id] = (text_answer, predicted_score, text_f1) else: if predicted_score > answers[question_id][1]: answers[question_id] = (text_answer, predicted_score, text_f1) return answers def load_data(self, precomputed_data_path, corpus_name, corpus_dir, nltk_dir, datasets, filter_name, n_paragraphs, n_processes): """Load corpus and question-answer data onto memory. 
""" if corpus_name.startswith("web"): self.dataset = TriviaQaWebDataset(corpus_dir) elif corpus_name.startswith("wiki"): self.dataset = TriviaQaWikiDataset(corpus_dir) else: self.dataset = TriviaQaOpenDataset(corpus_dir) questions = [] if "train" in datasets: questions.extend(self.dataset.get_train()) if "dev" in datasets: questions.extend(self.dataset.get_dev()) if "test" in datasets: questions.extend(self.dataset.get_test()) # wiki and web are both multi-document per_document = corpus_name.startswith("web") if per_document: self.group_by = ["question_id", "doc_id"] else: self.group_by = ["question_id"] if gfile.Exists(precomputed_data_path): print("Loading precomputed data from {}".format(precomputed_data_path)) with gfile.GFile(precomputed_data_path, "rb") as f: self.data = pickle.load(f) else: print("Building question/paragraph pairs...") corpus = self.dataset.evidence splitter = MergeParagraphs(self.max_tokens) if filter_name is None: # Pick default depending on the kind of data we are using if per_document: filter_name = "tfidf" else: filter_name = "linear" if filter_name == "tfidf": para_filter = TopTfIdf( NltkPlusStopWords(punctuation=True, nltk_dir=nltk_dir), n_to_select=n_paragraphs) elif filter_name == "truncate": para_filter = FirstN(n_paragraphs) elif filter_name == "linear": para_filter = ShallowOpenWebRanker( n_to_select=n_paragraphs, nltk_dir=nltk_dir) else: raise ValueError() # Loads the relevant questions/documents, selects the right paragraphs, # and runs the model's preprocessor. if per_document: prep = ExtractMultiParagraphs( splitter, para_filter, self.model.preprocessor, require_an_answer=False) else: prep = ExtractMultiParagraphsPerQuestion( splitter, para_filter, self.model.preprocessor, require_an_answer=False) prepped_data = preprocess_par(questions, corpus, prep, n_processes, 1000) self.data = {} for q in prepped_data.data: self.data[q.question_id] = [] for rank, p in enumerate(q.paragraphs): if q.answer_text is None: ans = None else: ans = TokenSpans(q.answer_text, p.answer_spans) self.data[q.question_id].append( DocumentParagraphQuestion( q_id=q.question_id, doc_id=p.doc_id, para_range=(p.start, p.end), question=q.question, context=p.text, answer=ans, rank=rank)) print("Saving precomputed data to {}".format(precomputed_data_path)) with gfile.GFile(precomputed_data_path, "wb") as f: pickle.dump(self.data, f, -1) print("Done.")
apache-2.0
gronostajo/droogle
base.py
1
5821
from collections import Counter import gzip from operator import itemgetter from os import listdir, path import re import cPickle as pickle import json from math import log, sqrt from scipy.sparse import csr_matrix, lil_matrix, coo_matrix import numpy as np from sklearn.preprocessing import normalize import unicodedata __author__ = 'gronostaj' def list_dirs(dirpath): return [f for f in listdir(dirpath) if path.isdir(path.join(dirpath, f))] def list_files(dirpath): return [f for f in listdir(dirpath) if path.isfile(path.join(dirpath, f))] class Serializer: @staticmethod def serialize(obj, serializer, filename, gz=False, **kwargs): if gz: with gzip.open('%s.gz' % filename, 'wb', 5) as f: f.write(serializer.dumps(obj, **kwargs)) else: with open(filename, 'wb') as f: f.write(serializer.dumps(obj, **kwargs)) @staticmethod def deserialize(serializer, filename): gz = filename.endswith('.gz') if gz: with gzip.open(filename, 'rb') as f: obj = serializer.load(f) else: with open(filename, 'rb') as f: obj = serializer.load(f) return obj @staticmethod def pickle(obj, filename, gz=True): Serializer.serialize(obj, pickle, filename, gz) @staticmethod def unpickle(filename): return Serializer.deserialize(pickle, filename) @staticmethod def to_json(obj, filename, gz=True): Serializer.serialize(obj, json, filename, gz, sort_keys=True, indent=4, separators=(',', ': ')) @staticmethod def from_json(filename): return Serializer.deserialize(json, filename) class Droogle: SUFFIXES = ('%s.pickle', '%s.pickle.gz', '%s.json', '%s.json.gz') _WORDMAP = 'wordmap' _MATRIX = 'matrix' _CHUNKS = 'chunks' def __init__(self, indexdir): dbs = {} for req in (Droogle._WORDMAP, Droogle._MATRIX, Droogle._CHUNKS): satisfying = [ path.join(indexdir, suffix % req) for suffix in Droogle.SUFFIXES if path.isfile(path.join(indexdir, suffix % req)) ] if not satisfying: raise FileMissingError(req) else: dbs[req] = satisfying[0] self.dbs = { k: Serializer.unpickle(f) if f.endswith('.pickle') or f.endswith('.pickle.gz') else Serializer.from_json(f) for k, f in dbs.iteritems() } @staticmethod def _sanitize(str): return re.sub(r'[^\x00-\x7F]+', ' ', str.lower()) @staticmethod def _bagofwords(str): return Counter(re.findall(r'\w+', str)) @staticmethod def _indexstring(filename, str, separator): bags = {} chunks = {} wordset = set() for i, chunk in enumerate(re.split(separator, str)): bag = Droogle._bagofwords(Droogle._sanitize(chunk)) bags['%s_%d' % (filename, i)] = dict(bag) chunks['%s_%d' % (filename, i)] = chunk wordset = wordset | set(bag.keys()) return bags, chunks, wordset @staticmethod def index(dirpath, inputfiles, separator): bags = {} chunks = {} wordset = set() for inputfile in inputfiles: print("- Parsing file %s" % inputfile) with open(path.join(dirpath, inputfile), 'r') as f: thisbag, thischunks, thisset = Droogle._indexstring(inputfile, f.read(), separator) bags.update(thisbag) chunks.update(thischunks) wordset = wordset | thisset print("- Building matrix") wordmap = {w: i for i, w in enumerate(wordset)} chunkmap = {c: i for i, c in enumerate(bags.keys())} matrix = lil_matrix((len(wordset), len(bags))) chunks = {chunkmap[n]: c for n, c in chunks.items()} for chunkname, chunkid in chunkmap.iteritems(): bag = dict(bags[chunkname]) for word, quantity in bag.iteritems(): wordid = wordmap[word] matrix[wordid, chunkid] = quantity matrix = csr_matrix(matrix) print("- Optimizing matrix") nonzero = np.diff(matrix.indptr) idf = lil_matrix(np.array(map(lambda c: log(len(wordset) / c), nonzero))) matrix = matrix.transpose().multiply(idf) 
normalize(matrix, copy=False) matrix = matrix.transpose() print("- Saving files") Serializer.to_json(wordmap, path.join(dirpath, "%s.json" % Droogle._WORDMAP)) Serializer.pickle(matrix, path.join(dirpath, "%s.pickle" % Droogle._MATRIX)) Serializer.pickle(chunks, path.join(dirpath, "%s.pickle" % Droogle._CHUNKS)) return len(bags), len(wordset) def query(self, string): bag = Droogle._bagofwords(Droogle._sanitize(string)) norm = sqrt(reduce(lambda v, x: v + x ** 2, bag.values())) bag = {k: v / norm for k, v in dict(bag).iteritems()} bagmap = { self.dbs[Droogle._WORDMAP][word]: count for word, count in bag.iteritems() if word in self.dbs[Droogle._WORDMAP] } bagmap = zip(*bagmap.items()) lookup = coo_matrix( (bagmap[1], ([0] * len(bagmap[0]), bagmap[0])), dtype='double', shape=(1, self.dbs[Droogle._MATRIX].shape[0]) ).dot(self.dbs[Droogle._MATRIX]) results = [(self.dbs[Droogle._CHUNKS][i], lookup[0, i]) for i in xrange(self.dbs[Droogle._MATRIX].shape[1])] return map(itemgetter(0), sorted(results, key=itemgetter(1), reverse=True)) class FileMissingError(Exception): def __init__(self, filename): self.filename = filename
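A minimal end-to-end sketch of the module above (Python 2, like the module itself): index a small directory of plain-text files and run a query. The directory name, file extension and chunk-separator regex are illustrative; Droogle.index writes the wordmap/matrix/chunks files next to the sources, and Droogle() then loads them back in.

# Illustrative only: paths and the separator regex are assumptions.
from base import Droogle, list_files   # this module (base.py per the path above)

corpus_dir = 'corpus'                  # directory holding plain-text documents
txt_files = [f for f in list_files(corpus_dir) if f.endswith('.txt')]

# Build the index: chunks are split on blank lines; the tf-idf matrix is stored
# as wordmap.json.gz / matrix.pickle.gz / chunks.pickle.gz inside corpus_dir.
n_chunks, n_words = Droogle.index(corpus_dir, txt_files, separator=r'\n\s*\n')
print('indexed %d chunks over %d distinct words' % (n_chunks, n_words))

droogle = Droogle(corpus_dir)          # loads the serialized index from disk
for chunk in droogle.query('example search phrase')[:3]:
    print(chunk)                       # best-matching chunks first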
gpl-2.0
kjung/scikit-learn
examples/applications/plot_tomography_l1_reconstruction.py
81
5461
""" ====================================================================== Compressive sensing: tomography reconstruction with L1 prior (Lasso) ====================================================================== This example shows the reconstruction of an image from a set of parallel projections, acquired along different angles. Such a dataset is acquired in **computed tomography** (CT). Without any prior information on the sample, the number of projections required to reconstruct the image is of the order of the linear size ``l`` of the image (in pixels). For simplicity we consider here a sparse image, where only pixels on the boundary of objects have a non-zero value. Such data could correspond for example to a cellular material. Note however that most images are sparse in a different basis, such as the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is necessary to use prior information available on the sample (its sparsity): this is an example of **compressive sensing**. The tomography projection operation is a linear transformation. In addition to the data-fidelity term corresponding to a linear regression, we penalize the L1 norm of the image to account for its sparsity. The resulting optimization problem is called the :ref:`lasso`. We use the class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent algorithm. Importantly, this implementation is more computationally efficient on a sparse matrix, than the projection operator used here. The reconstruction with L1 penalization gives a result with zero error (all pixels are successfully labeled with 0 or 1), even if noise was added to the projections. In comparison, an L2 penalization (:class:`sklearn.linear_model.Ridge`) produces a large number of labeling errors for the pixels. Important artifacts are observed on the reconstructed image, contrary to the L1 penalization. Note in particular the circular artifact separating the pixels in the corners, that have contributed to fewer projections than the central disk. """ print(__doc__) # Author: Emmanuelle Gouillart <[email protected]> # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import ndimage from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge import matplotlib.pyplot as plt def _weights(x, dx=1, orig=0): x = np.ravel(x) floor_x = np.floor((x - orig) / dx) alpha = (x - orig - floor_x * dx) / dx return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha)) def _generate_center_coordinates(l_x): X, Y = np.mgrid[:l_x, :l_x].astype(np.float64) center = l_x / 2. X += 0.5 - center Y += 0.5 - center return X, Y def build_projection_operator(l_x, n_dir): """ Compute the tomography design matrix. Parameters ---------- l_x : int linear size of image array n_dir : int number of angles at which projections are acquired. 
Returns ------- p : sparse matrix of shape (n_dir l_x, l_x**2) """ X, Y = _generate_center_coordinates(l_x) angles = np.linspace(0, np.pi, n_dir, endpoint=False) data_inds, weights, camera_inds = [], [], [] data_unravel_indices = np.arange(l_x ** 2) data_unravel_indices = np.hstack((data_unravel_indices, data_unravel_indices)) for i, angle in enumerate(angles): Xrot = np.cos(angle) * X - np.sin(angle) * Y inds, w = _weights(Xrot, dx=1, orig=X.min()) mask = np.logical_and(inds >= 0, inds < l_x) weights += list(w[mask]) camera_inds += list(inds[mask] + i * l_x) data_inds += list(data_unravel_indices[mask]) proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds))) return proj_operator def generate_synthetic_data(): """ Synthetic binary data """ rs = np.random.RandomState(0) n_pts = 36. x, y = np.ogrid[0:l, 0:l] mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2 mask = np.zeros((l, l)) points = l * rs.rand(2, n_pts) mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1 mask = ndimage.gaussian_filter(mask, sigma=l / n_pts) res = np.logical_and(mask > mask.mean(), mask_outer) return res - ndimage.binary_erosion(res) # Generate synthetic images, and projections l = 128 proj_operator = build_projection_operator(l, l / 7.) data = generate_synthetic_data() proj = proj_operator * data.ravel()[:, np.newaxis] proj += 0.15 * np.random.randn(*proj.shape) # Reconstruction with L2 (Ridge) penalization rgr_ridge = Ridge(alpha=0.2) rgr_ridge.fit(proj_operator, proj.ravel()) rec_l2 = rgr_ridge.coef_.reshape(l, l) # Reconstruction with L1 (Lasso) penalization # the best value of alpha was determined using cross validation # with LassoCV rgr_lasso = Lasso(alpha=0.001) rgr_lasso.fit(proj_operator, proj.ravel()) rec_l1 = rgr_lasso.coef_.reshape(l, l) plt.figure(figsize=(8, 3.3)) plt.subplot(131) plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest') plt.axis('off') plt.title('original image') plt.subplot(132) plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest') plt.title('L2 penalization') plt.axis('off') plt.subplot(133) plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest') plt.title('L1 penalization') plt.axis('off') plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) plt.show()
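A short follow-up, not part of the original example: it quantifies the claim in the module docstring (zero labeling error for L1, many mislabeled pixels for L2) by thresholding both reconstructions at 0.5 and comparing them against the ground-truth binary image. The 0.5 threshold is an illustrative choice.

# Follow-up check; assumes data, rec_l1 and rec_l2 from the script above.
import numpy as np

err_l2 = np.mean((rec_l2 > 0.5) != data.astype(bool))
err_l1 = np.mean((rec_l1 > 0.5) != data.astype(bool))
print('fraction of mislabeled pixels, L2 (Ridge): %.4f' % err_l2)
print('fraction of mislabeled pixels, L1 (Lasso): %.4f' % err_l1)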
bsd-3-clause
0asa/scikit-learn
sklearn/datasets/species_distributions.py
19
7870
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References: * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes: * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset """ # Authors: Peter Prettenhofer <[email protected]> # Jake Vanderplas <[email protected]> # # License: BSD 3 clause from io import BytesIO from os import makedirs from os.path import join from os.path import exists try: # Python 2 from urllib2 import urlopen PY2 = True except ImportError: # Python 3 from urllib.request import urlopen PY2 = False import numpy as np from sklearn.datasets.base import get_data_home, Bunch from sklearn.externals import joblib DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/" SAMPLES_URL = join(DIRECTORY_URL, "samples.zip") COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip") DATA_ARCHIVE_NAME = "species_coverage.pkz" def _load_coverage(F, header_length=6, dtype=np.int16): """Load a coverage file from an open file object. This will return a numpy array of the given dtype """ header = [F.readline() for i in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = header[b'NODATA_value'] if nodata != -9999: print(nodata) M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data """ if PY2: # Numpy recarray wants Python 2 str but not unicode names = F.readline().strip().split(',') else: # Numpy recarray wants Python 3 str but not bytes... names = F.readline().decode('ascii').strip().split(',') rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4') rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def fetch_species_distributions(data_home=None, download_if_missing=True): """Loader for species distribution dataset from Phillips et. al. (2006) Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. 
download_if_missing: optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns -------- The data is returned as a Bunch object with the following attributes: coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1623,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (619,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ------ This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes ----- * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset with scikit-learn """ data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05) dtype = np.int16 if not exists(join(data_home, DATA_ARCHIVE_NAME)): print('Downloading species data from %s to %s' % (SAMPLES_URL, data_home)) X = np.load(BytesIO(urlopen(SAMPLES_URL).read())) for f in X.files: fhandle = BytesIO(X[f]) if 'train' in f: train = _load_csv(fhandle) if 'test' in f: test = _load_csv(fhandle) print('Downloading coverage data from %s to %s' % (COVERAGES_URL, data_home)) X = np.load(BytesIO(urlopen(COVERAGES_URL).read())) coverages = [] for f in X.files: fhandle = BytesIO(X[f]) print(' - converting', f) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9) else: bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME)) return bunch
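A hedged usage sketch for the loader above: fetch the Bunch (downloading and caching the samples and coverages archives on first use) and rebuild the latitude/longitude grid with the construct_grids helper defined in the same module. The printed shapes are the ones documented in the fetch_species_distributions docstring.

# Illustrative usage of the module above.
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids

bunch = fetch_species_distributions()        # downloads on first call, then cached
xgrid, ygrid = construct_grids(bunch)

print(bunch.coverages.shape)                 # (14, 1592, 1212) environmental layers
print(bunch.train.shape)                     # (1623,) training occurrence records
print((xgrid.min(), xgrid.max(), ygrid.min(), ygrid.max()))  # grid extent in degrees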
bsd-3-clause
aray/spark
python/pyspark/worker.py
2
9636
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Worker that receives input from Piped RDD. """ from __future__ import print_function import os import sys import time import socket import traceback from pyspark.accumulators import _accumulatorRegistry from pyspark.broadcast import Broadcast, _broadcastRegistry from pyspark.taskcontext import TaskContext from pyspark.files import SparkFiles from pyspark.serializers import write_with_length, write_int, read_long, \ write_long, read_int, SpecialLengths, PythonEvalType, UTF8Deserializer, PickleSerializer, \ BatchedSerializer, ArrowStreamPandasSerializer from pyspark.sql.types import to_arrow_type, StructType from pyspark import shuffle pickleSer = PickleSerializer() utf8_deserializer = UTF8Deserializer() def report_times(outfile, boot, init, finish): write_int(SpecialLengths.TIMING_DATA, outfile) write_long(int(1000 * boot), outfile) write_long(int(1000 * init), outfile) write_long(int(1000 * finish), outfile) def add_path(path): # worker can be used, so donot add path multiple times if path not in sys.path: # overwrite system packages sys.path.insert(1, path) def read_command(serializer, file): command = serializer._read_with_length(file) if isinstance(command, Broadcast): command = serializer.loads(command.value) return command def chain(f, g): """chain two functions together """ return lambda *a: g(f(*a)) def wrap_udf(f, return_type): if return_type.needConversion(): toInternal = return_type.toInternal return lambda *a: toInternal(f(*a)) else: return lambda *a: f(*a) def wrap_pandas_udf(f, return_type): # If the return_type is a StructType, it indicates this is a groupby apply udf, # and has already been wrapped under apply(), otherwise, it's a vectorized column udf. # We can distinguish these two by return type because in groupby apply, we always specify # returnType as a StructType, and in vectorized column udf, StructType is not supported. 
# # TODO: Look into refactoring use of StructType to be more flexible for future pandas_udfs if isinstance(return_type, StructType): return lambda *a: f(*a) else: arrow_return_type = to_arrow_type(return_type) def verify_result_length(*a): result = f(*a) if not hasattr(result, "__len__"): raise TypeError("Return type of the user-defined functon should be " "Pandas.Series, but is {}".format(type(result))) if len(result) != len(a[0]): raise RuntimeError("Result vector from pandas_udf was not the required length: " "expected %d, got %d" % (len(a[0]), len(result))) return result return lambda *a: (verify_result_length(*a), arrow_return_type) def read_single_udf(pickleSer, infile, eval_type): num_arg = read_int(infile) arg_offsets = [read_int(infile) for i in range(num_arg)] row_func = None for i in range(read_int(infile)): f, return_type = read_command(pickleSer, infile) if row_func is None: row_func = f else: row_func = chain(row_func, f) # the last returnType will be the return type of UDF if eval_type == PythonEvalType.SQL_PANDAS_UDF: return arg_offsets, wrap_pandas_udf(row_func, return_type) else: return arg_offsets, wrap_udf(row_func, return_type) def read_udfs(pickleSer, infile, eval_type): num_udfs = read_int(infile) udfs = {} call_udf = [] for i in range(num_udfs): arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type) udfs['f%d' % i] = udf args = ["a[%d]" % o for o in arg_offsets] call_udf.append("f%d(%s)" % (i, ", ".join(args))) # Create function like this: # lambda a: (f0(a0), f1(a1, a2), f2(a3)) # In the special case of a single UDF this will return a single result rather # than a tuple of results; this is the format that the JVM side expects. mapper_str = "lambda a: (%s)" % (", ".join(call_udf)) mapper = eval(mapper_str, udfs) func = lambda _, it: map(mapper, it) if eval_type == PythonEvalType.SQL_PANDAS_UDF: ser = ArrowStreamPandasSerializer() else: ser = BatchedSerializer(PickleSerializer(), 100) # profiling is not supported for UDF return func, None, ser, ser def main(infile, outfile): try: boot_time = time.time() split_index = read_int(infile) if split_index == -1: # for unit tests exit(-1) version = utf8_deserializer.loads(infile) if version != "%d.%d" % sys.version_info[:2]: raise Exception(("Python in worker has different version %s than that in " + "driver %s, PySpark cannot run with different minor versions." 
+ "Please check environment variables PYSPARK_PYTHON and " + "PYSPARK_DRIVER_PYTHON are correctly set.") % ("%d.%d" % sys.version_info[:2], version)) # initialize global state taskContext = TaskContext._getOrCreate() taskContext._stageId = read_int(infile) taskContext._partitionId = read_int(infile) taskContext._attemptNumber = read_int(infile) taskContext._taskAttemptId = read_long(infile) shuffle.MemoryBytesSpilled = 0 shuffle.DiskBytesSpilled = 0 _accumulatorRegistry.clear() # fetch name of workdir spark_files_dir = utf8_deserializer.loads(infile) SparkFiles._root_directory = spark_files_dir SparkFiles._is_running_on_worker = True # fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH add_path(spark_files_dir) # *.py files that were added will be copied here num_python_includes = read_int(infile) for _ in range(num_python_includes): filename = utf8_deserializer.loads(infile) add_path(os.path.join(spark_files_dir, filename)) if sys.version > '3': import importlib importlib.invalidate_caches() # fetch names and values of broadcast variables num_broadcast_variables = read_int(infile) for _ in range(num_broadcast_variables): bid = read_long(infile) if bid >= 0: path = utf8_deserializer.loads(infile) _broadcastRegistry[bid] = Broadcast(path=path) else: bid = - bid - 1 _broadcastRegistry.pop(bid) _accumulatorRegistry.clear() eval_type = read_int(infile) if eval_type == PythonEvalType.NON_UDF: func, profiler, deserializer, serializer = read_command(pickleSer, infile) else: func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type) init_time = time.time() def process(): iterator = deserializer.load_stream(infile) serializer.dump_stream(func(split_index, iterator), outfile) if profiler: profiler.profile(process) else: process() except Exception: try: write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile) write_with_length(traceback.format_exc().encode("utf-8"), outfile) except IOError: # JVM close the socket pass except Exception: # Write the error to stderr if it happened while serializing print("PySpark worker failed with exception:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) exit(-1) finish_time = time.time() report_times(outfile, boot_time, init_time, finish_time) write_long(shuffle.MemoryBytesSpilled, outfile) write_long(shuffle.DiskBytesSpilled, outfile) # Mark the beginning of the accumulators section of the output write_int(SpecialLengths.END_OF_DATA_SECTION, outfile) write_int(len(_accumulatorRegistry), outfile) for (aid, accum) in _accumulatorRegistry.items(): pickleSer._write_with_length((aid, accum._value), outfile) # check end of stream if read_int(infile) == SpecialLengths.END_OF_STREAM: write_int(SpecialLengths.END_OF_STREAM, outfile) else: # write a different value to tell JVM to not reuse this worker write_int(SpecialLengths.END_OF_DATA_SECTION, outfile) exit(-1) if __name__ == '__main__': # Read a local port to connect to from stdin java_port = int(sys.stdin.readline()) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(("127.0.0.1", java_port)) sock_file = sock.makefile("rwb", 65536) main(sock_file, sock_file)
apache-2.0
brightchen/h2o-3
h2o-py/h2o/h2o.py
1
78121
import warnings warnings.simplefilter('always', DeprecationWarning) import os import itertools import functools import os.path import re import urllib import urllib2 import imp import tabulate from connection import H2OConnection from job import H2OJob from expr import ExprNode from frame import H2OFrame, _py_tmp_key, _is_list_of_lists from model import H2OBinomialModel,H2OAutoEncoderModel,H2OClusteringModel,H2OMultinomialModel,H2ORegressionModel import h2o_model_builder def lazy_import(path): """ Import a single file or collection of files. Parameters ---------- path : str A path to a data file (remote or local). :return: A new H2OFrame """ return [_import(p)[0] for p in path] if isinstance(path,(list,tuple)) else _import(path) def _import(path): j = H2OConnection.get_json(url_suffix="ImportFiles", path=path) if j['fails']: raise ValueError("ImportFiles of " + path + " failed on " + str(j['fails'])) return j['destination_frames'] def upload_file(path, destination_frame=""): """ Upload a dataset at the path given from the local machine to the H2O cluster. Parameters ---------- path : str A path specifying the location of the data to upload. destination_frame : H2OFrame The name of the H2O Frame in the H2O Cluster. :return: A new H2OFrame """ fui = {"file": os.path.abspath(path)} destination_frame = _py_tmp_key() if destination_frame == "" else destination_frame H2OConnection.post_json(url_suffix="PostFile", file_upload_info=fui,destination_frame=destination_frame) return H2OFrame(raw_id=destination_frame) def import_file(path=None, destination_frame="", parse=True, header=(-1, 0, 1), sep="", col_names=None, col_types=None, na_strings=None): """ Import a frame from a file (remote or local machine). If you run H2O on Hadoop, you can access to HDFS Parameters ---------- path : str A path specifying the location of the data to import. destination_frame : (Optional) The unique hex key assigned to the imported file. If none is given, a key will automatically be generated. parse : (Optional) A logical value indicating whether the file should be parsed after import. header : (Optional) -1 means the first line is data, 0 means guess, 1 means first line is header. sep : (Optional) The field separator character. Values on each line of the file are separated by this character. If sep = "", the parser will automatically detect the separator. col_names : (Optional) A list of column names for the file. col_types : (Optional) A list of types to specify whether columns should be forced to a certain type upon import parsing. na_strings : (Optional) A list of strings which are to be interpreted as missing values. :return: A new H2OFrame """ if not parse: return lazy_import(path) return H2OFrame(file_path=path, destination_frame=destination_frame, header=header, separator=sep, column_names=col_names, column_types=col_types, na_strings=na_strings) def parse_setup(raw_frames, destination_frame="", header=(-1, 0, 1), separator="", column_names=None, column_types=None, na_strings=None): """ Parameters ---------- raw_frames : H2OFrame A collection of imported file frames destination_frame : (Optional) The unique hex key assigned to the imported file. If none is given, a key will automatically be generated. parse : (Optional) A logical value indicating whether the file should be parsed after import. header : (Optional) -1 means the first line is data, 0 means guess, 1 means first line is header. sep : (Optional) The field separator character. Values on each line of the file are separated by this character. 
If sep = "", the parser will automatically detect the separator. col_names : (Optional) A list of column names for the file. col_types : (Optional) A list of types to specify whether columns should be forced to a certain type upon import parsing. na_strings : (Optional) A list of strings which are to be interpreted as missing values. :return: A ParseSetup "object" """ # The H2O backend only accepts things that are quoted if isinstance(raw_frames, unicode): raw_frames = [raw_frames] j = H2OConnection.post_json(url_suffix="ParseSetup", source_frames=[_quoted(id) for id in raw_frames]) if destination_frame: j["destination_frame"] = destination_frame if not isinstance(header, tuple): if header not in (-1, 0, 1): raise ValueError("header should be -1, 0, or 1") j["check_header"] = header if separator: if not isinstance(separator, basestring) or len(separator) != 1: raise ValueError("separator should be a single character string") j["separator"] = separator if column_names: j["column_names"] = column_names if column_types: j["column_types"] = column_types if na_strings: j["na_strings"] = na_strings return j def parse(setup, h2o_name, first_line_is_header=(-1, 0, 1)): """ Trigger a parse; blocking; removeFrame just keep the Vecs. Parameters ---------- setup : dict The result of calling parse_setup. h2o_name : H2OFrame The name of the H2O Frame on the back end. first_line_is_header : int -1 means data, 0 means guess, 1 means header. :return: A new parsed object """ # Parse parameters (None values provided by setup) p = { 'destination_frame' : h2o_name, 'parse_type' : None, 'separator' : None, 'single_quotes' : None, 'check_header' : None, 'number_columns' : None, 'chunk_size' : None, 'delete_on_done' : True, 'blocking' : False, } if setup["destination_frame"]: setup["destination_frame"] = _quoted(setup["destination_frame"]) if isinstance(first_line_is_header, tuple): first_line_is_header = setup["check_header"] if isinstance(setup["separator"], basestring): setup["separator"] = ord(setup["separator"]) if setup["column_names"]: setup["column_names"] = [_quoted(name) for name in setup["column_names"]] p["column_names"] = None if setup["column_types"]: setup["column_types"] = [_quoted(name) for name in setup["column_types"]] p["column_types"] = None if setup["na_strings"]: if _is_list_of_lists(setup["na_strings"]): setup["na_strings"] = [[_quoted(na) for na in col] if col is not None else [] for col in setup["na_strings"]] else: setup["na_strings"] = [_quoted(na) for na in setup["na_strings"]] # quote the strings setup["na_strings"] = '\"' + str(list(itertools.repeat(setup["na_strings"], len(setup["column_types"])))) + '\"' p["na_strings"] = None # update the parse parameters with the parse_setup values p.update({k: v for k, v in setup.iteritems() if k in p}) p["check_header"] = first_line_is_header # Extract only 'name' from each src in the array of srcs p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']] # Request blocking parse j = H2OJob(H2OConnection.post_json(url_suffix="Parse", **p), "Parse").poll() return j.jobs def parse_raw(setup, id=None, first_line_is_header=(-1, 0, 1)): """ Used in conjunction with lazy_import and parse_setup in order to make alterations before parsing. Parameters ---------- setup : dict Result of h2o.parse_setup id : str An optional id for the frame. 
first_line_is_header : int -1,0,1 if the first line is to be used as the header :return: An H2OFrame object """ id = setup["destination_frame"] fr = H2OFrame() parsed = parse(setup, id, first_line_is_header) fr._computed = True fr._id = id fr._keep = True fr._nrows = int(H2OFrame(expr=ExprNode("nrow", fr))._scalar()) #parsed['rows'] fr._ncols = parsed["number_columns"] fr._col_names = parsed['column_names'] if parsed["column_names"] else ["C" + str(x) for x in range(1,fr._ncols+1)] return fr def _quoted(key, replace=True): if key == None: return "\"\"" #mimic behavior in R to replace "%" and "&" characters, which break the call to /Parse, with "." key = key.replace("%", ".") key = key.replace("&", ".") is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0 key = key if is_quoted else '"' + key + '"' return key def assign(data,id): if data._computed: rapids(data._id,id) data._id = id data._keep=True # named things are always safe return data def which(condition): """ Parameters ---------- condition : H2OFrame A conditional statement. :return: A H2OFrame of 1 column filled with 0-based indices for which the condition is True """ return H2OFrame(expr=ExprNode("h2o.which",condition))._frame() def ifelse(test,yes,no): """ Semantically equivalent to R's ifelse. Based on the booleans in the test vector, the output has the values of the yes and no vectors interleaved (or merged together). Parameters ---------- test : H2OFrame A "test" H2OFrame yes : H2OFrame A "yes" H2OFrame no : H2OFrame A "no" H2OFrame :return: An H2OFrame """ return H2OFrame(expr=ExprNode("ifelse",test,yes,no))._frame() def get_future_model(future_model): """ Waits for the future model to finish building, and then returns the model. Parameters ---------- future_model : H2OModelFuture an H2OModelFuture object :return: a resolved model (i.e. an H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel, ...) """ return h2o_model_builder._resolve_model(future_model) def get_model(model_id): """ Return the specified model Parameters ---------- model_id : str The model identification in h2o :return: H2OModel """ model_json = H2OConnection.get_json("Models/"+model_id)["models"][0] model_type = model_json["output"]["model_category"] if model_type=="Binomial": return H2OBinomialModel(model_id, model_json) elif model_type=="Clustering": return H2OClusteringModel(model_id, model_json) elif model_type=="Regression": return H2ORegressionModel(model_id, model_json) elif model_type=="Multinomial": return H2OMultinomialModel(model_id, model_json) elif model_type=="AutoEncoder": return H2OAutoEncoderModel(model_id, model_json) else: raise NotImplementedError(model_type) def get_frame(frame_id): """ Obtain a handle to the frame in H2O with the frame_id key. :return: An H2OFrame """ return H2OFrame.get_frame(frame_id) def ou(): """ Where is my baguette!? :return: the name of the baguette. oh uhr uhr huhr """ from inspect import stack return stack()[2][1] def no_progress(): """ Disable the progress bar from flushing to stdout. The completed progress bar is printed when a job is complete so as to demarcate a log file. :return: None """ H2OJob.__PROGRESS_BAR__ = False def show_progress(): """ Enable the progress bar. (Progress bar is enabled by default). 
:return: None """ H2OJob.__PROGRESS_BAR__ = True def log_and_echo(message): """ Log a message on the server-side logs This is helpful when running several pieces of work one after the other on a single H2O cluster and you want to make a notation in the H2O server side log where one piece of work ends and the next piece of work begins. Sends a message to H2O for logging. Generally used for debugging purposes. Parameters ---------- message : str A character string with the message to write to the log. :return: None """ if message is None: message = "" H2OConnection.post_json("LogAndEcho", message=message) def remove(object): """ Remove object from H2O. This is a "hard" delete of the object. It removes all subparts. Parameters ---------- object : H2OFrame or str The object pointing to the object to be removed. :return: None """ if object is None: raise ValueError("remove with no object is not supported, for your protection") if isinstance(object, H2OFrame): H2OConnection.delete("DKV/"+object._id) if isinstance(object, str): H2OConnection.delete("DKV/"+object) def remove_all(): """ Remove all objects from H2O. :return None """ H2OConnection.delete("DKV") def removeFrameShallow(key): """ Do a shallow DKV remove of the frame (does not remove any internal Vecs). This is a "soft" delete. Just removes the top level pointer, but all big data remains! Parameters ---------- key : str A Frame Key to be removed :return: None """ rapids("(removeframe '"+key+"')") return None def rapids(expr, id=None): """ Fire off a Rapids expression. Parameters ---------- expr : str The rapids expression (ascii string). :return: The JSON response of the Rapids execution """ return H2OConnection.post_json("Rapids", ast=urllib.quote(expr), _rest_version=99) if id is None else H2OConnection.post_json("Rapids", ast=urllib.quote(expr), id=id, _rest_version=99) def ls(): """ List Keys on an H2O Cluster :return: Returns a list of keys in the current H2O instance """ return H2OFrame(expr=ExprNode("ls")).as_data_frame() def frame(frame_id, exclude=""): """ Retrieve metadata for a id that points to a Frame. Parameters ---------- frame_id : str A pointer to a Frame in H2O. :return: Meta information on the frame """ return H2OConnection.get_json("Frames/" + urllib.quote(frame_id+exclude)) def frames(): """ Retrieve all the Frames. :return: Meta information on the frames """ return H2OConnection.get_json("Frames") def download_pojo(model,path="", get_jar=True): """ Download the POJO for this model to the directory specified by path (no trailing slash!). If path is "", then dump to screen. Parameters ---------- model : H2OModel Retrieve this model's scoring POJO. path : str An absolute path to the directory where POJO should be saved. get_jar : bool Retreive the h2o genmodel jar also. :return: None """ java = H2OConnection.get( "Models.java/"+model._id ) file_path = path + "/" + model._id + ".java" if path == "": print java.text else: with open(file_path, 'wb') as f: f.write(java.text) if get_jar and path!="": url = H2OConnection.make_url("h2o-genmodel.jar") filename = path + "/" + "h2o-genmodel.jar" response = urllib2.urlopen(url) with open(filename, "wb") as f: f.write(response.read()) def download_csv(data, filename): """ Download an H2O data set to a CSV file on the local disk. Warning: Files located on the H2O server may be very large! Make sure you have enough hard drive space to accommodate the entire file. Parameters ---------- data : H2OFrame An H2OFrame object to be downloaded. 
filename : str A string indicating the name that the CSV file should be should be saved to. :return: None """ data._eager() if not isinstance(data, H2OFrame): raise(ValueError, "`data` argument must be an H2OFrame, but got " + type(data)) url = "http://{}:{}/3/DownloadDataset?frame_id={}".format(H2OConnection.ip(),H2OConnection.port(),data._id) with open(filename, 'w') as f: f.write(urllib2.urlopen(url).read()) def download_all_logs(dirname=".",filename=None): """ Download H2O Log Files to Disk Parameters ---------- dirname : str (Optional) A character string indicating the directory that the log file should be saved in. filename : str (Optional) A string indicating the name that the CSV file should be :return: path of logs written (as a string) """ url = 'http://{}:{}/Logs/download'.format(H2OConnection.ip(),H2OConnection.port()) response = urllib2.urlopen(url) if not os.path.exists(dirname): os.mkdir(dirname) if filename == None: for h in response.headers.headers: if 'filename=' in h: filename = h.split("filename=")[1].strip() break path = os.path.join(dirname,filename) print "Writing H2O logs to " + path with open(path, 'w') as f: f.write(urllib2.urlopen(url).read()) return path def save_model(model, path="", force=False): """ Save an H2O Model Object to Disk. Parameters ---------- model : H2OModel The model object to save. path : str A path to save the model at (hdfs, s3, local) force : bool Overwrite destination directory in case it exists or throw exception if set to false. :return: the path of the saved model (string) """ path=os.path.join(os.getcwd() if path=="" else path,model._id) return H2OConnection.get_json("Models.bin/"+model._id,dir=path,force=force,_rest_version=99)["dir"] def load_model(path): """ Load a saved H2O model from disk. Example: >>> path = h2o.save_model(my_model,dir=my_path) >>> h2o.load_model(path) # use the result of save_model Parameters ---------- path : str The full path of the H2O Model to be imported. :return: the model """ res = H2OConnection.post_json("Models.bin/",dir=path,_rest_version=99) return get_model(res['models'][0]['model_id']['name']) def cluster_status(): """ TODO: This isn't really a cluster status... it's a node status check for the node we're connected to. This is possibly confusing because this can come back without warning, but if a user tries to do any remoteSend, they will get a "cloud sick warning" Retrieve information on the status of the cluster running H2O. :return: None """ cluster_json = H2OConnection.get_json("Cloud?skip_ticks=true") print "Version: {0}".format(cluster_json['version']) print "Cloud name: {0}".format(cluster_json['cloud_name']) print "Cloud size: {0}".format(cluster_json['cloud_size']) if cluster_json['locked']: print "Cloud is locked\n" else: print "Accepting new members\n" if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0: print "No nodes found" return status = [] for node in cluster_json['nodes']: for k, v in zip(node.keys(),node.values()): if k in ["h2o", "healthy", "last_ping", "num_cpus", "sys_load", "mem_value_size", "total_value_size", "free_mem", "tot_mem", "max_mem", "free_disk", "max_disk", "pid", "num_keys", "tcps_active", "open_fds", "rpcs_active"]: status.append(k+": {0}".format(v)) print ', '.join(status) print def init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False, license=None, max_mem_size_GB=None, min_mem_size_GB=None, ice_root=None, strict_version_check=False): """ Initiate an H2O connection to the specified ip and port. 
  Parameters
  ----------
  ip : str
    A string representing the hostname or IP address of the server where H2O is running.
  port : int
    A port, default is 54321
  size : int
    The expected number of h2o instances (ignored if start_h2o is True)
  start_h2o : bool
    A boolean dictating whether this module should start the H2O jvm. An attempt is made
    anyway if _connect fails.
  enable_assertions : bool
    If start_h2o, pass `-ea` as a VM option.
  license : str
    If not None, is a path to a license file.
  max_mem_size_GB : int
    Maximum heap size (jvm option Xmx) in gigabytes.
  min_mem_size_GB : int
    Minimum heap size (jvm option Xms) in gigabytes.
  ice_root : str
    A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
  :return: None
  """
  H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,ice_root=ice_root,strict_version_check=strict_version_check)
  return None
def export_file(frame,path,force=False):
  """
  Export a given H2OFrame to a path on the machine this python session is currently connected to.
  To view the current session, call h2o.cluster_info().
  Parameters
  ----------
  frame : H2OFrame
    The Frame to save to disk.
  path : str
    The path to the save point on disk.
  force : bool
    Overwrite any preexisting file with the same path
  :return: None
  """
  frame._eager()
  H2OJob(H2OConnection.get_json("Frames/"+frame._id+"/export/"+path+"/overwrite/"+("true" if force else "false")), "Export File").poll()
def cluster_info():
  """
  Display the current H2O cluster information.
  :return: None
  """
  H2OConnection._cluster_info()
def shutdown(conn=None, prompt=True):
  """
  Shut down the specified instance. All data will be lost.
  This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O instance.
  Parameters
  ----------
  conn : H2OConnection
    An H2OConnection object containing the IP address and port of the server running H2O.
  prompt : bool
    A logical value indicating whether to prompt the user before shutting down the H2O server.
:return: None """ if conn == None: conn = H2OConnection.current_connection() H2OConnection._shutdown(conn=conn, prompt=prompt) def deeplearning(x,y=None,validation_x=None,validation_y=None,training_frame=None,model_id=None, overwrite_with_best_model=None,validation_frame=None,checkpoint=None,autoencoder=None, use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None, seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None, momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None, input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None, initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None, score_validation_samples=None,score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None, max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None, max_after_balance_size=None,score_validation_sampling=None,diagnostics=None,variable_importances=None, fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None, shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None, max_categorical_features=None,reproducible=None,export_weights_and_biases=None,offset_column=None,weights_column=None, nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None): """ Build a supervised Deep Learning model Performs Deep Learning neural networks on an H2OFrame Parameters ---------- x : H2OFrame An H2OFrame containing the predictors in the model. y : H2OFrame An H2OFrame of the response variable in the model. training_frame : H2OFrame (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. overwrite_with_best_model : bool Logical. If True, overwrite the final model with the best model found during training. Defaults to True. validation_frame : H2OFrame (Optional) An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0 checkpoint : H2ODeepLearningModel "Model checkpoint (either key or H2ODeepLearningModel) to resume training with." autoencoder : bool Enable auto-encoder for model building. use_all_factor_levels : bool Logical. Use all factor levels of categorical variance. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder. activation : str A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout" hidden : list Hidden layer sizes (e.g. c(100,100)) epochs : float How many times the dataset should be iterated (streamed), can be fractional train_samples_per_iteration : int Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default) seed : int Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded adaptive_rate : bool Logical. 
Adaptive learning rate (ADAELTA) rho : float Adaptive learning rate time decay factor (similarity to prior updates) epsilon : float Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4 rate : float Learning rate (higher => less stable, lower => slower convergence) rate_annealing : float Learning rate annealing: \eqn{(rate)/(1 + rate_annealing*samples) rate_decay : float Learning rate decay factor between layers (N-th layer: \eqn{rate*\alpha^(N-1)) momentum_start : float Initial momentum at the beginning of training (try 0.5) momentum_ramp : float Number of training samples for which momentum increases momentum_stable : float Final momentum after the amp is over (try 0.99) nesterov_accelerated_gradient : bool Logical. Use Nesterov accelerated gradient (recommended) input_dropout_ratio : float A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling). hidden_dropout_ratios : float Input layer dropout ratio (can improve generalization) specify one value per hidden layer, defaults to 0.5 l1 : float L1 regularization (can add stability and improve generalization, causes many weights to become 0) l2 : float L2 regularization (can add stability and improve generalization, causes many weights to be small) max_w2 : float Constraint for squared sum of incoming weights per unit (e.g. Rectifier) initial_weight_distribution : str Can be "Uniform", "UniformAdaptive", or "Normal" initial_weight_scale : str Uniform: -value ... value, Normal: stddev loss : str Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental) distribution : str A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian" tweedie_power : float Tweedie power (only for Tweedie distribution, must be between 1 and 2) score_interval : int Shortest time interval (in secs) between model scoring score_training_samples : int Number of training set samples for scoring (0 for all) score_validation_samples : int Number of validation set samples for scoring (0 for all) score_duty_cycle : float Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring) classification_stop : float Stopping criterion for classification error fraction on training data (-1 to disable) regression_stop : float Stopping criterion for regression error (MSE) on training data (-1 to disable) quiet_mode : bool Enable quiet mode for less output to standard output max_confusion_matrix_size : int Max. size (number of classes) for confusion matrices to be shown max_hit_ratio_k : float Max number (top K) of predictions to use for hit ratio computation(for multi-class only, 0 to disable) balance_classes : bool Balance training data class counts via over/under-sampling (for imbalanced data) class_sampling_factors : list Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes. 
max_after_balance_size : float Maximum relative size of the training data after balancing class counts (can be less than 1.0) score_validation_sampling : Method used to sample validation dataset for scoring diagnostics : bool Enable diagnostics for hidden layers variable_importances : bool Compute variable importances for input features (Gedeon method) - can be slow for large networks) fast_mode : bool Enable fast mode (minor approximations in back-propagation) ignore_const_cols : bool Ignore constant columns (no information can be gained anyway) force_load_balance : bool Force extra load balancing to increase training speed for small datasets (to keep all cores busy) replicate_training_data : bool Replicate the entire training dataset onto every node for faster training single_node_mode : bool Run on a single node for fine-tuning of model parameters shuffle_training_data : bool Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to \eqn{numRows*numNodes sparse : bool Sparse data handling (Experimental) col_major : bool Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental) average_activation : float Average activation for sparse auto-encoder (Experimental) sparsity_beta : bool Sparsity regularization (Experimental) max_categorical_features : int Max. number of categorical features, enforced via hashing Experimental) reproducible : bool Force reproducibility on small data (will be slow - only uses 1 thread) export_weights_and_biases : bool Whether to export Neural Network weights and biases to H2O Frames" offset_column : H2OFrame Specify the offset column. weights_column : H2OFrame Specify the weights column. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo" keep_cross_validation_predictions : bool Whether to keep the predictions of the cross-validation models :return: Return a new classifier or regression model. 
""" parms = {k:v for k,v in locals().items() if k in ["y","training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="deeplearning" return h2o_model_builder.supervised(parms) def autoencoder(x,training_frame=None,model_id=None,overwrite_with_best_model=None,checkpoint=None, use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None, seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None, momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None, input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None, initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None, score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None, max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None, max_after_balance_size=None,diagnostics=None,variable_importances=None, fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None, shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None, max_categorical_features=None,reproducible=None,export_weights_and_biases=None): """ Build unsupervised auto encoder using H2O Deeplearning Parameters ---------- x : H2OFrame An H2OFrame containing the predictors in the model. training_frame : H2OFrame (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. overwrite_with_best_model : bool Logical. If True, overwrite the final model with the best model found during training. Defaults to True. checkpoint : H2ODeepLearningModel "Model checkpoint (either key or H2ODeepLearningModel) to resume training with." use_all_factor_levels : bool Logical. Use all factor levels of categorical variance. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder. activation : str A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout" hidden : list Hidden layer sizes (e.g. c(100,100)) epochs : float How many times the dataset should be iterated (streamed), can be fractional train_samples_per_iteration : int Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default) seed : int Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded adaptive_rate : bool Logical. Adaptive learning rate (ADAELTA) rho : float Adaptive learning rate time decay factor (similarity to prior updates) epsilon : float Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. 
Typical values are between 1.0e-10 and 1.0e-4 rate : float Learning rate (higher => less stable, lower => slower convergence) rate_annealing : float Learning rate annealing: \eqn{(rate)/(1 + rate_annealing*samples) rate_decay : float Learning rate decay factor between layers (N-th layer: \eqn{rate*\alpha^(N-1)) momentum_start : float Initial momentum at the beginning of training (try 0.5) momentum_ramp : int Number of training samples for which momentum increases momentum_stable : float Final momentum after the amp is over (try 0.99) nesterov_accelerated_gradient : bool Logical. Use Nesterov accelerated gradient (recommended) input_dropout_ratio : float A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling). hidden_dropout_ratios : float Input layer dropout ratio (can improve generalization) specify one value per hidden layer, defaults to 0.5 l1 : float L1 regularization (can add stability and improve generalization, causes many weights to become 0) l2: float L2 regularization (can add stability and improve generalization, causes many weights to be small) max_w2 : float Constraint for squared sum of incoming weights per unit (e.g. Rectifier) initial_weight_distribution : str Can be "Uniform", "UniformAdaptive", or "Normal" initial_weight_scale : str Uniform: -value ... value, Normal: stddev loss : str Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental) distribution : str A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian" tweedie_power : float Tweedie power (only for Tweedie distribution, must be between 1 and 2) score_interval : int Shortest time interval (in secs) between model scoring score_training_samples : int Number of training set samples for scoring (0 for all) score_duty_cycle : float Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring) classification_stop : float Stopping criterion for classification error fraction on training data (-1 to disable) regression_stop : float Stopping criterion for regression error (MSE) on training data (-1 to disable) quiet_mode : bool Enable quiet mode for less output to standard output max_confusion_matrix_size : int Max. size (number of classes) for confusion matrices to be shown max_hit_ratio_k : float Max number (top K) of predictions to use for hit ratio computation(for multi-class only, 0 to disable) balance_classes : bool Balance training data class counts via over/under-sampling (for imbalanced data) class_sampling_factors : list Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes. 
max_after_balance_size : float Maximum relative size of the training data after balancing class counts (can be less than 1.0) diagnostics : bool Enable diagnostics for hidden layers variable_importances : bool Compute variable importances for input features (Gedeon method) - can be slow for large networks fast_mode : bool Enable fast mode (minor approximations in back-propagation) ignore_const_cols : bool Ignore constant columns (no information can be gained anyway) force_load_balance : bool Force extra load balancing to increase training speed for small datasets (to keep all cores busy) replicate_training_data : bool Replicate the entire training dataset onto every node for faster training single_node_mode : bool Run on a single node for fine-tuning of model parameters shuffle_training_data : bool Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to numRows*numNodes) sparse : bool Sparse data handling (Experimental) col_major : bool Use a column major weight matrix for the input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental) average_activation : float Average activation for sparse auto-encoder (Experimental) sparsity_beta : float Sparsity regularization (Experimental) max_categorical_features : int Max. number of categorical features, enforced via hashing (Experimental) reproducible : bool Force reproducibility on small data (will be slow - only uses 1 thread) export_weights_and_biases : bool Whether to export Neural Network weights and biases to H2O Frames :return: H2OAutoEncoderModel """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="deeplearning" parms["autoencoder"]=True return h2o_model_builder.unsupervised(parms) def gbm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None, distribution=None,tweedie_power=None,ntrees=None,max_depth=None,min_rows=None, learn_rate=None,nbins=None,nbins_cats=None,validation_frame=None, balance_classes=None,max_after_balance_size=None,seed=None,build_tree_one_node=None, nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None, score_each_iteration=None,offset_column=None,weights_column=None,do_future=None,checkpoint=None): """ Builds gradient boosted classification trees and gradient boosted regression trees on a parsed data set. The default distribution function will guess the model type based on the response column type. To run properly, the response column must be numeric for "gaussian" or an enum for "bernoulli" or "multinomial". Parameters ---------- x : H2OFrame An H2OFrame containing the predictors in the model. y : H2OFrame An H2OFrame of the response variable in the model. training_frame : H2OFrame (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. distribution : str A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie" or "gaussian" tweedie_power : float Tweedie power (only for Tweedie distribution, must be between 1 and 2) ntrees : int A non-negative integer that determines the number of trees to grow. max_depth : int Maximum depth to grow the tree.
min_rows : int Minimum number of rows to assign to terminal nodes. learn_rate : float An integer from 0.0 to 1.0 nbins : int For numerical columns (real/int), build a histogram of this many bins, then split at the best point nbins_cats : int For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting. validation_frame : H2OFrame An H2OFrame object indicating the validation dataset used to contruct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0 balance_classes : bool logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data) max_after_balance_size : float Maximum relative size of the training data after balancing class counts (can be less than 1.0). Ignored if balance_classes is False, which is the default behavior. seed : int Seed for random numbers (affects sampling when balance_classes=T) build_tree_one_node : bool Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo" keep_cross_validation_predictions : bool Whether to keep the predictions of the cross-validation models score_each_iteration : bool Attempts to score each tree. offset_column : H2OFrame Specify the offset column. weights_column : H2OFrame Specify the weights column. :return: A new classifier or regression model. """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="gbm" return h2o_model_builder.supervised(parms) def glm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,validation_frame=None, max_iterations=None,beta_epsilon=None,solver=None,standardize=None,family=None,link=None, tweedie_variance_power=None,tweedie_link_power=None,alpha=None,prior=None,lambda_search=None, nlambdas=None,lambda_min_ratio=None,beta_constraints=None,offset_column=None,weights_column=None, nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None, intercept=None, Lambda=None, max_active_predictors=None, do_future=None, checkpoint=None): """ Build a Generalized Linear Model Fit a generalized linear model, specified by a response variable, a set of predictors, and a description of the error distribution. Parameters ---------- x : H2OFrame An H2OFrame containing the predictors in the model. y : H2OFrame An H2OFrame of the response variable in the model. training_frame : H2OFrame (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. validation_frame : H2OFrame An H2OFrame object containing the variables in the model. max_iterations : int A non-negative integer specifying the maximum number of iterations. beta_epsilon : int A non-negative number specifying the magnitude of the maximum difference between the coefficient estimates from successive iterations. 
Defines the convergence criterion for h2o.glm. solver : str A character string specifying the solver used: IRLSM (supports more features), L_BFGS (scales better for datasets with many columns) standardize : bool A logical value indicating whether the numeric predictors should be standardized to have a mean of 0 and a variance of 1 prior to training the models. family : str A character string specifying the distribution of the model: gaussian, binomial, poisson, gamma, tweedie. link : str A character string specifying the link function. The default is the canonical link for the family. The supported links for each of the family specifications are:\n "gaussian": "identity", "log", "inverse"\n "binomial": "logit", "log"\n "poisson": "log", "identity"\n "gamma": "inverse", "log", "identity"\n "tweedie": "tweedie" tweedie_variance_power : int A numeric specifying the power for the variance function when family = "tweedie". tweedie_link_power : int A numeric specifying the power for the link function when family = "tweedie". alpha : float A numeric in [0, 1] specifying the elastic-net mixing parameter. The elastic-net penalty is defined to be P(alpha, beta) = (1 - alpha)/2 * ||beta||_2^2 + alpha * ||beta||_1 = sum_j [(1 - alpha)/2 * beta_j^2 + alpha * |beta_j|], making alpha = 1 the lasso penalty and alpha = 0 the ridge penalty. Lambda : float A non-negative shrinkage parameter for the elastic-net, which multiplies P(alpha, beta) in the objective function. When Lambda = 0, no elastic-net penalty is applied and ordinary generalized linear models are fit. prior : float (Optional) A numeric specifying the prior probability of class 1 in the response when family = "binomial". The default prior is the observational frequency of class 1. lambda_search : bool A logical value indicating whether to conduct a search over the space of lambda values starting from the lambda max, given lambda is interpreted as lambda min. nlambdas : int The number of lambda values to use when lambda_search = TRUE. lambda_min_ratio : float Smallest value for lambda as a fraction of lambda.max. By default, if the number of observations is greater than the number of variables then lambda_min_ratio = 0.0001; if the number of observations is less than the number of variables then lambda_min_ratio = 0.01. beta_constraints : H2OFrame A data.frame or H2OParsedData object with the columns ["names", "lower_bounds", "upper_bounds", "beta_given"], where each row corresponds to a predictor in the GLM. "names" contains the predictor names, "lower_bounds"/"upper_bounds" are the lower and upper bounds of beta, and "beta_given" is a set of supplied starting values for the coefficients. offset_column : H2OFrame Specify the offset column. weights_column : H2OFrame Specify the weights column. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, used if fold_column is not specified. Must be "AUTO", "Random" or "Modulo" keep_cross_validation_predictions : bool Whether to keep the predictions of the cross-validation models intercept : bool Logical, include constant term (intercept) in the model max_active_predictors : int (Optional) Convergence criteria for number of predictors when using L1 penalty. Returns: A subclass of ModelBase is returned.
The specific subclass depends on the machine learning task at hand (if it's binomial classification, then an H2OBinomialModel is returned, if it's regression then an H2ORegressionModel is returned). The default print-out of the models is shown, but further GLM-specific information can be queried out of the object. Upon completion of the GLM, the resulting object has coefficients, normalized coefficients, residual/null deviance, aic, and a host of model metrics including MSE, AUC (for logistic regression), degrees of freedom, and confusion matrices. """ parms = {k.lower():v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} if "alpha" in parms and not isinstance(parms["alpha"], (list,tuple)): parms["alpha"] = [parms["alpha"]] parms["algo"]="glm" return h2o_model_builder.supervised(parms) def start_glm_job(x,y,validation_x=None,validation_y=None,**kwargs): """ Build a Generalized Linear Model Note: this function is the same as glm(), but it doesn't block on model-build. Instead, it returns an H2OModelFuture object immediately. The model can be retrieved from the H2OModelFuture object with get_future_model(). :return: H2OModelFuture """ kwargs["do_future"] = True return glm(x,y,validation_x,validation_y,**kwargs) def kmeans(x,validation_x=None,k=None,model_id=None,max_iterations=None,standardize=None,init=None,seed=None, nfolds=None,fold_column=None,fold_assignment=None,training_frame=None,validation_frame=None, user_points=None,ignored_columns=None,score_each_iteration=None,keep_cross_validation_predictions=None, ignore_const_cols=None,checkpoint=None): """ Performs k-means clustering on an H2O dataset. Parameters ---------- x : H2OFrame The data columns on which k-means operates. k : int The number of clusters. Must be between 1 and 1e7 inclusive. k may be omitted if the user specifies the initial centers in the init parameter. If k is not omitted, then it should be equal to the number of user-specified centers. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. max_iterations : int The maximum number of iterations allowed. Must be between 0 and 1e6 inclusive. standardize : bool Indicates whether the data should be standardized before running k-means. init : str A character string that selects the initial set of k cluster centers. Possible values are "Random": for random initialization, "PlusPlus": for k-means++ initialization, or "Furthest": for initialization at the furthest point from each successive center. Additionally, the user may specify the initial centers as a matrix, data.frame, H2OFrame, or list of vectors. For matrices, data.frames, and H2OFrames, each row of the respective structure is an initial center. For lists of vectors, each vector is an initial center. seed : int (Optional) Random seed used to initialize the cluster centroids. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, used if fold_column is not specified. Must be "AUTO", "Random" or "Modulo" :return: An instance of H2OClusteringModel.
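Examples
--------
A minimal usage sketch (illustrative only): the file path is hypothetical, and the call assumes this module is imported as ``h2o`` with a running H2O cluster.

>>> fr = h2o.import_file("points.csv")  # hypothetical path
>>> clusters = h2o.kmeans(x=fr, k=3, standardize=True, seed=1234)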
""" parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="kmeans" return h2o_model_builder.unsupervised(parms) def random_forest(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,mtries=None,sample_rate=None, build_tree_one_node=None,ntrees=None,max_depth=None,min_rows=None,nbins=None,nbins_cats=None, binomial_double_trees=None,validation_frame=None,balance_classes=None,max_after_balance_size=None, seed=None,offset_column=None,weights_column=None,nfolds=None,fold_column=None,fold_assignment=None, keep_cross_validation_predictions=None,checkpoint=None): """ Build a Big Data Random Forest Model Builds a Random Forest Model on an H2OFrame Parameters ---------- x : H2OFrame An H2OFrame containing the predictors in the model. y : H2OFrame An H2OFrame of the response variable in the model. training_frame : H2OFrame (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x. model_id : str (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated. mtries : int Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p} for classification, and p/3 for regression, where p is the number of predictors. sample_rate : float Sample rate, from 0 to 1.0. build_tree_one_node : bool Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets. ntrees : int A nonnegative integer that determines the number of trees to grow. max_depth : int Maximum depth to grow the tree. min_rows : int Minimum number of rows to assign to teminal nodes. nbins : int For numerical columns (real/int), build a histogram of this many bins, then split at the best point. nbins_cats : int For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting. binomial_double_trees : bool or binary classification: Build 2x as many trees (one per class) - can lead to higher accuracy. validation_frame : H2OFrame An H2OFrame object containing the variables in the model. balance_classes : bool logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data) max_after_balance_size : float Maximum relative size of the training data after balancing class counts (can be less than 1.0). Ignored if balance_classes is False, which is the default behavior. seed : int Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded offset_column : H2OFrame Specify the offset column. weights_column : H2OFrame Specify the weights column. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo" keep_cross_validation_predictions : bool Whether to keep the predictions of the cross-validation models :return: A new classifier or regression model. 
""" parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="drf" return h2o_model_builder.supervised(parms) def prcomp(x,validation_x=None,k=None,model_id=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None, training_frame=None,validation_frame=None,pca_method=None): """ Principal components analysis of a H2O dataset. Parameters ---------- k : int The number of principal components to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive. model_id : str (Optional) The unique hex key assigned to the resulting model. Automatically generated if none is provided. max_iterations : int The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive. transform : str A character string that indicates how the training data should be transformed before running PCA. Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for demeaning and dividing each column by its range (max - min). seed : int (Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration. use_all_factor_levels : bool (Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to FALSE. pca_method : str A character string that indicates how PCA should be calculated. Possible values are "GramSVD": distributed computation of the Gram matrix followed by a local SVD using the JAMA package, "Power": computation of the SVD using the power iteration method, "GLRM": fit a generalized low rank model with an l2 loss function (no regularization) and solve for the SVD using local matrix algebra. :return: a new dim reduction model """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="pca" return h2o_model_builder.unsupervised(parms) def svd(x,validation_x=None,training_frame=None,validation_frame=None,nv=None,max_iterations=None,transform=None,seed=None, use_all_factor_levels=None,svd_method=None): """ Singular value decomposition of a H2O dataset. Parameters ---------- nv : int The number of right singular vectors to be computed. This must be between 1 and min(ncol(training_frame), snrow(training_frame)) inclusive. max_iterations : int The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive.max_iterations The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive. transform : str A character string that indicates how the training data should be transformed before running SVD. Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for demeaning and dividing each column by its range (max - min). 
seed : int (Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration. use_all_factor_levels : bool (Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to TRUE. svd_method : str A character string that indicates how SVD should be calculated. Possible values are "GramSVD": distributed computation of the Gram matrix followed by a local SVD using the JAMA package, "Power": computation of the SVD using the power iteration method, "Randomized": approximate SVD by projecting onto a random subspace. :return: a new dim reduction model """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="svd" parms['_rest_version']=99 return h2o_model_builder.unsupervised(parms) def glrm(x,validation_x=None,training_frame=None,validation_frame=None,k=None,max_iterations=None,transform=None,seed=None, ignore_const_cols=None,loss=None,multi_loss=None,loss_by_col=None,loss_by_col_idx=None,regularization_x=None, regularization_y=None,gamma_x=None,gamma_y=None,init_step_size=None,min_step_size=None,init=None,user_points=None,recover_svd=None): """ Builds a generalized low rank model of a H2O dataset. Parameters ---------- k : int The rank of the resulting decomposition. This must be between 1 and the number of columns in the training frame inclusive. max_iterations : int The maximum number of iterations to run the optimization loop. Each iteration consists of an update of the X matrix, followed by an update of the Y matrix. transform : str A character string that indicates how the training data should be transformed before running GLRM. Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for demeaning and dividing each column by its range (max - min). seed : int (Optional) Random seed used to initialize the X and Y matrices. ignore_const_cols : bool (Optional) A logical value indicating whether to ignore constant columns in the training frame. A column is constant if all of its non-missing values are the same value. loss : str A character string indicating the default loss function for numeric columns. Possible values are "Quadratic" (default), "L1", "Huber", "Poisson", "Hinge", and "Logistic". multi_loss : str A character string indicating the default loss function for enum columns. Possible values are "Categorical" and "Ordinal". loss_by_col : str (Optional) A list of strings indicating the loss function for specific columns by corresponding index in loss_by_col_idx. Will override loss for numeric columns and multi_loss for enum columns. loss_by_col_idx : str (Optional) A list of column indices to which the corresponding loss functions in loss_by_col are assigned. Must be zero indexed. regularization_x : str A character string indicating the regularization function for the X matrix. Possible values are "None" (default), "Quadratic", "L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", and "Simplex". regularization_y : str A character string indicating the regularization function for the Y matrix. 
Possible values are "None" (default), "Quadratic", "L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", and "Simplex". gamma_x : float The weight on the X matrix regularization term. gamma_y : float The weight on the Y matrix regularization term. init_step_size : float Initial step size. Divided by number of columns in the training frame when calculating the proximal gradient update. The algorithm begins at init_step_size and decreases the step size at each iteration until a termination condition is reached. min_step_size : float Minimum step size upon which the algorithm is terminated. init : str A character string indicating how to select the initial Y matrix. Possible values are "Random": for initialization to a random array from the standard normal distribution, "PlusPlus": for initialization using the clusters from k-means++ initialization, or "SVD": for initialization using the first k (approximate) right singular vectors. Additionally, the user may specify the initial Y as a matrix, data.frame, H2OFrame, or list of vectors. recover_svd : bool A logical value indicating whether the singular values and eigenvectors should be recovered during post-processing of the generalized low rank decomposition. :return: a new dim reduction model """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="glrm" parms['_rest_version']=99 return h2o_model_builder.unsupervised(parms) def naive_bayes(x,y,validation_x=None,validation_y=None,training_frame=None,validation_frame=None, laplace=None,threshold=None,eps=None,compute_metrics=None,offset_column=None,weights_column=None, balance_classes=None,max_after_balance_size=None, nfolds=None,fold_column=None,fold_assignment=None, keep_cross_validation_predictions=None,checkpoint=None): """ The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset. When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability calculation during prediction. Parameters ---------- laplace : int A positive number controlling Laplace smoothing. The default zero disables smoothing. threshold : float The minimum standard deviation to use for observations without enough data. Must be at least 1e-10. eps : float A threshold cutoff to deal with numeric instability, must be positive. compute_metrics : bool A logical value indicating whether model metrics should be computed. Set to FALSE to reduce the runtime of the algorithm. training_frame : H2OFrame Training Frame validation_frame : H2OFrame Validation Frame offset_column : H2OFrame Specify the offset column. weights_column : H2OFrame Specify the weights column. nfolds : int (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty. 
fold_column : H2OFrame (Optional) Column with cross-validation fold index assignment per observation fold_assignment : str Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo" keep_cross_validation_predictions : bool Whether to keep the predictions of the cross-validation models :return: Returns an H2OBinomialModel if the response has two categorical levels, H2OMultinomialModel otherwise. """ parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None} parms["algo"]="naivebayes" return h2o_model_builder.supervised(parms) def create_frame(id = None, rows = 10000, cols = 10, randomize = True, value = 0, real_range = 100, categorical_fraction = 0.2, factors = 100, integer_fraction = 0.2, integer_range = 100, binary_fraction = 0.1, binary_ones_fraction = 0.02, missing_fraction = 0.01, response_factors = 2, has_response = False, seed=None): """ Data Frame Creation in H2O. Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user. Parameters ---------- id : str A string indicating the destination key. If empty, this will be auto-generated by H2O. rows : int The number of rows of data to generate. cols : int The number of columns of data to generate. Excludes the response column if has_response == True. randomize : bool A logical value indicating whether data values should be randomly generated. This must be TRUE if either categorical_fraction or integer_fraction is non-zero. value : int If randomize == FALSE, then all real-valued entries will be set to this value. real_range : float The range of randomly generated real values. categorical_fraction : float The fraction of total columns that are categorical. factors : int The number of (unique) factor levels in each categorical column. integer_fraction : float The fraction of total columns that are integer-valued. integer_range : list The range of randomly generated integer values. binary_fraction : float The fraction of total columns that are binary-valued. binary_ones_fraction : float The fraction of values in a binary column that are set to 1. missing_fraction : float The fraction of total entries in the data frame that are set to NA. response_factors : int If has_response == TRUE, then this is the number of factor levels in the response column. has_response : bool A logical value indicating whether an additional response column should be pre-pended to the final H2O data frame. If set to TRUE, the total number of columns will be cols+1. seed : int A seed used to generate random values when randomize = TRUE. :return: the H2OFrame that was created """ parms = {"dest": _py_tmp_key() if id is None else id, "rows": rows, "cols": cols, "randomize": randomize, "value": value, "real_range": real_range, "categorical_fraction": categorical_fraction, "factors": factors, "integer_fraction": integer_fraction, "integer_range": integer_range, "binary_fraction": binary_fraction, "binary_ones_fraction": binary_ones_fraction, "missing_fraction": missing_fraction, "response_factors": response_factors, "has_response": has_response, "seed": -1 if seed is None else seed, } H2OJob(H2OConnection.post_json("CreateFrame", **parms), "Create Frame").poll() return get_frame(parms["dest"]) def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None): """ Categorical Interaction Feature Creation in H2O. 
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by the user. Parameters ---------- data : H2OFrame the H2OFrame that holds the target categorical columns. factors : list Factor columns (either indices or column names). pairwise : bool Whether to create pairwise interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors. max_factors : int Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made) min_occurrence : int Min. occurrence threshold for factor levels in pair-wise interaction terms destination_frame : str A string indicating the destination key. If empty, this will be auto-generated by H2O. :return: H2OFrame """ data._eager() factors = [data.names[n] if isinstance(n,int) else n for n in factors] parms = {"dest": _py_tmp_key() if destination_frame is None else destination_frame, "source_frame": data._id, "factor_columns": [_quoted(f) for f in factors], "pairwise": pairwise, "max_factors": max_factors, "min_occurrence": min_occurrence, } H2OJob(H2OConnection.post_json("Interaction", **parms), "Interactions").poll() return get_frame(parms["dest"]) def network_test(): res = H2OConnection.get_json(url_suffix="NetworkTest") res["table"].show() def locate(path): """ Search for a relative path and turn it into an absolute path. This is handy when hunting for data files to be passed into h2o and used by import_file. Note: This function is for unit testing purposes only. Parameters ---------- path : str Path to search for :return: Absolute path if it is found. None otherwise. """ tmp_dir = os.path.realpath(os.getcwd()) possible_result = os.path.join(tmp_dir, path) while (True): if (os.path.exists(possible_result)): return possible_result next_tmp_dir = os.path.dirname(tmp_dir) if (next_tmp_dir == tmp_dir): raise ValueError("File not found: " + path) tmp_dir = next_tmp_dir possible_result = os.path.join(tmp_dir, path) def store_size(): """ Get the H2O store size (current count of keys). :return: number of keys in H2O cloud """ return rapids("(store_size)")["result"] def keys_leaked(num_keys): """ Ask H2O if any keys leaked. @param num_keys: The number of keys that should be there. :return: A boolean True/False if keys leaked. If keys leaked, check H2O logs for further detail. """ return rapids("(keys_leaked #{})".format(num_keys))["result"]=="TRUE" def as_list(data, use_pandas=True): """ Convert an H2O data object into a python-specific object. WARNING: This will pull all data local! If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame. Otherwise, a list-of-lists populated by character data will be returned (so the types of data will all be str). Parameters ---------- data : H2OFrame An H2O data object. use_pandas : bool Try to use pandas for reading in the data. :return: List of list (Rows x Columns). """ return H2OFrame.as_data_frame(data, use_pandas) def set_timezone(tz): """ Set the Time Zone on the H2O Cloud Parameters ---------- tz : str The desired timezone.
:return: None """ rapids(ExprNode._collapse_sb(ExprNode("setTimeZone", tz)._eager())) def get_timezone(): """ Get the Time Zone on the H2O Cloud :return: the time zone (string) """ return H2OFrame(expr=ExprNode("getTimeZone"))._scalar() def list_timezones(): """ Get a list of all the timezones :return: the time zones (as an H2OFrame) """ return H2OFrame(expr=ExprNode("listTimeZones"))._frame() def turn_off_ref_cnts(): """ Reference counting on H2OFrame's allows for eager deletion of H2OFrames that go out of scope. If multiple threads are spawned, however, and data is to live beyond the use of the thread (e.g. when launching multiple jobs via Parallel in scikit-learn), then there may be referers outside of the current context. Use this to prevent deletion of H2OFrame instances. :return: None """ H2OFrame.COUNTING=False def turn_on_ref_cnts(): """ See the note in turn_off_ref_cnts :return: None """ H2OFrame.del_dropped() H2OFrame.COUNTING=True class H2ODisplay: """ Pretty printing for H2O Objects; Handles both IPython and vanilla console display """ THOUSANDS = "{:,}" def __init__(self,table=None,header=None,table_header=None,**kwargs): self.table_header=table_header self.header=header self.table=table self.kwargs=kwargs self.do_print=True # one-shot display... never return an H2ODisplay object (or try not to) # if holding onto a display object, then may have odd printing behavior # the __repr__ and _repr_html_ methods will try to save you from many prints, # but just be WARNED that your mileage may vary! # # In other words, it's better to just new one of these when you're ready to print out. if self.table_header is not None: print print self.table_header + ":" print if H2ODisplay._in_ipy(): from IPython.display import display display(self) self.do_print=False else: self.pprint() self.do_print=False # for Ipython def _repr_html_(self): if self.do_print: return H2ODisplay._html_table(self.table,self.header) def pprint(self): r = self.__repr__() print r # for python REPL console def __repr__(self): if self.do_print or not H2ODisplay._in_ipy(): if self.header is None: return tabulate.tabulate(self.table,**self.kwargs) else: return tabulate.tabulate(self.table,headers=self.header,**self.kwargs) self.do_print=True return "" @staticmethod def _in_ipy(): # are we in ipy? then pretty print tables with _repr_html try: __IPYTHON__ return True except NameError: return False # some html table builder helper things @staticmethod def _html_table(rows, header=None): table= "<div style=\"overflow:auto\"><table style=\"width:50%\">{}</table></div>" # keep table in a div for scroll-a-bility table_rows=[] if header is not None: table_rows.append(H2ODisplay._html_row(header)) for row in rows: table_rows.append(H2ODisplay._html_row(row)) return table.format("\n".join(table_rows)) @staticmethod def _html_row(row): res = "<tr>{}</tr>" entry = "<td>{}</td>" entries = "\n".join([entry.format(str(r)) for r in row]) return res.format(entries) def can_use_pandas(): try: imp.find_module('pandas') return True except ImportError: return False # ALL DEPRECATED METHODS BELOW # def h2o_deprecated(newfun=None): def o(fun): if newfun is not None: m = "{} is deprecated. Use {}.".format(fun.__name__,newfun.__name__) else: m = "{} is deprecated.".format(fun.__name__) @functools.wraps(fun) def i(*args, **kwargs): print print warnings.warn(m, category=DeprecationWarning, stacklevel=2) return fun(*args, **kwargs) return i return o @h2o_deprecated(import_file) def import_frame(path=None): """ Deprecated for import_file. 
Parameters ---------- path : str A path specifying the location of the data to import. :return: A new H2OFrame """ warnings.warn("deprecated: Use import_file", DeprecationWarning) return import_file(path)
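# Editor's sketch (not part of the original module): every builder above uses
# the same locals()-filtering idiom to assemble its REST parameters -- keep the
# frame-like arguments even when they are None, drop any other argument the
# caller left unset, then tag the dict with the algorithm name. A standalone,
# simplified illustration of that idiom with made-up parameter names:
def _parms_idiom_example(x, y, training_frame=None, ntrees=None, seed=None):
    parms = {k: v for k, v in locals().items()
             if k in ["training_frame"] or v is not None}
    parms["algo"] = "example"
    return parms

# _parms_idiom_example("preds", "resp", ntrees=50) returns
# {'x': 'preds', 'y': 'resp', 'training_frame': None, 'ntrees': 50,
#  'algo': 'example'} -- the unset 'seed' is dropped, while the
# always-forwarded 'training_frame' is kept even though it is None.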
apache-2.0
bdh1011/wau
venv/lib/python2.7/site-packages/pandas/compat/pickle_compat.py
15
2829
""" support pre 0.12 series pickle compatibility """ import sys import numpy as np import pandas import copy import pickle as pkl from pandas import compat, Index from pandas.compat import u, string_types def load_reduce(self): stack = self.stack args = stack.pop() func = stack[-1] if type(args[0]) is type: n = args[0].__name__ try: stack[-1] = func(*args) return except Exception as e: # if we have a deprecated function # try to replace and try again if '_reconstruct: First argument must be a sub-type of ndarray' in str(e): try: cls = args[0] stack[-1] = object.__new__(cls) return except: pass # try to reencode the arguments if getattr(self,'encoding',None) is not None: args = tuple([arg.encode(self.encoding) if isinstance(arg, string_types) else arg for arg in args]) try: stack[-1] = func(*args) return except: pass if getattr(self,'is_verbose',None): print(sys.exc_info()) print(func, args) raise stack[-1] = value if compat.PY3: class Unpickler(pkl._Unpickler): pass else: class Unpickler(pkl.Unpickler): pass Unpickler.dispatch = copy.copy(Unpickler.dispatch) Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce def load_newobj(self): args = self.stack.pop() cls = self.stack[-1] # compat if issubclass(cls, Index): obj = object.__new__(cls) else: obj = cls.__new__(cls, *args) self.stack[-1] = obj Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj # py3 compat def load_newobj_ex(self): kwargs = self.stack.pop() args = self.stack.pop() cls = self.stack.pop() # compat if issubclass(cls, Index): obj = object.__new__(cls) else: obj = cls.__new__(cls, *args, **kwargs) self.append(obj) try: Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex except: pass def load(fh, encoding=None, compat=False, is_verbose=False): """load a pickle, with a provided encoding if compat is True: fake the old class hierarchy if it works, then return the new type objects Parameters ---------- fh: a filelike object encoding: an optional encoding compat: provide Series compatibility mode, boolean, default False is_verbose: show exception output """ try: fh.seek(0) if encoding is not None: up = Unpickler(fh, encoding=encoding) else: up = Unpickler(fh) up.is_verbose = is_verbose return up.load() except: raise
mit
kit-cel/wt
wt/uebung/u5_2D_gaussian.py
1
5356
#!/usr/bin/env python3 """ Simulation einer 2D Normalverteilung. Gezeigt werden neben zuf. Realisierungen auch die Höhenlinie der Dichte für K=9. """ from functools import partial import numpy as np import matplotlib as mp from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg from PyQt5 import QtWidgets, QtCore class Canvas(FigureCanvasQTAgg): """Ultimately, this is a QWidget""" def __init__(self, parent=None, width=5, height=4, dpi=100): """Creates a figure and axes and draws periodically on it.""" # Create a figure and axes fig = mp.figure.Figure(figsize=(width, height), dpi=dpi) self.axes = fig.add_axes((0.1, 0.1, 0.85, 0.85)) self.axes.set_aspect('equal') # Initialize widget and update timer super(Canvas, self).__init__(fig) self.setParent(parent) self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) self.updateGeometry() # plot parameter defaults self.sigma1 = self.sigma2 = 1.0 self.rho = 0 self.points_per_update = 100 # plot data self.reset() self.plot_gaussian_samples() draw_timer = QtCore.QTimer(self) draw_timer.timeout.connect(self.plot_gaussian_samples) draw_timer.start(100) clear_timer = QtCore.QTimer(self) clear_timer.timeout.connect(self.reset) clear_timer.start(20 * 1000) def set_param(self, name, value): """Update a plot parameter, clear axes""" setattr(self, name, value) self.reset() def reset(self): """Draw the theoretical contour line""" self.axes.clear() self.axes.set_xlim(-6, 6) self.axes.set_ylim(-5, 5) self.draw_contour_line() def draw_contour_line(self): """Draw the theoretical contour line""" # get parameters and math functions o1, o2, r = self.sigma1, self.sigma2, self.rho sqrt, sin, cos, pi = np.sqrt, np.sin, np.cos, np.pi # calculate ellipse parameters K = 3 ** 2 g = 0.5 * np.arctan(2 * r * o1 * o2 / (o2**2 - o1**2)) \ if o1 != o2 else pi/4 a = o1 * o2 * sqrt(K * (1 - r**2) / ( (o1 * sin(g)) ** 2 + (o2 * cos(g)) ** 2 + 2 * r * o1 * o2 * sin(g) * cos(g) )) b = o1 * o2 * sqrt(K * (1 - r**2) / ( (o1 * cos(g)) ** 2 + (o2 * sin(g)) ** 2 - 2 * r * o1 * o2 * sin(g) * cos(g) )) # add contour line (ellipse) self.axes.add_artist(mp.patches.Ellipse( xy=(0, 0), width=2 * a, height=2 * b, angle=-180 / pi * g, facecolor='none', edgecolor='r', zorder=2, linewidth=2 )) self.draw() def plot_gaussian_samples(self): """Put some samples of the current distribution on the axes""" o1, o2, r = self.sigma1, self.sigma2, self.rho # get two std norm distributed vectors x, y = np.random.normal(0, 1, (2, self.points_per_update)) # scaling parameters r1, r2 = np.sqrt((1 + r) / 2), np.sqrt((1 - r) / 2) # mix the random vectors to get desired correlation x, y = o1 * (x * r1 + y * r2), o2 * (x * r1 - y * r2) # plot and draw self.axes.plot(x, y, 'ko', zorder=1, alpha=0.5, ms=2) self.draw() class FigureCanvasWithControls(QtWidgets.QWidget): def __init__(self): super(FigureCanvasWithControls, self).__init__() layout = QtWidgets.QVBoxLayout(self) canvas = Canvas(self, width=5, height=4, dpi=100) params = (('sigma1', 0.1, 2.0, 1.0), ('sigma2', 0.1, 2.0, 1.0), ('rho', -0.99, 0.99, 0.0)) # create a control for each figure parameter for name, lo, hi, default in params: row = QtWidgets.QHBoxLayout() layout.addLayout(row) # label label = QtWidgets.QLabel(name) label.setMinimumWidth(50) label.setAlignment(QtCore.Qt.AlignRight) row.addWidget(label, 0) # value slider slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self) slider.setRange(0, 200) slider.setSingleStep(2) slider.setPageStep(10) row.addWidget(slider, 1) # value display text = QtWidgets.QLineEdit() 
text.setReadOnly(True) text.setMaximumWidth(50) text.setFocusPolicy(QtCore.Qt.NoFocus) text.setAlignment(QtCore.Qt.AlignRight) row.addWidget(text, 0) def update(name, lo, hi, text, value): """Convert int slider value to target range""" value = value / slider.maximum() * (hi - lo) + lo canvas.set_param(name, value) text.setText("{:.2f}".format(value)) # update figure canvas on value change slider.valueChanged.connect(partial(update, name, lo, hi, text)) slider.setValue(round((default - lo) / (hi - lo) * slider.maximum())) layout.addWidget(canvas) if __name__ == '__main__': import sys app = QtWidgets.QApplication(sys.argv) win = FigureCanvasWithControls() win.show() sys.exit(app.exec_())
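# Editor's sketch (not part of the original script): plot_gaussian_samples()
# builds correlated samples from two independent standard-normal vectors via
# u = x*r1 + y*r2 and v = x*r1 - y*r2 with r1 = sqrt((1+rho)/2) and
# r2 = sqrt((1-rho)/2). Since Var(u) = Var(v) = r1**2 + r2**2 = 1 and
# Cov(u, v) = r1**2 - r2**2 = rho, the empirical correlation of (u, v) should
# match the requested rho. A small standalone check (sample size is arbitrary):
def _empirical_correlation_check(rho=0.7, n=100000):
    x, y = np.random.normal(0, 1, (2, n))
    r1, r2 = np.sqrt((1 + rho) / 2), np.sqrt((1 - rho) / 2)
    u, v = x * r1 + y * r2, x * r1 - y * r2
    return np.corrcoef(u, v)[0, 1]  # approximately rho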
gpl-2.0
walterreade/scikit-learn
examples/svm/plot_oneclass.py
80
2338
""" ========================================== One-class SVM with non-linear kernel (RBF) ========================================== An example using a one-class SVM for novelty detection. :ref:`One-class SVM <svm_outlier_detection>` is an unsupervised algorithm that learns a decision function for novelty detection: classifying new data as similar or different to the training set. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn import svm xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500)) # Generate train data X = 0.3 * np.random.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * np.random.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) n_error_train = y_pred_train[y_pred_train == -1].size n_error_test = y_pred_test[y_pred_test == -1].size n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size # plot the line, the points, and the nearest vectors to the plane Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.title("Novelty Detection") plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu) a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred') plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred') s = 40 b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s) b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s) c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s) plt.axis('tight') plt.xlim((-5, 5)) plt.ylim((-5, 5)) plt.legend([a.collections[0], b1, b2, c], ["learned frontier", "training observations", "new regular observations", "new abnormal observations"], loc="upper left", prop=matplotlib.font_manager.FontProperties(size=11)) plt.xlabel( "error train: %d/200 ; errors novel regular: %d/40 ; " "errors novel abnormal: %d/40" % (n_error_train, n_error_test, n_error_outliers)) plt.show()
bsd-3-clause
cpcloud/ibis
ibis/pandas/execution/generic.py
1
33772
"""Execution rules for generic ibis operations.""" import collections import datetime import decimal import functools import math import numbers import operator from collections.abc import Sized import numpy as np import pandas as pd import toolz from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy import ibis import ibis.common.exceptions as com import ibis.expr.datatypes as dt import ibis.expr.operations as ops import ibis.expr.types as ir import ibis.pandas.aggcontext as agg_ctx from ibis.compat import DatetimeTZDtype from ibis.pandas.core import ( boolean_types, execute, fixed_width_types, floating_types, integer_types, numeric_types, scalar_types, simple_types, timedelta_types, ) from ibis.pandas.dispatch import execute_literal, execute_node from ibis.pandas.execution import constants # By default return the literal value @execute_literal.register(ops.Literal, object, dt.DataType) def execute_node_literal_value_datatype(op, value, datatype, **kwargs): return value # Because True and 1 hash to the same value, if we have True or False in scope # keys while executing anything that should evaluate to 1 or 0 evaluates to # True or False respectively. This is a hack to work around that by casting the # bool to an integer. @execute_literal.register(ops.Literal, object, dt.Integer) def execute_node_literal_any_integer_datatype(op, value, datatype, **kwargs): return int(value) @execute_literal.register(ops.Literal, object, dt.Boolean) def execute_node_literal_any_boolean_datatype(op, value, datatype, **kwargs): return bool(value) @execute_literal.register(ops.Literal, object, dt.Floating) def execute_node_literal_any_floating_datatype(op, value, datatype, **kwargs): return float(value) @execute_literal.register(ops.Literal, dt.DataType) def execute_node_literal_datatype(op, datatype, **kwargs): return op.value @execute_literal.register( ops.Literal, timedelta_types + (str,) + integer_types, dt.Interval ) def execute_interval_literal(op, value, dtype, **kwargs): return pd.Timedelta(value, dtype.unit) @execute_node.register(ops.Limit, pd.DataFrame, integer_types, integer_types) def execute_limit_frame(op, data, nrows, offset, **kwargs): return data.iloc[offset : offset + nrows] @execute_node.register(ops.Cast, SeriesGroupBy, dt.DataType) def execute_cast_series_group_by(op, data, type, **kwargs): result = execute_cast_series_generic(op, data.obj, type, **kwargs) return result.groupby(data.grouper.groupings) @execute_node.register(ops.Cast, pd.Series, dt.DataType) def execute_cast_series_generic(op, data, type, **kwargs): return data.astype(constants.IBIS_TYPE_TO_PANDAS_TYPE[type]) @execute_node.register(ops.Cast, pd.Series, dt.Array) def execute_cast_series_array(op, data, type, **kwargs): value_type = type.value_type numpy_type = constants.IBIS_TYPE_TO_PANDAS_TYPE.get(value_type, None) if numpy_type is None: raise ValueError( 'Array value type must be a primitive type ' '(e.g., number, string, or timestamp)' ) return data.map( lambda array, numpy_type=numpy_type: list(map(numpy_type, array)) ) @execute_node.register(ops.Cast, pd.Series, dt.Timestamp) def execute_cast_series_timestamp(op, data, type, **kwargs): arg = op.arg from_type = arg.type() if from_type.equals(type): # noop cast return data tz = type.timezone if isinstance(from_type, (dt.Timestamp, dt.Date)): return data.astype( 'M8[ns]' if tz is None else DatetimeTZDtype('ns', tz) ) if isinstance(from_type, (dt.String, dt.Integer)): timestamps = pd.to_datetime(data.values, infer_datetime_format=True) if 
getattr(timestamps.dtype, "tz", None) is not None: method_name = "tz_convert" else: method_name = "tz_localize" method = getattr(timestamps, method_name) timestamps = method(tz) return pd.Series(timestamps, index=data.index, name=data.name) raise TypeError("Don't know how to cast {} to {}".format(from_type, type)) def _normalize(values, original_index, name, timezone=None): index = pd.DatetimeIndex(values, tz=timezone) return pd.Series(index.normalize(), index=original_index, name=name) @execute_node.register(ops.Cast, pd.Series, dt.Date) def execute_cast_series_date(op, data, type, **kwargs): arg = op.args[0] from_type = arg.type() if from_type.equals(type): return data if isinstance(from_type, dt.Timestamp): return _normalize( data.values, data.index, data.name, timezone=from_type.timezone ) if from_type.equals(dt.string): values = data.values datetimes = pd.to_datetime(values, infer_datetime_format=True) try: datetimes = datetimes.tz_convert(None) except TypeError: pass dates = _normalize(datetimes, data.index, data.name) return pd.Series(dates, index=data.index, name=data.name) if isinstance(from_type, dt.Integer): return pd.Series( pd.to_datetime(data.values, box=False, unit='D'), index=data.index, name=data.name, ) raise TypeError("Don't know how to cast {} to {}".format(from_type, type)) @execute_node.register(ops.SortKey, pd.Series, bool) def execute_sort_key_series_bool(op, data, ascending, **kwargs): return data def call_numpy_ufunc(func, op, data, **kwargs): if data.dtype == np.dtype(np.object_): return data.apply(functools.partial(execute_node, op, **kwargs)) return func(data) @execute_node.register(ops.Negate, fixed_width_types + timedelta_types) def execute_obj_negate(op, data, **kwargs): return -data @execute_node.register(ops.Negate, pd.Series) def execute_series_negate(op, data, **kwargs): return call_numpy_ufunc(np.negative, op, data, **kwargs) @execute_node.register(ops.Negate, SeriesGroupBy) def execute_series_group_by_negate(op, data, **kwargs): return execute_series_negate(op, data.obj, **kwargs).groupby( data.grouper.groupings ) @execute_node.register(ops.UnaryOp, pd.Series) def execute_series_unary_op(op, data, **kwargs): function = getattr(np, type(op).__name__.lower()) return call_numpy_ufunc(function, op, data, **kwargs) @execute_node.register((ops.Ceil, ops.Floor), pd.Series) def execute_series_ceil(op, data, **kwargs): return_type = np.object_ if data.dtype == np.object_ else np.int64 func = getattr(np, type(op).__name__.lower()) return call_numpy_ufunc(func, op, data, **kwargs).astype(return_type) def vectorize_object(op, arg, *args, **kwargs): func = np.vectorize(functools.partial(execute_node, op, **kwargs)) return pd.Series(func(arg, *args), index=arg.index, name=arg.name) @execute_node.register( ops.Log, pd.Series, (pd.Series, numbers.Real, decimal.Decimal, type(None)) ) def execute_series_log_with_base(op, data, base, **kwargs): if data.dtype == np.dtype(np.object_): return vectorize_object(op, data, base, **kwargs) if base is None: return np.log(data) return np.log(data) / np.log(base) @execute_node.register(ops.Ln, pd.Series) def execute_series_natural_log(op, data, **kwargs): if data.dtype == np.dtype(np.object_): return data.apply(functools.partial(execute_node, op, **kwargs)) return np.log(data) @execute_node.register( ops.Clip, pd.Series, (pd.Series, type(None)) + numeric_types, (pd.Series, type(None)) + numeric_types, ) def execute_series_clip(op, data, lower, upper, **kwargs): return data.clip(lower=lower, upper=upper) 
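# Editor's sketch (hypothetical, not part of ibis): execute_node is a
# multiple-dispatch registry keyed on the operation class plus the runtime
# types of its already-executed arguments, so new execution rules are added
# exactly like the ones above. The operation class below is invented purely
# for illustration.
class _ExampleDouble(ops.UnaryOp):
    """A made-up unary operation used only to illustrate rule registration."""


@execute_node.register(_ExampleDouble, pd.Series)
def execute_example_double_series(op, data, **kwargs):
    # double every element of the already-materialized pandas Series
    return data * 2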
@execute_node.register(ops.Quantile, (pd.Series, SeriesGroupBy), numeric_types) def execute_series_quantile(op, data, quantile, aggcontext=None, **kwargs): return aggcontext.agg( data, 'quantile', q=quantile, interpolation=op.interpolation ) @execute_node.register(ops.MultiQuantile, pd.Series, collections.abc.Sequence) def execute_series_quantile_sequence( op, data, quantile, aggcontext=None, **kwargs ): result = aggcontext.agg( data, 'quantile', q=quantile, interpolation=op.interpolation ) return list(result) @execute_node.register( ops.MultiQuantile, SeriesGroupBy, collections.abc.Sequence ) def execute_series_quantile_groupby( op, data, quantile, aggcontext=None, **kwargs ): def q(x, quantile, interpolation): result = x.quantile(quantile, interpolation=interpolation).tolist() res = [result for _ in range(len(x))] return res result = aggcontext.agg(data, q, quantile, op.interpolation) return result @execute_node.register(ops.Cast, type(None), dt.DataType) def execute_cast_null_to_anything(op, data, type, **kwargs): return None @execute_node.register(ops.Cast, datetime.datetime, dt.String) def execute_cast_datetime_or_timestamp_to_string(op, data, type, **kwargs): """Cast timestamps to strings""" return str(data) @execute_node.register(ops.Cast, datetime.datetime, dt.Int64) def execute_cast_datetime_to_integer(op, data, type, **kwargs): """Cast datetimes to integers""" return pd.Timestamp(data).value @execute_node.register(ops.Cast, pd.Timestamp, dt.Int64) def execute_cast_timestamp_to_integer(op, data, type, **kwargs): """Cast timestamps to integers""" return data.value @execute_node.register(ops.Cast, (np.bool_, bool), dt.Timestamp) def execute_cast_bool_to_timestamp(op, data, type, **kwargs): raise TypeError( 'Casting boolean values to timestamps does not make sense. If you ' 'really want to cast boolean values to timestamps please cast to ' 'int64 first then to timestamp: ' "value.cast('int64').cast('timestamp')" ) @execute_node.register(ops.Cast, (np.bool_, bool), dt.Interval) def execute_cast_bool_to_interval(op, data, type, **kwargs): raise TypeError( 'Casting boolean values to intervals does not make sense. 
If you ' 'really want to cast boolean values to intervals please cast to ' 'int64 first then to interval: ' "value.cast('int64').cast(ibis.expr.datatypes.Interval(...))" ) @execute_node.register(ops.Cast, integer_types + (str,), dt.Timestamp) def execute_cast_simple_literal_to_timestamp(op, data, type, **kwargs): """Cast integer and strings to timestamps""" return pd.Timestamp(data, tz=type.timezone) @execute_node.register(ops.Cast, pd.Timestamp, dt.Timestamp) def execute_cast_timestamp_to_timestamp(op, data, type, **kwargs): """Cast timestamps to other timestamps including timezone if necessary""" input_timezone = data.tz target_timezone = type.timezone if input_timezone == target_timezone: return data if input_timezone is None or target_timezone is None: return data.tz_localize(target_timezone) return data.tz_convert(target_timezone) @execute_node.register(ops.Cast, datetime.datetime, dt.Timestamp) def execute_cast_datetime_to_datetime(op, data, type, **kwargs): return execute_cast_timestamp_to_timestamp( op, data, type, **kwargs ).to_pydatetime() @execute_node.register(ops.Cast, fixed_width_types + (str,), dt.DataType) def execute_cast_string_literal(op, data, type, **kwargs): try: cast_function = constants.IBIS_TO_PYTHON_LITERAL_TYPES[type] except KeyError: raise TypeError( "Don't know how to cast {!r} to type {}".format(data, type) ) else: return cast_function(data) @execute_node.register(ops.Round, scalar_types, (int, type(None))) def execute_round_scalars(op, data, places, **kwargs): return round(data, places) if places else round(data) @execute_node.register( ops.Round, pd.Series, (pd.Series, np.integer, type(None), int) ) def execute_round_series(op, data, places, **kwargs): if data.dtype == np.dtype(np.object_): return vectorize_object(op, data, places, **kwargs) result = data.round(places or 0) return result if places else result.astype('int64') @execute_node.register(ops.TableColumn, (pd.DataFrame, DataFrameGroupBy)) def execute_table_column_df_or_df_groupby(op, data, **kwargs): return data[op.name] @execute_node.register(ops.Aggregation, pd.DataFrame) def execute_aggregation_dataframe(op, data, scope=None, **kwargs): assert op.metrics, 'no metrics found during aggregation execution' if op.sort_keys: raise NotImplementedError( 'sorting on aggregations not yet implemented' ) predicates = op.predicates if predicates: predicate = functools.reduce( operator.and_, (execute(p, scope=scope, **kwargs) for p in predicates), ) data = data.loc[predicate] columns = {} if op.by: grouping_key_pairs = list( zip(op.by, map(operator.methodcaller('op'), op.by)) ) grouping_keys = [ by_op.name if isinstance(by_op, ops.TableColumn) else execute(by, scope=scope, **kwargs).rename(by.get_name()) for by, by_op in grouping_key_pairs ] columns.update( (by_op.name, by.get_name()) for by, by_op in grouping_key_pairs if hasattr(by_op, 'name') ) source = data.groupby(grouping_keys) else: source = data new_scope = toolz.merge(scope, {op.table.op(): source}) pieces = [ pd.Series( execute(metric, scope=new_scope, **kwargs), name=metric.get_name() ) for metric in op.metrics ] # group by always needs a reset to get the grouping key back as a column result = pd.concat(pieces, axis=1).reset_index() result.columns = [columns.get(c, c) for c in result.columns] if op.having: # .having(...) 
is only accessible on groupby, so this should never # raise if not op.by: raise ValueError( 'Filtering out aggregation values is not allowed without at ' 'least one grouping key' ) # TODO(phillipc): Don't recompute identical subexpressions predicate = functools.reduce( operator.and_, ( execute(having, scope=new_scope, **kwargs) for having in op.having ), ) assert len(predicate) == len( result ), 'length of predicate does not match length of DataFrame' result = result.loc[predicate.values] return result @execute_node.register(ops.Reduction, SeriesGroupBy, type(None)) def execute_reduction_series_groupby( op, data, mask, aggcontext=None, **kwargs ): return aggcontext.agg(data, type(op).__name__.lower()) variance_ddof = {'pop': 0, 'sample': 1} @execute_node.register(ops.Variance, SeriesGroupBy, type(None)) def execute_reduction_series_groupby_var( op, data, _, aggcontext=None, **kwargs ): return aggcontext.agg(data, 'var', ddof=variance_ddof[op.how]) @execute_node.register(ops.StandardDev, SeriesGroupBy, type(None)) def execute_reduction_series_groupby_std( op, data, _, aggcontext=None, **kwargs ): return aggcontext.agg(data, 'std', ddof=variance_ddof[op.how]) @execute_node.register( (ops.CountDistinct, ops.HLLCardinality), SeriesGroupBy, type(None) ) def execute_count_distinct_series_groupby( op, data, _, aggcontext=None, **kwargs ): return aggcontext.agg(data, 'nunique') @execute_node.register(ops.Arbitrary, SeriesGroupBy, type(None)) def execute_arbitrary_series_groupby(op, data, _, aggcontext=None, **kwargs): how = op.how if how is None: how = 'first' if how not in {'first', 'last'}: raise com.OperationNotDefinedError( 'Arbitrary {!r} is not supported'.format(how) ) return aggcontext.agg(data, how) def _filtered_reduction(mask, method, data): return method(data[mask[data.index]]) @execute_node.register(ops.Reduction, SeriesGroupBy, SeriesGroupBy) def execute_reduction_series_gb_mask( op, data, mask, aggcontext=None, **kwargs ): method = operator.methodcaller(type(op).__name__.lower()) return aggcontext.agg( data, functools.partial(_filtered_reduction, mask.obj, method) ) @execute_node.register( (ops.CountDistinct, ops.HLLCardinality), SeriesGroupBy, SeriesGroupBy ) def execute_count_distinct_series_groupby_mask( op, data, mask, aggcontext=None, **kwargs ): return aggcontext.agg( data, functools.partial(_filtered_reduction, mask.obj, pd.Series.nunique), ) @execute_node.register(ops.Variance, SeriesGroupBy, SeriesGroupBy) def execute_var_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs): return aggcontext.agg( data, lambda x, mask=mask.obj, ddof=variance_ddof[op.how]: ( x[mask[x.index]].var(ddof=ddof) ), ) @execute_node.register(ops.StandardDev, SeriesGroupBy, SeriesGroupBy) def execute_std_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs): return aggcontext.agg( data, lambda x, mask=mask.obj, ddof=variance_ddof[op.how]: ( x[mask[x.index]].std(ddof=ddof) ), ) @execute_node.register(ops.Count, DataFrameGroupBy, type(None)) def execute_count_frame_groupby(op, data, _, **kwargs): result = data.size() # FIXME(phillipc): We should not hard code this column name result.name = 'count' return result @execute_node.register(ops.Reduction, pd.Series, (pd.Series, type(None))) def execute_reduction_series_mask(op, data, mask, aggcontext=None, **kwargs): operand = data[mask] if mask is not None else data return aggcontext.agg(operand, type(op).__name__.lower()) @execute_node.register( (ops.CountDistinct, ops.HLLCardinality), pd.Series, (pd.Series, type(None)) ) def 
execute_count_distinct_series_mask( op, data, mask, aggcontext=None, **kwargs ): return aggcontext.agg(data[mask] if mask is not None else data, 'nunique') @execute_node.register(ops.Arbitrary, pd.Series, (pd.Series, type(None))) def execute_arbitrary_series_mask(op, data, mask, aggcontext=None, **kwargs): if op.how == 'first': index = 0 elif op.how == 'last': index = -1 else: raise com.OperationNotDefinedError( 'Arbitrary {!r} is not supported'.format(op.how) ) data = data[mask] if mask is not None else data return data.iloc[index] @execute_node.register(ops.StandardDev, pd.Series, (pd.Series, type(None))) def execute_standard_dev_series(op, data, mask, aggcontext=None, **kwargs): return aggcontext.agg( data[mask] if mask is not None else data, 'std', ddof=variance_ddof[op.how], ) @execute_node.register(ops.Variance, pd.Series, (pd.Series, type(None))) def execute_variance_series(op, data, mask, aggcontext=None, **kwargs): return aggcontext.agg( data[mask] if mask is not None else data, 'var', ddof=variance_ddof[op.how], ) @execute_node.register((ops.Any, ops.All), (pd.Series, SeriesGroupBy)) def execute_any_all_series(op, data, aggcontext=None, **kwargs): if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)): result = aggcontext.agg(data, type(op).__name__.lower()) else: result = aggcontext.agg( data, lambda data: getattr(data, type(op).__name__.lower())() ) try: return result.astype(bool) except TypeError: return result @execute_node.register(ops.NotAny, (pd.Series, SeriesGroupBy)) def execute_notany_series(op, data, aggcontext=None, **kwargs): if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)): result = ~aggcontext.agg(data, 'any') else: result = aggcontext.agg(data, lambda data: ~data.any()) try: return result.astype(bool) except TypeError: return result @execute_node.register(ops.NotAll, (pd.Series, SeriesGroupBy)) def execute_notall_series(op, data, aggcontext=None, **kwargs): if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)): result = ~aggcontext.agg(data, 'all') else: result = aggcontext.agg(data, lambda data: ~data.all()) try: return result.astype(bool) except TypeError: return result @execute_node.register(ops.Count, pd.DataFrame, type(None)) def execute_count_frame(op, data, _, **kwargs): return len(data) @execute_node.register(ops.Not, (bool, np.bool_)) def execute_not_bool(op, data, **kwargs): return not data @execute_node.register(ops.BinaryOp, pd.Series, pd.Series) @execute_node.register( (ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison), numeric_types, pd.Series, ) @execute_node.register( (ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison), pd.Series, numeric_types, ) @execute_node.register( (ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison), numeric_types, numeric_types, ) @execute_node.register((ops.Comparison, ops.Add, ops.Multiply), pd.Series, str) @execute_node.register((ops.Comparison, ops.Add, ops.Multiply), str, pd.Series) @execute_node.register((ops.Comparison, ops.Add), str, str) @execute_node.register(ops.Multiply, integer_types, str) @execute_node.register(ops.Multiply, str, integer_types) def execute_binary_op(op, left, right, **kwargs): op_type = type(op) try: operation = constants.BINARY_OPERATIONS[op_type] except KeyError: raise NotImplementedError( 'Binary operation {} not implemented'.format(op_type.__name__) ) else: return operation(left, right) @execute_node.register(ops.BinaryOp, SeriesGroupBy, SeriesGroupBy) def execute_binary_op_series_group_by(op, left, right, **kwargs): 
left_groupings = left.grouper.groupings right_groupings = right.grouper.groupings if left_groupings != right_groupings: raise ValueError( 'Cannot perform {} operation on two series with ' 'different groupings'.format(type(op).__name__) ) result = execute_binary_op(op, left.obj, right.obj, **kwargs) return result.groupby(left_groupings) @execute_node.register(ops.BinaryOp, SeriesGroupBy, simple_types) def execute_binary_op_series_gb_simple(op, left, right, **kwargs): result = execute_binary_op(op, left.obj, right, **kwargs) return result.groupby(left.grouper.groupings) @execute_node.register(ops.BinaryOp, simple_types, SeriesGroupBy) def execute_binary_op_simple_series_gb(op, left, right, **kwargs): result = execute_binary_op(op, left, right.obj, **kwargs) return result.groupby(right.grouper.groupings) @execute_node.register(ops.UnaryOp, SeriesGroupBy) def execute_unary_op_series_gb(op, operand, **kwargs): result = execute_node(op, operand.obj, **kwargs) return result.groupby(operand.grouper.groupings) @execute_node.register( (ops.Log, ops.Round), SeriesGroupBy, (numbers.Real, decimal.Decimal, type(None)), ) def execute_log_series_gb_others(op, left, right, **kwargs): result = execute_node(op, left.obj, right, **kwargs) return result.groupby(left.grouper.groupings) @execute_node.register((ops.Log, ops.Round), SeriesGroupBy, SeriesGroupBy) def execute_log_series_gb_series_gb(op, left, right, **kwargs): result = execute_node(op, left.obj, right.obj, **kwargs) return result.groupby(left.grouper.groupings) @execute_node.register(ops.Not, pd.Series) def execute_not_series(op, data, **kwargs): return ~data @execute_node.register(ops.NullIfZero, pd.Series) def execute_null_if_zero_series(op, data, **kwargs): return data.where(data != 0, np.nan) @execute_node.register(ops.StringSplit, pd.Series, (pd.Series, str)) def execute_string_split(op, data, delimiter, **kwargs): return data.str.split(delimiter) @execute_node.register( ops.Between, pd.Series, (pd.Series, numbers.Real, str, datetime.datetime), (pd.Series, numbers.Real, str, datetime.datetime), ) def execute_between(op, data, lower, upper, **kwargs): return data.between(lower, upper) @execute_node.register(ops.DistinctColumn, pd.Series) def execute_series_distinct(op, data, **kwargs): return pd.Series(data.unique(), name=data.name) @execute_node.register(ops.Union, pd.DataFrame, pd.DataFrame, bool) def execute_union_dataframe_dataframe(op, left, right, distinct, **kwargs): result = pd.concat([left, right], axis=0) return result.drop_duplicates() if distinct else result @execute_node.register(ops.IsNull, pd.Series) def execute_series_isnull(op, data, **kwargs): return data.isnull() @execute_node.register(ops.NotNull, pd.Series) def execute_series_notnnull(op, data, **kwargs): return data.notnull() @execute_node.register(ops.IsNan, (pd.Series, floating_types)) def execute_isnan(op, data, **kwargs): return np.isnan(data) @execute_node.register(ops.IsInf, (pd.Series, floating_types)) def execute_isinf(op, data, **kwargs): return np.isinf(data) @execute_node.register(ops.SelfReference, pd.DataFrame) def execute_node_self_reference_dataframe(op, data, **kwargs): return data @execute_node.register(ops.ValueList, collections.abc.Sequence) def execute_node_value_list(op, _, **kwargs): return [execute(arg, **kwargs) for arg in op.values] @execute_node.register(ops.StringConcat, collections.abc.Sequence) def execute_node_string_concat(op, args, **kwargs): return functools.reduce(operator.add, args) @execute_node.register(ops.StringJoin, 
collections.abc.Sequence) def execute_node_string_join(op, args, **kwargs): return op.sep.join(args) @execute_node.register( ops.Contains, pd.Series, (collections.abc.Sequence, collections.abc.Set) ) def execute_node_contains_series_sequence(op, data, elements, **kwargs): return data.isin(elements) @execute_node.register( ops.NotContains, pd.Series, (collections.abc.Sequence, collections.abc.Set) ) def execute_node_not_contains_series_sequence(op, data, elements, **kwargs): return ~data.isin(elements) # Series, Series, Series # Series, Series, scalar @execute_node.register(ops.Where, pd.Series, pd.Series, pd.Series) @execute_node.register(ops.Where, pd.Series, pd.Series, scalar_types) def execute_node_where_series_series_series(op, cond, true, false, **kwargs): # No need to turn false into a series, pandas will broadcast it return true.where(cond, other=false) # Series, scalar, Series def execute_node_where_series_scalar_scalar(op, cond, true, false, **kwargs): return pd.Series(np.repeat(true, len(cond))).where(cond, other=false) # Series, scalar, scalar for scalar_type in scalar_types: execute_node_where_series_scalar_scalar = execute_node.register( ops.Where, pd.Series, scalar_type, scalar_type )(execute_node_where_series_scalar_scalar) # scalar, Series, Series @execute_node.register(ops.Where, boolean_types, pd.Series, pd.Series) def execute_node_where_scalar_scalar_scalar(op, cond, true, false, **kwargs): # Note that it is not necessary to check that true and false are also # scalars. This allows users to do things like: # ibis.where(even_or_odd_bool, [2, 4, 6], [1, 3, 5]) return true if cond else false # scalar, scalar, scalar for scalar_type in scalar_types: execute_node_where_scalar_scalar_scalar = execute_node.register( ops.Where, boolean_types, scalar_type, scalar_type )(execute_node_where_scalar_scalar_scalar) # scalar, Series, scalar @execute_node.register(ops.Where, boolean_types, pd.Series, scalar_types) def execute_node_where_scalar_series_scalar(op, cond, true, false, **kwargs): return ( true if cond else pd.Series(np.repeat(false, len(true)), index=true.index) ) # scalar, scalar, Series @execute_node.register(ops.Where, boolean_types, scalar_types, pd.Series) def execute_node_where_scalar_scalar_series(op, cond, true, false, **kwargs): return pd.Series(np.repeat(true, len(false))) if cond else false @execute_node.register( ibis.pandas.client.PandasTable, ibis.pandas.client.PandasClient ) def execute_database_table_client(op, client, **kwargs): return client.dictionary[op.name] MATH_FUNCTIONS = { ops.Floor: math.floor, ops.Ln: math.log, ops.Log2: lambda x: math.log(x, 2), ops.Log10: math.log10, ops.Exp: math.exp, ops.Sqrt: math.sqrt, ops.Abs: abs, ops.Ceil: math.ceil, ops.Sign: lambda x: 0 if not x else -1 if x < 0 else 1, } MATH_FUNCTION_TYPES = tuple(MATH_FUNCTIONS.keys()) @execute_node.register(MATH_FUNCTION_TYPES, numeric_types) def execute_node_math_function_number(op, value, **kwargs): return MATH_FUNCTIONS[type(op)](value) @execute_node.register(ops.Log, numeric_types, numeric_types) def execute_node_log_number_number(op, value, base, **kwargs): return math.log(value, base) @execute_node.register(ops.IfNull, pd.Series, simple_types) @execute_node.register(ops.IfNull, pd.Series, pd.Series) def execute_node_ifnull_series(op, value, replacement, **kwargs): return value.fillna(replacement) @execute_node.register(ops.IfNull, simple_types, pd.Series) def execute_node_ifnull_scalar_series(op, value, replacement, **kwargs): return ( replacement if pd.isnull(value) else 
pd.Series(value, index=replacement.index) ) @execute_node.register(ops.IfNull, simple_types, simple_types) def execute_node_if_scalars(op, value, replacement, **kwargs): return replacement if pd.isnull(value) else value @execute_node.register(ops.NullIf, simple_types, simple_types) def execute_node_nullif_scalars(op, value1, value2, **kwargs): return np.nan if value1 == value2 else value1 @execute_node.register(ops.NullIf, pd.Series, pd.Series) def execute_node_nullif_series(op, series1, series2, **kwargs): return series1.where(series1 != series2) @execute_node.register(ops.NullIf, pd.Series, simple_types) def execute_node_nullif_series_scalar(op, series, value, **kwargs): return series.where(series != value) @execute_node.register(ops.NullIf, simple_types, pd.Series) def execute_node_nullif_scalar_series(op, value, series, **kwargs): return pd.Series( np.where(series.values == value, np.nan, value), index=series.index ) def coalesce(values): return functools.reduce(lambda x, y: x if not pd.isnull(x) else y, values) @toolz.curry def promote_to_sequence(length, obj): return obj.values if isinstance(obj, pd.Series) else np.repeat(obj, length) def compute_row_reduction(func, value, **kwargs): final_sizes = {len(x) for x in value if isinstance(x, Sized)} if not final_sizes: return func(value) final_size, = final_sizes raw = func(list(map(promote_to_sequence(final_size), value)), **kwargs) return pd.Series(raw).squeeze() @execute_node.register(ops.Greatest, collections.abc.Sequence) def execute_node_greatest_list(op, value, **kwargs): return compute_row_reduction(np.maximum.reduce, value, axis=0) @execute_node.register(ops.Least, collections.abc.Sequence) def execute_node_least_list(op, value, **kwargs): return compute_row_reduction(np.minimum.reduce, value, axis=0) @execute_node.register(ops.Coalesce, collections.abc.Sequence) def execute_node_coalesce(op, values, **kwargs): # TODO: this is slow return compute_row_reduction(coalesce, values) @execute_node.register(ops.ExpressionList, collections.abc.Sequence) def execute_node_expr_list(op, sequence, **kwargs): # TODO: no true approx count distinct for pandas, so we use exact for now columns = [e.get_name() for e in op.exprs] schema = ibis.schema(list(zip(columns, (e.type() for e in op.exprs)))) data = {col: [execute(el, **kwargs)] for col, el in zip(columns, sequence)} return schema.apply_to(pd.DataFrame(data, columns=columns)) def wrap_case_result(raw, expr): """Wrap a CASE statement result in a Series and handle returning scalars. 
Parameters ---------- raw : ndarray[T] The raw results of executing the ``CASE`` expression expr : ValueExpr The expression from the which `raw` was computed Returns ------- Union[scalar, Series] """ raw_1d = np.atleast_1d(raw) if np.any(pd.isnull(raw_1d)): result = pd.Series(raw_1d) else: result = pd.Series( raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()] ) if result.size == 1 and isinstance(expr, ir.ScalarExpr): return result.item() return result @execute_node.register(ops.SearchedCase, list, list, object) def execute_searched_case(op, whens, thens, otherwise, **kwargs): if otherwise is None: otherwise = np.nan raw = np.select(whens, thens, otherwise) return wrap_case_result(raw, op.to_expr()) @execute_node.register(ops.SimpleCase, object, list, list, object) def execute_simple_case_scalar(op, value, whens, thens, otherwise, **kwargs): if otherwise is None: otherwise = np.nan raw = np.select(np.asarray(whens) == value, thens, otherwise) return wrap_case_result(raw, op.to_expr()) @execute_node.register(ops.SimpleCase, pd.Series, list, list, object) def execute_simple_case_series(op, value, whens, thens, otherwise, **kwargs): if otherwise is None: otherwise = np.nan raw = np.select([value == when for when in whens], thens, otherwise) return wrap_case_result(raw, op.to_expr()) @execute_node.register(ops.Distinct, pd.DataFrame) def execute_distinct_dataframe(op, df, **kwargs): return df.drop_duplicates()
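# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: each executor above is
# registered for an operation type plus the concrete types of its arguments,
# and the dispatcher picks a matching implementation at execution time. The
# toy code below sketches that idea in plain Python; it is not ibis's actual
# dispatch machinery, and ToyNegate is a made-up operation.
def _toy_dispatch_example():
    registry = []

    def register(op_type, *arg_types):
        def decorator(func):
            registry.append((op_type, arg_types, func))
            return func
        return decorator

    def execute(op, *args, **kwargs):
        for op_type, arg_types, func in registry:
            if (isinstance(op, op_type) and len(args) == len(arg_types) and
                    all(isinstance(a, t) for a, t in zip(args, arg_types))):
                return func(op, *args, **kwargs)
        raise NotImplementedError(type(op).__name__)

    class ToyNegate(object):
        pass

    @register(ToyNegate, int)
    def _negate_int(op, value):
        return -value

    return execute(ToyNegate(), 3)  # -> -3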
apache-2.0
aerospaceresearch/SiqNAL
Modules/fourier.py
2
2245
"""
**Author :** *Jay Krishna*

This module computes the fourier transform, fourier transform power &
inverse fourier transform of the signal.
"""

import numpy as np
from scipy.fftpack import fft, fftshift, ifft
import matplotlib.pyplot as plt


def CalcFourier(signal):
    """
    This function calculates the fourier transform of a signal. The fourier
    transform's zero frequency is shifted to the centre of the spectrum.

    Parameters
    -------------------------
    signal : ndarray
        Numpy complex array of signal.

    Returns
    -----------------------
    transform : ndarray
        Numpy array of computed fourier transform.
    """
    transform = fftshift(fft(signal))
    return transform


def CalcFourierPower(signal, fs, fc):
    """
    This function calculates the power of fourier transform of a signal in
    each frequency bin.

    Parameters
    -------------------------
    signal : ndarray
        Numpy complex array of signal.
    fs : float
        Sampling frequency of the signal.
    fc : float
        Centre frequency of the signal.

    Returns
    -----------------------
    frequency : ndarray
        Numpy array of values of frequencies present in the signal.
    transform : ndarray
        Numpy array of computed fourier transform.
    """
    step = 1 / fs
    pole = fs
    value = np.absolute(signal)
    transform = 20 * np.log10(value)
    N = transform.shape[0]
    frequency = np.arange((-1 * pole) / 2 + fc, pole / 2 + fc, fs / N)
    return frequency, transform


def CalcIFourier(signal):
    """
    This function calculates the inverse fourier transform of a signal. The
    fourier transform's zero frequency is shifted to the left of the spectrum.

    Parameters
    -------------------------
    signal : ndarray
        Numpy complex array of signal.

    Returns
    -----------------------
    isignal : ndarray
        Numpy array of computed inverse fourier transform.
    """
    isignal = ifft(fftshift(signal))
    return isignal
mit
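# ---------------------------------------------------------------------------
# Illustrative usage sketch for the Modules/fourier.py helpers above; not part
# of the original repository. It assumes the repository root is on PYTHONPATH
# (so the module imports as Modules.fourier) and uses a made-up 50 Hz complex
# tone; a little noise keeps every spectral bin non-zero for the log10 power.
import numpy as np
from Modules.fourier import CalcFourier, CalcFourierPower, CalcIFourier

fs = 1000.0   # sampling frequency, Hz
fc = 0.0      # centre frequency, Hz
t = np.arange(0, 1, 1 / fs)
rng = np.random.RandomState(0)
demo_signal = (np.exp(2j * np.pi * 50 * t) +
               0.01 * (rng.randn(t.size) + 1j * rng.randn(t.size)))

demo_transform = CalcFourier(demo_signal)                 # shifted spectrum
freq, power_db = CalcFourierPower(demo_transform, fs, fc)
recovered = CalcIFourier(demo_transform)                  # back to time domain
print(freq[np.argmax(power_db)])                          # 50.0, the tone bin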
vamsirajendra/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/image.py
69
28764
""" The image module supports basic image loading, rescaling and display operations. """ from __future__ import division import os, warnings import numpy as np from numpy import ma from matplotlib import rcParams from matplotlib import artist as martist from matplotlib import colors as mcolors from matplotlib import cm # For clarity, names from _image are given explicitly in this module: from matplotlib import _image from matplotlib import _png # For user convenience, the names from _image are also imported into # the image namespace: from matplotlib._image import * class AxesImage(martist.Artist, cm.ScalarMappable): zorder = 1 # map interpolation strings to module constants _interpd = { 'nearest' : _image.NEAREST, 'bilinear' : _image.BILINEAR, 'bicubic' : _image.BICUBIC, 'spline16' : _image.SPLINE16, 'spline36' : _image.SPLINE36, 'hanning' : _image.HANNING, 'hamming' : _image.HAMMING, 'hermite' : _image.HERMITE, 'kaiser' : _image.KAISER, 'quadric' : _image.QUADRIC, 'catrom' : _image.CATROM, 'gaussian' : _image.GAUSSIAN, 'bessel' : _image.BESSEL, 'mitchell' : _image.MITCHELL, 'sinc' : _image.SINC, 'lanczos' : _image.LANCZOS, 'blackman' : _image.BLACKMAN, } # reverse interp dict _interpdr = dict([ (v,k) for k,v in _interpd.items()]) interpnames = _interpd.keys() def __str__(self): return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds) def __init__(self, ax, cmap = None, norm = None, interpolation=None, origin=None, extent=None, filternorm=1, filterrad=4.0, resample = False, **kwargs ): """ interpolation and cmap default to their rc settings cmap is a colors.Colormap instance norm is a colors.Normalize instance to map luminance to 0-1 extent is data axes (left, right, bottom, top) for making image plots registered with data plots. Default is to label the pixel centers with the zero-based row and column indices. 
Additional kwargs are matplotlib.artist properties """ martist.Artist.__init__(self) cm.ScalarMappable.__init__(self, norm, cmap) if origin is None: origin = rcParams['image.origin'] self.origin = origin self._extent = extent self.set_filternorm(filternorm) self.set_filterrad(filterrad) self._filterrad = filterrad self.set_interpolation(interpolation) self.set_resample(resample) self.axes = ax self._imcache = None self.update(kwargs) def get_size(self): 'Get the numrows, numcols of the input image' if self._A is None: raise RuntimeError('You must first set the image array') return self._A.shape[:2] def set_alpha(self, alpha): """ Set the alpha value used for blending - not supported on all backends ACCEPTS: float """ martist.Artist.set_alpha(self, alpha) self._imcache = None def changed(self): """ Call this whenever the mappable is changed so observers can update state """ self._imcache = None self._rgbacache = None cm.ScalarMappable.changed(self) def make_image(self, magnification=1.0): if self._A is None: raise RuntimeError('You must first set the image array or the image attribute') xmin, xmax, ymin, ymax = self.get_extent() dxintv = xmax-xmin dyintv = ymax-ymin # the viewport scale factor sx = dxintv/self.axes.viewLim.width sy = dyintv/self.axes.viewLim.height numrows, numcols = self._A.shape[:2] if sx > 2: x0 = (self.axes.viewLim.x0-xmin)/dxintv * numcols ix0 = max(0, int(x0 - self._filterrad)) x1 = (self.axes.viewLim.x1-xmin)/dxintv * numcols ix1 = min(numcols, int(x1 + self._filterrad)) xslice = slice(ix0, ix1) xmin_old = xmin xmin = xmin_old + ix0*dxintv/numcols xmax = xmin_old + ix1*dxintv/numcols dxintv = xmax - xmin sx = dxintv/self.axes.viewLim.width else: xslice = slice(0, numcols) if sy > 2: y0 = (self.axes.viewLim.y0-ymin)/dyintv * numrows iy0 = max(0, int(y0 - self._filterrad)) y1 = (self.axes.viewLim.y1-ymin)/dyintv * numrows iy1 = min(numrows, int(y1 + self._filterrad)) if self.origin == 'upper': yslice = slice(numrows-iy1, numrows-iy0) else: yslice = slice(iy0, iy1) ymin_old = ymin ymin = ymin_old + iy0*dyintv/numrows ymax = ymin_old + iy1*dyintv/numrows dyintv = ymax - ymin sy = dyintv/self.axes.viewLim.height else: yslice = slice(0, numrows) if xslice != self._oldxslice or yslice != self._oldyslice: self._imcache = None self._oldxslice = xslice self._oldyslice = yslice if self._imcache is None: if self._A.dtype == np.uint8 and len(self._A.shape) == 3: im = _image.frombyte(self._A[yslice,xslice,:], 0) im.is_grayscale = False else: if self._rgbacache is None: x = self.to_rgba(self._A, self._alpha) self._rgbacache = x else: x = self._rgbacache im = _image.fromarray(x[yslice,xslice], 0) if len(self._A.shape) == 2: im.is_grayscale = self.cmap.is_gray() else: im.is_grayscale = False self._imcache = im if self.origin=='upper': im.flipud_in() else: im = self._imcache fc = self.axes.patch.get_facecolor() bg = mcolors.colorConverter.to_rgba(fc, 0) im.set_bg( *bg) # image input dimensions im.reset_matrix() numrows, numcols = im.get_size() im.set_interpolation(self._interpd[self._interpolation]) im.set_resample(self._resample) # the viewport translation tx = (xmin-self.axes.viewLim.x0)/dxintv * numcols ty = (ymin-self.axes.viewLim.y0)/dyintv * numrows l, b, r, t = self.axes.bbox.extents widthDisplay = (round(r) + 0.5) - (round(l) - 0.5) heightDisplay = (round(t) + 0.5) - (round(b) - 0.5) widthDisplay *= magnification heightDisplay *= magnification im.apply_translation(tx, ty) # resize viewport to display rx = widthDisplay / numcols ry = heightDisplay / numrows 
im.apply_scaling(rx*sx, ry*sy) im.resize(int(widthDisplay+0.5), int(heightDisplay+0.5), norm=self._filternorm, radius=self._filterrad) return im def draw(self, renderer, *args, **kwargs): if not self.get_visible(): return if (self.axes.get_xscale() != 'linear' or self.axes.get_yscale() != 'linear'): warnings.warn("Images are not supported on non-linear axes.") im = self.make_image(renderer.get_image_magnification()) im._url = self.get_url() l, b, widthDisplay, heightDisplay = self.axes.bbox.bounds clippath, affine = self.get_transformed_clip_path_and_affine() renderer.draw_image(round(l), round(b), im, self.axes.bbox.frozen(), clippath, affine) def contains(self, mouseevent): """Test whether the mouse event occured within the image. """ if callable(self._contains): return self._contains(self,mouseevent) # TODO: make sure this is consistent with patch and patch # collection on nonlinear transformed coordinates. # TODO: consider returning image coordinates (shouldn't # be too difficult given that the image is rectilinear x, y = mouseevent.xdata, mouseevent.ydata xmin, xmax, ymin, ymax = self.get_extent() if xmin > xmax: xmin,xmax = xmax,xmin if ymin > ymax: ymin,ymax = ymax,ymin #print x, y, xmin, xmax, ymin, ymax if x is not None and y is not None: inside = x>=xmin and x<=xmax and y>=ymin and y<=ymax else: inside = False return inside,{} def write_png(self, fname, noscale=False): """Write the image to png file with fname""" im = self.make_image() if noscale: numrows, numcols = im.get_size() im.reset_matrix() im.set_interpolation(0) im.resize(numcols, numrows) im.flipud_out() rows, cols, buffer = im.as_rgba_str() _png.write_png(buffer, cols, rows, fname) def set_data(self, A, shape=None): """ Set the image array ACCEPTS: numpy/PIL Image A""" # check if data is PIL Image without importing Image if hasattr(A,'getpixel'): self._A = pil_to_array(A) elif ma.isMA(A): self._A = A else: self._A = np.asarray(A) # assume array if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float): raise TypeError("Image data can not convert to float") if (self._A.ndim not in (2, 3) or (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))): raise TypeError("Invalid dimensions for image data") self._imcache =None self._rgbacache = None self._oldxslice = None self._oldyslice = None def set_array(self, A): """ retained for backwards compatibility - use set_data instead ACCEPTS: numpy array A or PIL Image""" # This also needs to be here to override the inherited # cm.ScalarMappable.set_array method so it is not invoked # by mistake. self.set_data(A) def set_extent(self, extent): """extent is data axes (left, right, bottom, top) for making image plots """ self._extent = extent xmin, xmax, ymin, ymax = extent corners = (xmin, ymin), (xmax, ymax) self.axes.update_datalim(corners) if self.axes._autoscaleon: self.axes.set_xlim((xmin, xmax)) self.axes.set_ylim((ymin, ymax)) def get_interpolation(self): """ Return the interpolation method the image uses when resizing. One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', """ return self._interpolation def set_interpolation(self, s): """ Set the interpolation method the image uses when resizing. 
ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' | 'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' | 'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' | 'sinc' | 'lanczos' | ] """ if s is None: s = rcParams['image.interpolation'] s = s.lower() if s not in self._interpd: raise ValueError('Illegal interpolation string') self._interpolation = s def set_resample(self, v): if v is None: v = rcParams['image.resample'] self._resample = v def get_interpolation(self): return self._resample def get_extent(self): 'get the image extent: left, right, bottom, top' if self._extent is not None: return self._extent else: sz = self.get_size() #print 'sz', sz numrows, numcols = sz if self.origin == 'upper': return (-0.5, numcols-0.5, numrows-0.5, -0.5) else: return (-0.5, numcols-0.5, -0.5, numrows-0.5) def set_filternorm(self, filternorm): """Set whether the resize filter norms the weights -- see help for imshow ACCEPTS: 0 or 1 """ if filternorm: self._filternorm = 1 else: self._filternorm = 0 def get_filternorm(self): 'return the filternorm setting' return self._filternorm def set_filterrad(self, filterrad): """Set the resize filter radius only applicable to some interpolation schemes -- see help for imshow ACCEPTS: positive float """ r = float(filterrad) assert(r>0) self._filterrad = r def get_filterrad(self): 'return the filterrad setting' return self._filterrad class NonUniformImage(AxesImage): def __init__(self, ax, **kwargs ): interp = kwargs.pop('interpolation', 'nearest') AxesImage.__init__(self, ax, **kwargs) AxesImage.set_interpolation(self, interp) def make_image(self, magnification=1.0): if self._A is None: raise RuntimeError('You must first set the image array') x0, y0, v_width, v_height = self.axes.viewLim.bounds l, b, r, t = self.axes.bbox.extents width = (round(r) + 0.5) - (round(l) - 0.5) height = (round(t) + 0.5) - (round(b) - 0.5) width *= magnification height *= magnification im = _image.pcolor(self._Ax, self._Ay, self._A, height, width, (x0, x0+v_width, y0, y0+v_height), self._interpd[self._interpolation]) fc = self.axes.patch.get_facecolor() bg = mcolors.colorConverter.to_rgba(fc, 0) im.set_bg(*bg) im.is_grayscale = self.is_grayscale return im def set_data(self, x, y, A): x = np.asarray(x,np.float32) y = np.asarray(y,np.float32) if not ma.isMA(A): A = np.asarray(A) if len(x.shape) != 1 or len(y.shape) != 1\ or A.shape[0:2] != (y.shape[0], x.shape[0]): raise TypeError("Axes don't match array shape") if len(A.shape) not in [2, 3]: raise TypeError("Can only plot 2D or 3D data") if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]: raise TypeError("3D arrays must have three (RGB) or four (RGBA) color components") if len(A.shape) == 3 and A.shape[2] == 1: A.shape = A.shape[0:2] if len(A.shape) == 2: if A.dtype != np.uint8: A = (self.cmap(self.norm(A))*255).astype(np.uint8) self.is_grayscale = self.cmap.is_gray() else: A = np.repeat(A[:,:,np.newaxis], 4, 2) A[:,:,3] = 255 self.is_grayscale = True else: if A.dtype != np.uint8: A = (255*A).astype(np.uint8) if A.shape[2] == 3: B = zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8) B[:,:,0:3] = A B[:,:,3] = 255 A = B self.is_grayscale = False self._A = A self._Ax = x self._Ay = y self._imcache = None def set_array(self, *args): raise NotImplementedError('Method not supported') def set_interpolation(self, s): if s != None and not s in ('nearest','bilinear'): raise NotImplementedError('Only nearest neighbor and bilinear interpolations are supported') AxesImage.set_interpolation(self, s) def get_extent(self): if self._A is 
None: raise RuntimeError('Must set data first') return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1] def set_filternorm(self, s): pass def set_filterrad(self, s): pass def set_norm(self, norm): if self._A is not None: raise RuntimeError('Cannot change colors after loading data') cm.ScalarMappable.set_norm(self, norm) def set_cmap(self, cmap): if self._A is not None: raise RuntimeError('Cannot change colors after loading data') cm.ScalarMappable.set_cmap(self, norm) class PcolorImage(martist.Artist, cm.ScalarMappable): ''' Make a pcolor-style plot with an irregular rectangular grid. This uses a variation of the original irregular image code, and it is used by pcolorfast for the corresponding grid type. ''' def __init__(self, ax, x=None, y=None, A=None, cmap = None, norm = None, **kwargs ): """ cmap defaults to its rc setting cmap is a colors.Colormap instance norm is a colors.Normalize instance to map luminance to 0-1 Additional kwargs are matplotlib.artist properties """ martist.Artist.__init__(self) cm.ScalarMappable.__init__(self, norm, cmap) self.axes = ax self._rgbacache = None self.update(kwargs) self.set_data(x, y, A) def make_image(self, magnification=1.0): if self._A is None: raise RuntimeError('You must first set the image array') fc = self.axes.patch.get_facecolor() bg = mcolors.colorConverter.to_rgba(fc, 0) bg = (np.array(bg)*255).astype(np.uint8) l, b, r, t = self.axes.bbox.extents width = (round(r) + 0.5) - (round(l) - 0.5) height = (round(t) + 0.5) - (round(b) - 0.5) width = width * magnification height = height * magnification if self.check_update('array'): A = self.to_rgba(self._A, alpha=self._alpha, bytes=True) self._rgbacache = A if self._A.ndim == 2: self.is_grayscale = self.cmap.is_gray() else: A = self._rgbacache vl = self.axes.viewLim im = _image.pcolor2(self._Ax, self._Ay, A, height, width, (vl.x0, vl.x1, vl.y0, vl.y1), bg) im.is_grayscale = self.is_grayscale return im def draw(self, renderer, *args, **kwargs): if not self.get_visible(): return im = self.make_image(renderer.get_image_magnification()) renderer.draw_image(round(self.axes.bbox.xmin), round(self.axes.bbox.ymin), im, self.axes.bbox.frozen(), *self.get_transformed_clip_path_and_affine()) def set_data(self, x, y, A): if not ma.isMA(A): A = np.asarray(A) if x is None: x = np.arange(0, A.shape[1]+1, dtype=np.float64) else: x = np.asarray(x, np.float64).ravel() if y is None: y = np.arange(0, A.shape[0]+1, dtype=np.float64) else: y = np.asarray(y, np.float64).ravel() if A.shape[:2] != (y.size-1, x.size-1): print A.shape print y.size print x.size raise ValueError("Axes don't match array shape") if A.ndim not in [2, 3]: raise ValueError("A must be 2D or 3D") if A.ndim == 3 and A.shape[2] == 1: A.shape = A.shape[:2] self.is_grayscale = False if A.ndim == 3: if A.shape[2] in [3, 4]: if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all(): self.is_grayscale = True else: raise ValueError("3D arrays must have RGB or RGBA as last dim") self._A = A self._Ax = x self._Ay = y self.update_dict['array'] = True def set_array(self, *args): raise NotImplementedError('Method not supported') def set_alpha(self, alpha): """ Set the alpha value used for blending - not supported on all backends ACCEPTS: float """ martist.Artist.set_alpha(self, alpha) self.update_dict['array'] = True class FigureImage(martist.Artist, cm.ScalarMappable): zorder = 1 def __init__(self, fig, cmap = None, norm = None, offsetx = 0, offsety = 0, origin=None, **kwargs ): """ cmap is a colors.Colormap instance norm is a colors.Normalize 
instance to map luminance to 0-1 kwargs are an optional list of Artist keyword args """ martist.Artist.__init__(self) cm.ScalarMappable.__init__(self, norm, cmap) if origin is None: origin = rcParams['image.origin'] self.origin = origin self.figure = fig self.ox = offsetx self.oy = offsety self.update(kwargs) self.magnification = 1.0 def contains(self, mouseevent): """Test whether the mouse event occured within the image. """ if callable(self._contains): return self._contains(self,mouseevent) xmin, xmax, ymin, ymax = self.get_extent() xdata, ydata = mouseevent.x, mouseevent.y #print xdata, ydata, xmin, xmax, ymin, ymax if xdata is not None and ydata is not None: inside = xdata>=xmin and xdata<=xmax and ydata>=ymin and ydata<=ymax else: inside = False return inside,{} def get_size(self): 'Get the numrows, numcols of the input image' if self._A is None: raise RuntimeError('You must first set the image array') return self._A.shape[:2] def get_extent(self): 'get the image extent: left, right, bottom, top' numrows, numcols = self.get_size() return (-0.5+self.ox, numcols-0.5+self.ox, -0.5+self.oy, numrows-0.5+self.oy) def make_image(self, magnification=1.0): if self._A is None: raise RuntimeError('You must first set the image array') x = self.to_rgba(self._A, self._alpha) self.magnification = magnification # if magnification is not one, we need to resize ismag = magnification!=1 #if ismag: raise RuntimeError if ismag: isoutput = 0 else: isoutput = 1 im = _image.fromarray(x, isoutput) fc = self.figure.get_facecolor() im.set_bg( *mcolors.colorConverter.to_rgba(fc, 0) ) im.is_grayscale = (self.cmap.name == "gray" and len(self._A.shape) == 2) if ismag: numrows, numcols = self.get_size() numrows *= magnification numcols *= magnification im.set_interpolation(_image.NEAREST) im.resize(numcols, numrows) if self.origin=='upper': im.flipud_out() return im def draw(self, renderer, *args, **kwargs): if not self.get_visible(): return # todo: we should be able to do some cacheing here im = self.make_image(renderer.get_image_magnification()) renderer.draw_image(round(self.ox), round(self.oy), im, self.figure.bbox, *self.get_transformed_clip_path_and_affine()) def write_png(self, fname): """Write the image to png file with fname""" im = self.make_image() rows, cols, buffer = im.as_rgba_str() _png.write_png(buffer, cols, rows, fname) def imread(fname): """ Return image file in *fname* as :class:`numpy.array`. Return value is a :class:`numpy.array`. For grayscale images, the return array is MxN. For RGB images, the return value is MxNx3. For RGBA images the return value is MxNx4. matplotlib can only read PNGs natively, but if `PIL <http://www.pythonware.com/products/pil/>`_ is installed, it will use it to load the image and return an array (if possible) which can be used with :func:`~matplotlib.pyplot.imshow`. TODO: support RGB and grayscale return values in _image.readpng """ def pilread(): 'try to load the image with PIL or return None' try: import Image except ImportError: return None image = Image.open( fname ) return pil_to_array(image) handlers = {'png' :_png.read_png, } basename, ext = os.path.splitext(fname) ext = ext.lower()[1:] if ext not in handlers.keys(): im = pilread() if im is None: raise ValueError('Only know how to handle extensions: %s; with PIL installed matplotlib can handle more images' % handlers.keys()) return im handler = handlers[ext] return handler(fname) def pil_to_array( pilImage ): """ load a PIL image and return it as a numpy array of uint8. 
For grayscale images, the return array is MxN. For RGB images, the return value is MxNx3. For RGBA images the return value is MxNx4 """ def toarray(im): 'return a 1D array of floats' x_str = im.tostring('raw',im.mode,0,-1) x = np.fromstring(x_str,np.uint8) return x if pilImage.mode in ('RGBA', 'RGBX'): im = pilImage # no need to convert images elif pilImage.mode=='L': im = pilImage # no need to luminance images # return MxN luminance array x = toarray(im) x.shape = im.size[1], im.size[0] return x elif pilImage.mode=='RGB': #return MxNx3 RGB array im = pilImage # no need to RGB images x = toarray(im) x.shape = im.size[1], im.size[0], 3 return x else: # try to convert to an rgba image try: im = pilImage.convert('RGBA') except ValueError: raise RuntimeError('Unknown image mode') # return MxNx4 RGBA array x = toarray(im) x.shape = im.size[1], im.size[0], 4 return x def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear', preview=False): """ make a thumbnail of image in *infile* with output filename *thumbfile*. *infile* the image file -- must be PNG or PIL readable if you have `PIL <http://www.pythonware.com/products/pil/>`_ installed *thumbfile* the thumbnail filename *scale* the scale factor for the thumbnail *interpolation* the interpolation scheme used in the resampling *preview* if True, the default backend (presumably a user interface backend) will be used which will cause a figure to be raised if :func:`~matplotlib.pyplot.show` is called. If it is False, a pure image backend will be used depending on the extension, 'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPDF, 'svg'->FigureCanvasSVG See examples/misc/image_thumbnail.py. .. htmlonly:: :ref:`misc-image_thumbnail` Return value is the figure instance containing the thumbnail """ basedir, basename = os.path.split(infile) baseout, extout = os.path.splitext(thumbfile) im = imread(infile) rows, cols, depth = im.shape # this doesn't really matter, it will cancel in the end, but we # need it for the mpl API dpi = 100 height = float(rows)/dpi*scale width = float(cols)/dpi*scale extension = extout.lower() if preview: # let the UI backend do everything import matplotlib.pyplot as plt fig = plt.figure(figsize=(width, height), dpi=dpi) else: if extension=='.png': from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas elif extension=='.pdf': from matplotlib.backends.backend_pdf import FigureCanvasPDF as FigureCanvas elif extension=='.svg': from matplotlib.backends.backend_svg import FigureCanvasSVG as FigureCanvas else: raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'") from matplotlib.figure import Figure fig = Figure(figsize=(width, height), dpi=dpi) canvas = FigureCanvas(fig) ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[]) basename, ext = os.path.splitext(basename) ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear') fig.savefig(thumbfile, dpi=dpi) return fig
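# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: an AxesImage is
# normally created for you by pyplot.imshow. The guarded demo below builds a
# small RGB array (arbitrary example data) and displays it.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo = np.zeros((64, 64, 3))
    demo[:, :, 0] = np.linspace(0, 1, 64)            # red ramp along columns
    demo[:, :, 2] = np.linspace(0, 1, 64)[:, None]   # blue ramp along rows
    ax_im = plt.imshow(demo, interpolation='nearest', origin='upper')
    print(type(ax_im))                               # an AxesImage instance
    plt.show()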
agpl-3.0
ssaeger/scikit-learn
benchmarks/bench_random_projections.py
397
8900
""" =========================== Random projection benchmark =========================== Benchmarks for random projections. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import collections import numpy as np import scipy.sparse as sp from sklearn import clone from sklearn.externals.six.moves import xrange from sklearn.random_projection import (SparseRandomProjection, GaussianRandomProjection, johnson_lindenstrauss_min_dim) def type_auto_or_float(val): if val == "auto": return "auto" else: return float(val) def type_auto_or_int(val): if val == "auto": return "auto" else: return int(val) def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_scikit_transformer(X, transfomer): gc.collect() clf = clone(transfomer) # start time t_start = datetime.now() clf.fit(X) delta = (datetime.now() - t_start) # stop time time_to_fit = compute_time(t_start, delta) # start time t_start = datetime.now() clf.transform(X) delta = (datetime.now() - t_start) # stop time time_to_transform = compute_time(t_start, delta) return time_to_fit, time_to_transform # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None): rng = np.random.RandomState(random_state) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def print_row(clf_type, time_fit, time_transform): print("%s | %s | %s" % (clf_type.ljust(30), ("%.4fs" % time_fit).center(12), ("%.4fs" % time_transform).center(12))) if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-features", dest="n_features", default=10 ** 4, type=int, help="Number of features in the benchmarks") op.add_option("--n-components", dest="n_components", default="auto", help="Size of the random subspace." " ('auto' or int > 0)") op.add_option("--ratio-nonzeros", dest="ratio_nonzeros", default=10 ** -3, type=float, help="Number of features in the benchmarks") op.add_option("--n-samples", dest="n_samples", default=500, type=int, help="Number of samples in the benchmarks") op.add_option("--random-seed", dest="random_seed", default=13, type=int, help="Seed used by the random number generators.") op.add_option("--density", dest="density", default=1 / 3, help="Density used by the sparse random projection." " ('auto' or float (0.0, 1.0]") op.add_option("--eps", dest="eps", default=0.5, type=float, help="See the documentation of the underlying transformers.") op.add_option("--transformers", dest="selected_transformers", default='GaussianRandomProjection,SparseRandomProjection', type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
Available: " "GaussianRandomProjection,SparseRandomProjection") op.add_option("--dense", dest="dense", default=False, action="store_true", help="Set input space as a dense matrix.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) opts.n_components = type_auto_or_int(opts.n_components) opts.density = type_auto_or_float(opts.density) selected_transformers = opts.selected_transformers.split(',') ########################################################################### # Generate dataset ########################################################################### n_nonzeros = int(opts.ratio_nonzeros * opts.n_features) print('Dataset statics') print("===========================") print('n_samples \t= %s' % opts.n_samples) print('n_features \t= %s' % opts.n_features) if opts.n_components == "auto": print('n_components \t= %s (auto)' % johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)) else: print('n_components \t= %s' % opts.n_components) print('n_elements \t= %s' % (opts.n_features * opts.n_samples)) print('n_nonzeros \t= %s per feature' % n_nonzeros) print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros) print('') ########################################################################### # Set transformer input ########################################################################### transformers = {} ########################################################################### # Set GaussianRandomProjection input gaussian_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed } transformers["GaussianRandomProjection"] = \ GaussianRandomProjection(**gaussian_matrix_params) ########################################################################### # Set SparseRandomProjection input sparse_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed, "density": opts.density, "eps": opts.eps, } transformers["SparseRandomProjection"] = \ SparseRandomProjection(**sparse_matrix_params) ########################################################################### # Perform benchmark ########################################################################### time_fit = collections.defaultdict(list) time_transform = collections.defaultdict(list) print('Benchmarks') print("===========================") print("Generate dataset benchmarks... ", end="") X_dense, X_sparse = make_sparse_random_data(opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed) X = X_dense if opts.dense else X_sparse print("done") for name in selected_transformers: print("Perform benchmarks for %s..." % name) for iteration in xrange(opts.n_times): print("\titer %s..." % iteration, end="") time_to_fit, time_to_transform = bench_scikit_transformer(X_dense, transformers[name]) time_fit[name].append(time_to_fit) time_transform[name].append(time_to_transform) print("done") print("") ########################################################################### # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Transformer performance:") print("===========================") print("Results are averaged over %s repetition(s)." 
% opts.n_times) print("") print("%s | %s | %s" % ("Transformer".ljust(30), "fit".center(12), "transform".center(12))) print(31 * "-" + ("|" + "-" * 14) * 2) for name in sorted(selected_transformers): print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name])) print("") print("")
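# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original benchmark: the transformers
# timed above are ordinary scikit-learn estimators. The helper below is a
# hypothetical, minimal end-to-end example with arbitrary sizes; it reuses the
# module-level imports above and is not called by the benchmark itself.
def _random_projection_demo(n_samples=500, n_features=10000, eps=0.5, seed=13):
    rng = np.random.RandomState(seed)
    X_demo = rng.rand(n_samples, n_features)
    # smallest target dimensionality the JL lemma guarantees for this eps
    n_components = johnson_lindenstrauss_min_dim(n_samples=n_samples, eps=eps)
    transformer = SparseRandomProjection(n_components=n_components,
                                         random_state=seed)
    X_projected = transformer.fit_transform(X_demo)
    return X_projected.shape   # (n_samples, n_components)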
bsd-3-clause
raghavrv/scikit-learn
examples/ensemble/plot_gradient_boosting_quantile.py
392
2114
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================

This example shows how quantile regression can be used to create prediction
intervals.
"""

import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import GradientBoostingRegressor

np.random.seed(1)


def f(x):
    """The function to predict."""
    return x * np.sin(x)

#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)

# Observations
y = f(X).ravel()

dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)

# Mesh the input space for evaluations of the real function and the
# predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

alpha = 0.95

clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)

clf.fit(X, y)

# Make the prediction on the meshed x-axis (0.95 quantile)
y_upper = clf.predict(xx)

clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (0.05 quantile)
y_lower = clf.predict(xx)

clf.set_params(loss='ls')
clf.fit(X, y)

# Make the prediction on the meshed x-axis (least-squares fit)
y_pred = clf.predict(xx)

# Plot the function, the prediction and the 90% prediction interval given by
# the upper and lower quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
bsd-3-clause
prheenan/Research
Personal/EventDetection/Util/InputOutput.py
1
15554
# force floating point division. Can still use integer with // from __future__ import division # This file is used for importing the common utilities classes. import numpy as np import matplotlib.pyplot as plt import sys,os from scipy import interpolate from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util from scipy.stats import norm from GeneralUtil.python import CheckpointUtilities,GenUtilities,PlotUtilities import csv,re from IgorUtil.PythonAdapter import TimeSepForceObj,DataObj,ProcessSingleWave class ForceExtensionCategory: def __init__(self,number,directory=None,sample=None,velocity_nm_s=None, has_events=False,downsample=None): self.category_number = number self.directory = directory self.velocity_nm_s = velocity_nm_s self.sample = sample self.has_events = has_events self.data = None self.downsample_factor =downsample self.scores = None if (downsample is not None): self.is_simulated=True else: self.is_simulated = False def set_scores(self,scores): self.scores = scores def set_data(self,data): """ sets the pointer to the list of TimeSepForce objects for this category Args: data: list of TimeSepForce objects Returns: nothing """ self.data = data def _base_curated(data_base=None): """ Args: data_base: where the network drive lives. Returns: the base to the curated data for the masters thesis """ if (data_base is None): data_base = FEC_Util.default_data_root() network = data_base base_directory = network + "/4Patrick/CuratedData/Masters_CSCI/" return base_directory def get_positives_directory(*args,**kwargs): """ reads the (csv) file at file_path, cachine it to cache_directory, reading in events Args; see _base_curated Returns positive categories base directory """ base_directory = _base_curated(*args,**kwargs) positives_directory = base_directory + "Positive/650nm-4x-bio/csv/" return positives_directory def get_protein_directory(*args,**kwargs): """ the same as get_positives_directory, except for protein Args: see get_positives_directory Returns: string to the protein base """ base_directory = _base_curated(*args,**kwargs) return base_directory + "Positive/4nug2_alpha3D_MarcAndre/csv/" def read_and_cache_file(file_path,cache_directory,has_events=False,force=True): """ reads the (csv) file at file_path, cachine it to cache_directory, reading in events Args; file_path: where the file lives cache_directory: where to cache the file has_events: if this file has events force: if true, force a read Returns TimeSepForce Object """ file_name = os.path.basename(file_path) cache_file = cache_directory + file_name+ ".pkl" func_to_call = FEC_Util.read_time_sep_force_from_csv return CheckpointUtilities.getCheckpoint(cache_file,func_to_call,force, file_path,has_events=has_events) def get_category_data(r_obj,force,cache_directory,limit): """ gets the data for a single category data, caching on a per-data basis Args: r_obj: the category object to use others: see set_and_cache_category_data Returns: list of time,sep,force objects to use """ # restart the limit each category limit_tmp = limit data_in_category = [] # get the label for this dataset. 
dir_v = r_obj.directory all_files = GenUtilities.getAllFiles(dir_v,ext=".csv") kwargs =dict(cache_directory=cache_directory, has_events = r_obj.has_events,force=force) # reach all the files until we reach the limit for f in all_files: data_file_tmp = read_and_cache_file(f,**kwargs) data_in_category.append(data_file_tmp) limit_tmp = limit_tmp - 1 if (limit_tmp <= 0): break return data_in_category def set_and_cache_category_data(categories,force,cache_directory,limit): """ loops through each category, reading in at most limit files per category, caching the csv files to cache_directory Args: categories: list of ForceExtensionCategory objects. will have their data set with the appropriate TimeSepForce objects force: whether to force re-reading cache_directory: where to save the files limit: maximum number of files to each (per category) Returns: nothing, but sets the data of set_categories """ for i,r_obj in enumerate(categories): data = get_category_data(r_obj,force,cache_directory,limit) # set the data in this category r_obj.set_data(data_in_category) def category_read(category,force,cache_directory,limit,debugging=False): """ Reads in all the data associated with a category Args: category: ForceExtensionCategory object force: if true, force re-reading cache_directory: if force is not true, where to re-read from limit: maximum number to re-read Returns: list of TimeSepForce objects """ try: return get_category_data(category,force,cache_directory,limit) except OSError as e: if (not debugging): raise(e) if (category.category_number != 0): return [] print(e) # just read in the files that live here XXX just for debugging file_names = GenUtilities.getAllFiles(cache_directory,ext="csv.pkl") all_files = [CheckpointUtilities.getCheckpoint(f,None,False) for f in file_names] return all_files def simulated_read(downsample_from,category,limit): """ a function which reads in the first [limit] force extension curves from downsample_from, slicing the data by category.downsample_factor (assumed > 1) Args: categories: list of categories to read in force_read,cache_directory,limit): see Learning.get_cached_folds """ n_step = int(category.downsample_factor) tol = 1e-9 assert n_step > 1, "simulation should have downfactor > 1" assert abs((n_step-int(n_step))) < tol,\ "simulation should have integer downfactor" data = [] for l in range(limit): tmp = downsample_from.data[l] slice_v = slice(0,None,n_step) data_tmp = FEC_Util.MakeTimeSepForceFromSlice(tmp,slice_v) data.append(data_tmp) return data def read_categories(categories,force_read,cache_directory,limit): """ a function to read in a most limit force-extension curves, caching as we go Args: categories: list of categories to read in force_read,cache_directory,limit): see Learning.get_cached_folds """ for c in categories: # skip simulated categories initially if (c.is_simulated): continue data_tmp = category_read(c,force_read,cache_directory,limit) c.set_data(data_tmp) # POST: actual data is set up. 
go ahead and get any simulated data # get the lowest loading rate data to downsample loading_rates_effective = [c.velocity_nm_s if not c.is_simulated else np.inf for c in categories] highest_sampled_idx = np.argmin(loading_rates_effective) # use the highest sampled highest_sampled_category = categories[highest_sampled_idx] for c in categories: if (not c.is_simulated): continue vel_eff = highest_sampled_category.velocity_nm_s*c.downsample_factor c.velocity_nm_s = vel_eff file_path = "{:s}_sim_{:.1f}".format(cache_directory,c.velocity_nm_s) data_tmp = CheckpointUtilities.\ getCheckpoint(file_path,simulated_read,force_read, highest_sampled_category,c,limit) c.set_data(data_tmp) return categories def protein_categories(base_directory=get_protein_directory()): protein_meta = [ [base_directory,"NUG2",500]] # create objects to represent our data categories protein_categories = [ForceExtensionCategory(i,*r,has_events=True) for i,r in enumerate(protein_meta)] return protein_categories def get_categories(positives_directory,use_simulated=False,only_lowest=False): """ get all the categories associated with the loading rates we will use Args: positives_directory: base directory where things live use_simualted: for timing experiments, create down-sampled data only_lowest: if true, return only the lowest category Returns: list of ForceExtensionCategory """ # tuple of <relative directory,sample,velocity> for FEC with events max_load = 1000 positive_meta = \ [[positives_directory + "1000-nanometers-per-second/","650nm DNA",max_load], [positives_directory + "500-nanometers-per-second/","650nm DNA",500], [positives_directory + "100-nanometers-per-second/","650nm DNA",100]] if (only_lowest): positive_meta = [positive_meta[0]] # create objects to represent our data categories positive_categories = [ForceExtensionCategory(i,*r,has_events=True) for i,r in enumerate(positive_meta)] if (use_simulated): downsample_factors = sorted([2,3,4,10,20,100,1000]) kw = lambda i: dict(number=(len( positive_categories) + i)) simulated_categories = [ForceExtensionCategory(downsample=d,**kw(i)) for i,d in enumerate(downsample_factors)] else: simulated_categories = [] return simulated_categories[::-1] + positive_categories def get_events(file_name): """ given a file formatted like an event file, reads them in Args: file_name: path to read. Returns: list of tupels like <name of event, start index, end of index> """ # get the file without the '.pxp' extension f_no_ext = file_name[:-4] # add on the suffix for the events ( by convention) f_events = f_no_ext + "_events.txt" # read in the (csv) file assert os.path.isfile(f_events) , \ "Couldn't find event file for {:s}".format(f_events) # POST: event file exists # the syntax is <name,start of event, end of event> separated by commss with open(f_events) as csvfile: spamreader = csv.reader(csvfile, delimiter=',') data = [ [str(r[0].strip()),int(r[1]),int(r[2])] for r in spamreader] return data def read_single_directory_with_events(directory): """ Reads in pxp files and their associated events Args: directory: where to read the pxp (each is assumed to have """ pxp_files,data = FEC_Util.read_single_directory(directory) # each pxp file should have events associated with it events = [get_events(f) for f in pxp_files] # create a tuple of everything... combined = [(file_name,dat,ev) \ for file_name,dat,ev in zip(pxp_files,data,events)] return combined def get_id(x): """ Gets the id associated with the string x Args: x: probably something like the name of a trace Returns: id (e.g. 
Image0004_Force would give Image0004) """ id_regexpr = r""" (\D+ # any number of non-numbers \d+) # any number of digits \D?""" # anything *not* a digit match = re.match(id_regexpr,x,re.VERBOSE) assert match is not None , "Couldn't find id of {:s}".format(x) return match.group(1).strip().lower() def set_events_of_data(data,events): """ sets the events of all the data traces Args: data: list of time_sep_force objects events: list of <Trace Name, start,end> giving the events Returns: nothing but sets the object's events appropriately """ id_data = [get_id(d.Meta.Name) for d in data] # possible the data was double-annotated; get rid of duplicate events events_id_unique = ["".join(str(v) for v in e) for e in events] _, idx = np.unique(events_id_unique,return_index=True) events = [events[i] for i in idx] # POST: each event in events is unique id_events = [get_id(e[0]) for e in events] # determine matches; may have multiple events eq = lambda x,y: x == y id_parity_check = [] data_to_ret = [] for idx_tmp,(id_data_tmp,d) in enumerate(zip(id_data,data)): # find which index (in id_events) corresponds to id_data_tmp # XXX quadratic time... small numbers (hundreds), dont care matching_idx = [j for j,id_ev in enumerate(id_events) if eq(id_ev,id_data_tmp)] id_parity_check.extend(matching_idx) # make sure we have at least one event for the data... if (len(matching_idx) == 0): print("Couldnt find events for {:s}, removing". format(str(id_data_tmp))) continue # POST: have at least one data_to_ret.append(d) # get the actual events events_matching = [events[i] for i in matching_idx] # add the events to the TimeSepForce Object. Note that # an event 'e' is like <name,start,end> so we just get the starts # and ends starts_and_ends = [e[1:] for e in events_matching] Events = [TimeSepForceObj.Event(*s) for s in starts_and_ends] # set the events of the data d.set_events(Events) assert len(id_parity_check) == len(set(id_parity_check)) , "Double jeopardy" # POST: an event only mapped to one FEC_Util n_events = len(id_events) n_matched =len(id_parity_check) if (n_matched < n_events): unused = [events[i] for i in range(n_events) if i not in id_parity_check] print("Warning: The following events were unused: {:s}".format(unused)) print("{:d}/{:d} events matched".format(n_matched,n_events)) return data_to_ret def output_waves_in_directory_to_csv_files(input_directory,output_directory): """ reads all the waves from all pxp files in input_directory and outputs as csv files Args: input_directory: where to search for pxp files output_directory: where to put the csv files Returns: Nothing, prints as it goes. """ d,d_out = input_directory, output_directory # make the output directory GenUtilities.ensureDirExists(d_out) # go through each PXP in this directory print("Looking in {:s} for force-extension curves...".format(d)) files_data_events = read_single_directory_with_events(d) n_curves = sum([len(d) for _,d,_ in files_data_events]) print("Found {:d} curves".format(n_curves)) for file_path,data,ev in files_data_events: data = set_events_of_data(data,ev) # POST: all data are set. go ahead and save them out. n = len(data) for i,dat in enumerate(data): file_name = os.path.basename(file_path) meta_name = dat.Meta.Name output_path = d_out + file_name + meta_name+ ".csv" print("\t Saving out {:s} ({:d}/{:d})".format(meta_name,i+1,n)) FEC_Util.save_time_sep_force_as_csv(output_path,dat)
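# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal sketch of how the loaders above might be wired together: build the
# positive-category objects, then read (and cache) at most a few curves per
# category.  The cache directory and the limit are arbitrary placeholders, and
# the data itself is assumed to live on the network drive described above.
if __name__ == "__main__":
    positives = get_positives_directory()
    categories = get_categories(positives, use_simulated=False)
    categories = read_categories(categories, force_read=False,
                                 cache_directory="./cache/", limit=5)
    for c in categories:
        print("category {:d}: {:d} curves".format(c.category_number,
                                                  len(c.data)))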
gpl-3.0
birdsarah/bokeh
bokeh/sampledata/periodic_table.py
45
1542
''' This module provides the periodic table as a data set. It exposes an attribute
'elements' which is a pandas dataframe with the following fields

    elements['atomic Number']            (dimensionless)
    elements['symbol']
    elements['name']
    elements['atomic mass']              (units: amu)
    elements['CPK']                      (convention for molecular modeling color)
    elements['electronic configuration']
    elements['electronegativity']        (units: Pauling)
    elements['atomic radius']            (units: pm)
    elements['ionic radius']             (units: pm)
    elements['van der waals radius']     (units: pm)
    elements['ionization energy']        (units: kJ/mol)
    elements['electron affinity']        (units: kJ/mol)
    elements['phase']                    (standard state: solid, liquid, gas)
    elements['bonding type']
    elements['melting point']            (units: K)
    elements['boiling point']            (units: K)
    elements['density']                  (units: g/cm^3)
    elements['type']                     (see below)
    elements['year discovered']
    elements['group']
    elements['period']

element types: actinoid, alkali metal, alkaline earth metal, halogen,
lanthanoid, metal, metalloid, noble gas, nonmetal, transition metal

'''
from __future__ import absolute_import

from os.path import dirname, join

try:
    import pandas as pd
except ImportError as e:
    raise RuntimeError("elements data requires pandas (http://pandas.pydata.org) to be installed")

elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
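# --- Illustrative usage sketch (not part of the original module) -------------
# A quick look at how the 'elements' dataframe documented above might be
# queried; the column names are taken from the docstring, and sort_values
# assumes a reasonably recent pandas.
noble = elements[elements['type'] == 'noble gas']
print(noble.sort_values('boiling point')[['name', 'symbol', 'boiling point']])

# mean density (g/cm^3) per period
print(elements.groupby('period')['density'].mean())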
bsd-3-clause
giorgiop/scikit-learn
sklearn/manifold/tests/test_isomap.py
121
4301
from itertools import product import numpy as np from numpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_equal) from sklearn import datasets from sklearn import manifold from sklearn import neighbors from sklearn import pipeline from sklearn import preprocessing from sklearn.utils.testing import assert_less eigen_solvers = ['auto', 'dense', 'arpack'] path_methods = ['auto', 'FW', 'D'] def test_isomap_simple_grid(): # Isomap should preserve distances when all neighbors are used N_per_side = 5 Npts = N_per_side ** 2 n_neighbors = Npts - 1 # grid of equidistant points in 2D, n_components = n_dim X = np.array(list(product(range(N_per_side), repeat=2))) # distances from each point to all others G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance').toarray() for eigen_solver in eigen_solvers: for path_method in path_methods: clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2, eigen_solver=eigen_solver, path_method=path_method) clf.fit(X) G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance').toarray() assert_array_almost_equal(G, G_iso) def test_isomap_reconstruction_error(): # Same setup as in test_isomap_simple_grid, with an added dimension N_per_side = 5 Npts = N_per_side ** 2 n_neighbors = Npts - 1 # grid of equidistant points in 2D, n_components = n_dim X = np.array(list(product(range(N_per_side), repeat=2))) # add noise in a third dimension rng = np.random.RandomState(0) noise = 0.1 * rng.randn(Npts, 1) X = np.concatenate((X, noise), 1) # compute input kernel G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance').toarray() centerer = preprocessing.KernelCenterer() K = centerer.fit_transform(-0.5 * G ** 2) for eigen_solver in eigen_solvers: for path_method in path_methods: clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2, eigen_solver=eigen_solver, path_method=path_method) clf.fit(X) # compute output kernel G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance').toarray() K_iso = centerer.fit_transform(-0.5 * G_iso ** 2) # make sure error agrees reconstruction_error = np.linalg.norm(K - K_iso) / Npts assert_almost_equal(reconstruction_error, clf.reconstruction_error()) def test_transform(): n_samples = 200 n_components = 10 noise_scale = 0.01 # Create S-curve dataset X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0) # Compute isomap embedding iso = manifold.Isomap(n_components, 2) X_iso = iso.fit_transform(X) # Re-embed a noisy version of the points rng = np.random.RandomState(0) noise = noise_scale * rng.randn(*X.shape) X_iso2 = iso.transform(X + noise) # Make sure the rms error on re-embedding is comparable to noise_scale assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale) def test_pipeline(): # check that Isomap works fine as a transformer in a Pipeline # only checks that no error is raised. # TODO check that it actually does something useful X, y = datasets.make_blobs(random_state=0) clf = pipeline.Pipeline( [('isomap', manifold.Isomap()), ('clf', neighbors.KNeighborsClassifier())]) clf.fit(X, y) assert_less(.9, clf.score(X, y)) def test_isomap_clone_bug(): # regression test for bug reported in #6062 model = manifold.Isomap() for n_neighbors in [10, 15, 20]: model.set_params(n_neighbors=n_neighbors) model.fit(np.random.rand(50, 2)) assert_equal(model.nbrs_.n_neighbors, n_neighbors)
bsd-3-clause
ChanderG/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
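# --- Illustrative usage sketch (not part of the original file) ---------------
# A hedged example of how get_doc_object() above might be exercised: render a
# numpydoc-style docstring to Sphinx reST.  numpy.trapz is only a convenient
# target; any object with a numpydoc docstring would do.
import numpy as np

doc = get_doc_object(np.trapz, config={'use_plots': False})
print(str(doc))  # __str__ on the Sphinx*Doc classes emits the reST text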
bsd-3-clause
apologist/eoddata-client
setup.py
1
1736
import os
import re

from setuptools import setup

try:
    from pypandoc import convert

    if os.name == 'nt':
        os.environ.setdefault('PYPANDOC_PANDOC',
                              'c:\\Program Files (x86)\\Pandoc\\pandoc.exe')

    def read_md(f):
        try:
            return convert(f, 'rst', format='md')
        except OSError:
            return open(f, 'r', encoding='utf-8').read()
except ImportError:
    print('warning: pypandoc module not found, '
          'could not convert Markdown to RST')

    def read_md(f):
        return open(f, 'r', encoding='utf-8').read()


def get_version(package):
    """Return package version as listed in `__version__` in `__init__.py`."""
    init_py = open(os.path.join(package, '__init__.py')).read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)


VERSION = get_version('eoddata_client')

setup(
    name='eoddata_client',
    packages=['eoddata_client'],
    version=VERSION,
    description='Client to get historical market data from EODData web service.',
    long_description=read_md('README.md'),
    author='Aleksey',
    author_email='[email protected]',
    url='https://github.com/apologist/eoddata-client',
    license='Public Domain',
    download_url='',
    keywords=['market', 'data', 'trading', 'stocks', 'finance'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: Public Domain',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=['requests', 'pandas'],
)
unlicense
shikhardb/scikit-learn
sklearn/gaussian_process/gaussian_process.py
18
34542
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <[email protected]> # (mostly translation, see implementation details) # Licence: BSD 3 clause from __future__ import print_function import numpy as np from scipy import linalg, optimize from ..base import BaseEstimator, RegressorMixin from ..metrics.pairwise import manhattan_distances from ..utils import check_random_state, check_array, check_X_y from ..utils.validation import check_is_fitted from . import regression_models as regression from . import correlation_models as correlation MACHINE_EPSILON = np.finfo(np.double).eps def l1_cross_distances(X): """ Computes the nonzero componentwise L1 cross-distances between the vectors in X. Parameters ---------- X: array_like An array with shape (n_samples, n_features) Returns ------- D: array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise L1 cross-distances. ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = check_array(X) n_samples, n_features = X.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples]) return D, ij class GaussianProcess(BaseEstimator, RegressorMixin): """The Gaussian Process model class. Parameters ---------- regr : string or callable, optional A regression function returning an array of outputs of the linear regression functional basis. The number of observations n_samples should be greater than the size p of this basis. Default assumes a simple constant regression trend. Available built-in regression models are:: 'constant', 'linear', 'quadratic' corr : string or callable, optional A stationary autocorrelation function returning the autocorrelation between two points x and x'. Default assumes a squared-exponential autocorrelation model. Built-in correlation models are:: 'absolute_exponential', 'squared_exponential', 'generalized_exponential', 'cubic', 'linear' beta0 : double array_like, optional The regression weight vector to perform Ordinary Kriging (OK). Default assumes Universal Kriging (UK) so that the vector beta of regression weights is estimated using the maximum likelihood principle. storage_mode : string, optional A string specifying whether the Cholesky decomposition of the correlation matrix should be stored in the class (storage_mode = 'full') or not (storage_mode = 'light'). Default assumes storage_mode = 'full', so that the Cholesky decomposition of the correlation matrix is stored. This might be a useful parameter when one is not interested in the MSE and only plan to estimate the BLUP, for which the correlation matrix is not required. verbose : boolean, optional A boolean specifying the verbose level. Default is verbose = False. theta0 : double array_like, optional An array with shape (n_features, ) or (1, ). The parameters in the autocorrelation model. If thetaL and thetaU are also specified, theta0 is considered as the starting point for the maximum likelihood estimation of the best set of parameters. Default assumes isotropic autocorrelation model with theta0 = 1e-1. thetaL : double array_like, optional An array with shape matching theta0's. 
Lower bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. thetaU : double array_like, optional An array with shape matching theta0's. Upper bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. normalize : boolean, optional Input X and observations y are centered and reduced wrt means and standard deviations estimated from the n_samples observations provided. Default is normalize = True so that data is normalized to ease maximum likelihood estimation. nugget : double or ndarray, optional Introduce a nugget effect to allow smooth predictions from noisy data. If nugget is an ndarray, it must be the same length as the number of data points used for the fit. The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). optimizer : string, optional A string specifying the optimization algorithm to be used. Default uses 'fmin_cobyla' algorithm from scipy.optimize. Available optimizers are:: 'fmin_cobyla', 'Welch' 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_. It consists in iterating over several one-dimensional optimizations instead of running one single multi-dimensional optimization. random_start : int, optional The number of times the Maximum Likelihood Estimation should be performed from a random starting point. The first MLE always uses the specified starting point (theta0), the next starting points are picked at random according to an exponential distribution (log-uniform on [thetaL, thetaU]). Default does not use random starting point (random_start = 1). random_state: integer or numpy.RandomState, optional The generator used to shuffle the sequence of coordinates of theta in the Welch optimizer. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- theta_ : array Specified theta OR the best set of autocorrelation parameters (the \ sought maximizer of the reduced likelihood function). reduced_likelihood_function_value_ : array The optimal reduced likelihood function value. Examples -------- >>> import numpy as np >>> from sklearn.gaussian_process import GaussianProcess >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T >>> y = (X * np.sin(X)).ravel() >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.) >>> gp.fit(X, y) # doctest: +ELLIPSIS GaussianProcess(beta0=None... ... Notes ----- The presentation implementation is based on a translation of the DACE Matlab toolbox, see reference [NLNS2002]_. References ---------- .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J. Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002) http://www2.imm.dtu.dk/~hbn/dace/dace.pdf .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D. Morris (1992). Screening, predicting, and computer experiments. 
Technometrics, 34(1) 15--25.` http://www.jstor.org/pss/1269548 """ _regression_types = { 'constant': regression.constant, 'linear': regression.linear, 'quadratic': regression.quadratic} _correlation_types = { 'absolute_exponential': correlation.absolute_exponential, 'squared_exponential': correlation.squared_exponential, 'generalized_exponential': correlation.generalized_exponential, 'cubic': correlation.cubic, 'linear': correlation.linear} _optimizer_types = [ 'fmin_cobyla', 'Welch'] def __init__(self, regr='constant', corr='squared_exponential', beta0=None, storage_mode='full', verbose=False, theta0=1e-1, thetaL=None, thetaU=None, optimizer='fmin_cobyla', random_start=1, normalize=True, nugget=10. * MACHINE_EPSILON, random_state=None): self.regr = regr self.corr = corr self.beta0 = beta0 self.storage_mode = storage_mode self.verbose = verbose self.theta0 = theta0 self.thetaL = thetaL self.thetaU = thetaU self.normalize = normalize self.nugget = nugget self.optimizer = optimizer self.random_start = random_start self.random_state = random_state def fit(self, X, y): """ The Gaussian Process model fitting method. Parameters ---------- X : double array_like An array with shape (n_samples, n_features) with the input at which observations were made. y : double array_like An array with shape (n_samples, ) or shape (n_samples, n_targets) with the observations of the output to be predicted. Returns ------- gp : self A fitted Gaussian Process model object awaiting data to perform predictions. """ # Run input checks self._check_params() self.random_state = check_random_state(self.random_state) # Force data to 2D numpy.array X, y = check_X_y(X, y, multi_output=True, y_numeric=True) self.y_ndim_ = y.ndim if y.ndim == 1: y = y[:, np.newaxis] # Check shapes of DOE & observations n_samples, n_features = X.shape _, n_targets = y.shape # Run input checks self._check_params(n_samples) # Normalize data or don't if self.normalize: X_mean = np.mean(X, axis=0) X_std = np.std(X, axis=0) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) X_std[X_std == 0.] = 1. y_std[y_std == 0.] = 1. # center and scale X if necessary X = (X - X_mean) / X_std y = (y - y_mean) / y_std else: X_mean = np.zeros(1) X_std = np.ones(1) y_mean = np.zeros(1) y_std = np.ones(1) # Calculate matrix of distances D between samples D, ij = l1_cross_distances(X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple input features cannot have the same" " target value.") # Regression matrix and parameters F = self.regr(X) n_samples_F = F.shape[0] if F.ndim > 1: p = F.shape[1] else: p = 1 if n_samples_F != n_samples: raise Exception("Number of rows in F and X do not match. 
Most " "likely something is going wrong with the " "regression model.") if p > n_samples_F: raise Exception(("Ordinary least squares problem is undetermined " "n_samples=%d must be greater than the " "regression model size p=%d.") % (n_samples, p)) if self.beta0 is not None: if self.beta0.shape[0] != p: raise Exception("Shapes of beta0 and F do not match.") # Set attributes self.X = X self.y = y self.D = D self.ij = ij self.F = F self.X_mean, self.X_std = X_mean, X_std self.y_mean, self.y_std = y_mean, y_std # Determine Gaussian Process model parameters if self.thetaL is not None and self.thetaU is not None: # Maximum Likelihood Estimation of the parameters if self.verbose: print("Performing Maximum Likelihood Estimation of the " "autocorrelation parameters...") self.theta_, self.reduced_likelihood_function_value_, par = \ self._arg_max_reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad parameter region. " "Try increasing upper bound") else: # Given parameters if self.verbose: print("Given autocorrelation parameters. " "Computing Gaussian Process model parameters...") self.theta_ = self.theta0 self.reduced_likelihood_function_value_, par = \ self.reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad point. Try increasing theta0.") self.beta = par['beta'] self.gamma = par['gamma'] self.sigma2 = par['sigma2'] self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] if self.storage_mode == 'light': # Delete heavy data (it will be computed again if required) # (it is required only when MSE is wanted in self.predict) if self.verbose: print("Light storage mode specified. " "Flushing autocorrelation matrix...") self.D = None self.ij = None self.F = None self.C = None self.Ft = None self.G = None return self def predict(self, X, eval_MSE=False, batch_size=None): """ This function evaluates the Gaussian Process model at x. Parameters ---------- X : array_like An array with shape (n_eval, n_features) giving the point(s) at which the prediction(s) should be made. eval_MSE : boolean, optional A boolean specifying whether the Mean Squared Error should be evaluated or not. Default assumes evalMSE = False and evaluates only the BLUP (mean prediction). batch_size : integer, optional An integer giving the maximum number of points that can be evaluated simultaneously (depending on the available memory). Default is None so that all given points are evaluated at the same time. Returns ------- y : array_like, shape (n_samples, ) or (n_samples, n_targets) An array with shape (n_eval, ) if the Gaussian Process was trained on an array of shape (n_samples, ) or an array with shape (n_eval, n_targets) if the Gaussian Process was trained on an array of shape (n_samples, n_targets) with the Best Linear Unbiased Prediction at x. MSE : array_like, optional (if eval_MSE == True) An array with shape (n_eval, ) or (n_eval, n_targets) as with y, with the Mean Squared Error at x. 
""" check_is_fitted(self, "X") # Check input shapes X = check_array(X) n_eval, _ = X.shape n_samples, n_features = self.X.shape n_samples_y, n_targets = self.y.shape # Run input checks self._check_params(n_samples) if X.shape[1] != n_features: raise ValueError(("The number of features in X (X.shape[1] = %d) " "should match the number of features used " "for fit() " "which is %d.") % (X.shape[1], n_features)) if batch_size is None: # No memory management # (evaluates all given points in a single batch run) # Normalize input X = (X - self.X_mean) / self.X_std # Initialize output y = np.zeros(n_eval) if eval_MSE: MSE = np.zeros(n_eval) # Get pairwise componentwise L1-distances to the input training set dx = manhattan_distances(X, Y=self.X, sum_over_features=False) # Get regression function and correlation f = self.regr(X) r = self.corr(self.theta_, dx).reshape(n_eval, n_samples) # Scaled predictor y_ = np.dot(f, self.beta) + np.dot(r, self.gamma) # Predictor y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets) if self.y_ndim_ == 1: y = y.ravel() # Mean Squared Error if eval_MSE: C = self.C if C is None: # Light storage mode (need to recompute C, F, Ft and G) if self.verbose: print("This GaussianProcess used 'light' storage mode " "at instantiation. Need to recompute " "autocorrelation matrix...") reduced_likelihood_function_value, par = \ self.reduced_likelihood_function() self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] rt = linalg.solve_triangular(self.C, r.T, lower=True) if self.beta0 is None: # Universal Kriging u = linalg.solve_triangular(self.G.T, np.dot(self.Ft.T, rt) - f.T, lower=True) else: # Ordinary Kriging u = np.zeros((n_targets, n_eval)) MSE = np.dot(self.sigma2.reshape(n_targets, 1), (1. - (rt ** 2.).sum(axis=0) + (u ** 2.).sum(axis=0))[np.newaxis, :]) MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets) # Mean Squared Error might be slightly negative depending on # machine precision: force to zero! MSE[MSE < 0.] = 0. if self.y_ndim_ == 1: MSE = MSE.ravel() return y, MSE else: return y else: # Memory management if type(batch_size) is not int or batch_size <= 0: raise Exception("batch_size must be a positive integer") if eval_MSE: y, MSE = np.zeros(n_eval), np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to], MSE[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y, MSE else: y = np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y def reduced_likelihood_function(self, theta=None): """ This function determines the BLUP parameters and evaluates the reduced likelihood function for the given autocorrelation parameters theta. Maximizing this function wrt the autocorrelation parameters theta is equivalent to maximizing the likelihood of the assumed joint Gaussian distribution of the observations y evaluated onto the design of experiments X. Parameters ---------- theta : array_like, optional An array containing the autocorrelation parameters at which the Gaussian Process model parameters should be determined. Default uses the built-in autocorrelation parameters (ie ``theta = self.theta_``). 
Returns ------- reduced_likelihood_function_value : double The value of the reduced likelihood function associated to the given autocorrelation parameters theta. par : dict A dictionary containing the requested Gaussian Process model parameters: sigma2 Gaussian Process variance. beta Generalized least-squares regression weights for Universal Kriging or given beta0 for Ordinary Kriging. gamma Gaussian Process weights. C Cholesky decomposition of the correlation matrix [R]. Ft Solution of the linear equation system : [R] x Ft = F G QR decomposition of the matrix Ft. """ check_is_fitted(self, "X") if theta is None: # Use built-in autocorrelation parameters theta = self.theta_ # Initialize output reduced_likelihood_function_value = - np.inf par = {} # Retrieve data n_samples = self.X.shape[0] D = self.D ij = self.ij F = self.F if D is None: # Light storage mode (need to recompute D, ij and F) D, ij = l1_cross_distances(self.X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple X are not allowed") F = self.regr(self.X) # Set up R r = self.corr(theta, D) R = np.eye(n_samples) * (1. + self.nugget) R[ij[:, 0], ij[:, 1]] = r R[ij[:, 1], ij[:, 0]] = r # Cholesky decomposition of R try: C = linalg.cholesky(R, lower=True) except linalg.LinAlgError: return reduced_likelihood_function_value, par # Get generalized least squares solution Ft = linalg.solve_triangular(C, F, lower=True) try: Q, G = linalg.qr(Ft, econ=True) except: #/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177: # DeprecationWarning: qr econ argument will be removed after scipy # 0.7. The economy transform will then be available through the # mode='economic' argument. Q, G = linalg.qr(Ft, mode='economic') pass sv = linalg.svd(G, compute_uv=False) rcondG = sv[-1] / sv[0] if rcondG < 1e-10: # Check F sv = linalg.svd(F, compute_uv=False) condF = sv[0] / sv[-1] if condF > 1e15: raise Exception("F is too ill conditioned. Poor combination " "of regression model and observations.") else: # Ft is too ill conditioned, get out (try different theta) return reduced_likelihood_function_value, par Yt = linalg.solve_triangular(C, self.y, lower=True) if self.beta0 is None: # Universal Kriging beta = linalg.solve_triangular(G, np.dot(Q.T, Yt)) else: # Ordinary Kriging beta = np.array(self.beta0) rho = Yt - np.dot(Ft, beta) sigma2 = (rho ** 2.).sum(axis=0) / n_samples # The determinant of R is equal to the squared product of the diagonal # elements of its Cholesky decomposition C detR = (np.diag(C) ** (2. / n_samples)).prod() # Compute/Organize output reduced_likelihood_function_value = - sigma2.sum() * detR par['sigma2'] = sigma2 * self.y_std ** 2. par['beta'] = beta par['gamma'] = linalg.solve_triangular(C.T, rho) par['C'] = C par['Ft'] = Ft par['G'] = G return reduced_likelihood_function_value, par def _arg_max_reduced_likelihood_function(self): """ This function estimates the autocorrelation parameters theta as the maximizer of the reduced likelihood function. (Minimization of the opposite reduced likelihood function is used for convenience) Parameters ---------- self : All parameters are stored in the Gaussian Process model object. Returns ------- optimal_theta : array_like The best set of autocorrelation parameters (the sought maximizer of the reduced likelihood function). optimal_reduced_likelihood_function_value : double The optimal reduced likelihood function value. optimal_par : dict The BLUP parameters associated to thetaOpt. 
""" # Initialize output best_optimal_theta = [] best_optimal_rlf_value = [] best_optimal_par = [] if self.verbose: print("The chosen optimizer is: " + str(self.optimizer)) if self.random_start > 1: print(str(self.random_start) + " random starts are required.") percent_completed = 0. # Force optimizer to fmin_cobyla if the model is meant to be isotropic if self.optimizer == 'Welch' and self.theta0.size == 1: self.optimizer = 'fmin_cobyla' if self.optimizer == 'fmin_cobyla': def minus_reduced_likelihood_function(log10t): return - self.reduced_likelihood_function( theta=10. ** log10t)[0] constraints = [] for i in range(self.theta0.size): constraints.append(lambda log10t, i=i: log10t[i] - np.log10(self.thetaL[0, i])) constraints.append(lambda log10t, i=i: np.log10(self.thetaU[0, i]) - log10t[i]) for k in range(self.random_start): if k == 0: # Use specified starting point as first guess theta0 = self.theta0 else: # Generate a random starting point log10-uniformly # distributed between bounds log10theta0 = np.log10(self.thetaL) \ + self.random_state.rand(self.theta0.size).reshape( self.theta0.shape) * np.log10(self.thetaU / self.thetaL) theta0 = 10. ** log10theta0 # Run Cobyla try: log10_optimal_theta = \ optimize.fmin_cobyla(minus_reduced_likelihood_function, np.log10(theta0), constraints, iprint=0) except ValueError as ve: print("Optimization failed. Try increasing the ``nugget``") raise ve optimal_theta = 10. ** log10_optimal_theta optimal_rlf_value, optimal_par = \ self.reduced_likelihood_function(theta=optimal_theta) # Compare the new optimizer to the best previous one if k > 0: if optimal_rlf_value > best_optimal_rlf_value: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta else: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta if self.verbose and self.random_start > 1: if (20 * k) / self.random_start > percent_completed: percent_completed = (20 * k) / self.random_start print("%s completed" % (5 * percent_completed)) optimal_rlf_value = best_optimal_rlf_value optimal_par = best_optimal_par optimal_theta = best_optimal_theta elif self.optimizer == 'Welch': # Backup of the given atrributes theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU corr = self.corr verbose = self.verbose # This will iterate over fmin_cobyla optimizer self.optimizer = 'fmin_cobyla' self.verbose = False # Initialize under isotropy assumption if verbose: print("Initialize under isotropy assumption...") self.theta0 = check_array(self.theta0.min()) self.thetaL = check_array(self.thetaL.min()) self.thetaU = check_array(self.thetaU.max()) theta_iso, optimal_rlf_value_iso, par_iso = \ self._arg_max_reduced_likelihood_function() optimal_theta = theta_iso + np.zeros(theta0.shape) # Iterate over all dimensions of theta allowing for anisotropy if verbose: print("Now improving allowing for anisotropy...") for i in self.random_state.permutation(theta0.size): if verbose: print("Proceeding along dimension %d..." 
% (i + 1)) self.theta0 = check_array(theta_iso) self.thetaL = check_array(thetaL[0, i]) self.thetaU = check_array(thetaU[0, i]) def corr_cut(t, d): return corr(check_array(np.hstack([optimal_theta[0][0:i], t[0], optimal_theta[0][(i + 1)::]])), d) self.corr = corr_cut optimal_theta[0, i], optimal_rlf_value, optimal_par = \ self._arg_max_reduced_likelihood_function() # Restore the given atrributes self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU self.corr = corr self.optimizer = 'Welch' self.verbose = verbose else: raise NotImplementedError("This optimizer ('%s') is not " "implemented yet. Please contribute!" % self.optimizer) return optimal_theta, optimal_rlf_value, optimal_par def _check_params(self, n_samples=None): # Check regression model if not callable(self.regr): if self.regr in self._regression_types: self.regr = self._regression_types[self.regr] else: raise ValueError("regr should be one of %s or callable, " "%s was given." % (self._regression_types.keys(), self.regr)) # Check regression weights if given (Ordinary Kriging) if self.beta0 is not None: self.beta0 = check_array(self.beta0) if self.beta0.shape[1] != 1: # Force to column vector self.beta0 = self.beta0.T # Check correlation model if not callable(self.corr): if self.corr in self._correlation_types: self.corr = self._correlation_types[self.corr] else: raise ValueError("corr should be one of %s or callable, " "%s was given." % (self._correlation_types.keys(), self.corr)) # Check storage mode if self.storage_mode != 'full' and self.storage_mode != 'light': raise ValueError("Storage mode should either be 'full' or " "'light', %s was given." % self.storage_mode) # Check correlation parameters self.theta0 = check_array(self.theta0) lth = self.theta0.size if self.thetaL is not None and self.thetaU is not None: self.thetaL = check_array(self.thetaL) self.thetaU = check_array(self.thetaU) if self.thetaL.size != lth or self.thetaU.size != lth: raise ValueError("theta0, thetaL and thetaU must have the " "same length.") if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL): raise ValueError("The bounds must satisfy O < thetaL <= " "thetaU.") elif self.thetaL is None and self.thetaU is None: if np.any(self.theta0 <= 0): raise ValueError("theta0 must be strictly positive.") elif self.thetaL is None or self.thetaU is None: raise ValueError("thetaL and thetaU should either be both or " "neither specified.") # Force verbose type to bool self.verbose = bool(self.verbose) # Force normalize type to bool self.normalize = bool(self.normalize) # Check nugget value self.nugget = np.asarray(self.nugget) if np.any(self.nugget) < 0.: raise ValueError("nugget must be positive or zero.") if (n_samples is not None and self.nugget.shape not in [(), (n_samples,)]): raise ValueError("nugget must be either a scalar " "or array of length n_samples.") # Check optimizer if self.optimizer not in self._optimizer_types: raise ValueError("optimizer should be one of %s" % self._optimizer_types) # Force random_start type to int self.random_start = int(self.random_start)
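# --- Illustrative usage sketch (not part of the original module) -------------
# Follows the class docstring example above one step further: after fitting on
# a small 1-D toy problem, predict() is asked for the BLUP together with its
# MSE, which is how this class exposes predictive uncertainty.  The inputs
# below are arbitrary toy values.
import numpy as np

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1., random_state=0)
gp.fit(X, y)

x_new = np.atleast_2d(np.linspace(0, 10, 50)).T
y_pred, mse = gp.predict(x_new, eval_MSE=True)
sigma = np.sqrt(mse)  # pointwise predictive standard deviation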
bsd-3-clause
Schwittleymani/ECO
src/python/nlp/original_2d_export.py
2
3090
import argparse
import pprint
import glob
import sys
import gensim
import util
import numpy
import json
import os

from sklearn.manifold import TSNE


def process_arguments(args):
    parser = argparse.ArgumentParser(description='configure Word2Vec model building')
    parser.add_argument('--model_path', action='store', help='the path to the model')
    parser.add_argument('--txt_path', action='store', help='path containing text files which are all loaded')
    parser.add_argument('--output_file', action='store', help='the text file to store all vectors in')
    params = vars(parser.parse_args(args))
    return params


class LineVectorCombination(object):
    vector = 0
    sentence = 0


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    input_path = params['model_path']

    util.enable_verbose_training(sys.argv[0])

    try:
        model = gensim.models.Word2Vec.load_word2vec_format(input_path, binary=True)
        # this raises an exception if the model type is different..
    except Exception:
        # just use the other method of loading..
        model = gensim.models.Word2Vec.load(input_path)

    txt_path = params['txt_path']

    data_300d = []

    originals = []
    original_vectors = []
    original_sentences = []

    text_files = glob.glob(txt_path + '/*.txt')
    for file in text_files:
        line = 'loading file ' + str(text_files.index(file)) + '/' + str(len(text_files))
        print(line)
        index = 0
        for line in open(file, 'r'):
            vector_words = []
            word_count = 0
            for word in line.split():
                try:
                    vector_words.append(model[word])
                    word_count += 1
                except:
                    pass  # skip vocab unknown word
            if word_count > 5:
                vector = gensim.matutils.unitvec(numpy.array(vector_words).mean(axis=0))
                combined = LineVectorCombination()
                combined.sentence = line
                combined.vector = vector
                originals.append(combined)
                original_vectors.append(vector)
                original_sentences.append(line)
                vlist = vector.tolist()
                intlist = []
                for number in vlist:
                    intnumber = int(number*10000)
                    intlist.append(intnumber)
                data_300d.append({"sentence": line, "point": intlist})
            index += 1

    output_file = params['output_file']

    # X = numpy.array(original_vectors)
    # tsne = TSNE(n_components=2, learning_rate=200, perplexity=20, verbose=2).fit_transform(X)
    #
    # data_2d = []
    # for i, f in enumerate(original_sentences):
    #     point = [(tsne[i, k] - numpy.min(tsne[:, k]))/(numpy.max(tsne[:, k]) - numpy.min(tsne[:, k])) for k in range(2)]
    #     data_2d.append({"sentence": os.path.abspath(original_sentences[i]), "point": point})

    with open(output_file, 'w') as outfile:
        # json.dump(data_2d, outfile)
        json.dump(data_300d, outfile)
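# --- Illustrative invocation (not part of the original script) ---------------
# The flags below come from process_arguments() above; the model, corpus and
# output paths are placeholders.
#
#   python original_2d_export.py \
#       --model_path  path/to/word2vec_model.bin \
#       --txt_path    path/to/corpus_txt_dir \
#       --output_file vectors_300d.json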
apache-2.0
bestwpw/BDA_py_demos
demos_ch5/demo5_1.py
19
5055
"""Bayesian Data Analysis, 3rd ed Chapter 5, demo 1 Hierarchical model for Rats experiment (BDA3, p. 102). """ from __future__ import division import numpy as np from scipy.stats import beta from scipy.special import gammaln import matplotlib.pyplot as plt # Edit default plot settings (colours from colorbrewer2.org) plt.rc('font', size=14) plt.rc('lines', color='#377eb8', linewidth=2) plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle # rat data (BDA3, p. 102) y = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2, 5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4, 10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15, 15, 9, 4 ]) n = np.array([ 20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20, 20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19, 46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20, 48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46, 47, 24, 14 ]) M = len(y) # plot the separate and pooled models plt.figure(figsize=(8,10)) x = np.linspace(0, 1, 250) # separate plt.subplot(2, 1, 1) lines = plt.plot(x, beta.pdf(x[:,None], y[:-1] + 1, n[:-1] - y[:-1] + 1), linewidth=1) # highlight the last line line1, = plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r') plt.legend((lines[0], line1), (r'Posterior of $\theta_j$', r'Posterior of $\theta_{71}$')) plt.yticks(()) plt.title('separate model') # pooled plt.subplot(2, 1, 2) plt.plot(x, beta.pdf(x, y.sum() + 1, n.sum() - y.sum() + 1), linewidth=2, label=(r'Posterior of common $\theta$')) plt.legend() plt.yticks(()) plt.xlabel(r'$\theta$', fontsize=20) plt.title('pooled model') # compute the marginal posterior of alpha and beta in the hierarchical model in a grid A = np.linspace(0.5, 6, 100) B = np.linspace(3, 33, 100) # calculated in logarithms for numerical accuracy lp = ( - 5/2 * np.log(A + B[:,None]) + np.sum( gammaln(A + B[:,None]) - gammaln(A) - gammaln(B[:,None]) + gammaln(A + y[:,None,None]) + gammaln(B[:,None] + (n - y)[:,None,None]) - gammaln(A + B[:,None] + n[:,None,None]), axis=0 ) ) # subtract the maximum value to avoid over/underflow in exponentation lp -= lp.max() p = np.exp(lp) # plot the marginal posterior fig = plt.figure() plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1])) plt.xlabel(r'$\alpha$', fontsize=20) plt.ylabel(r'$\beta$', fontsize=20) plt.title('The marginal posterior of alpha and beta in hierarchical model') # sample from the posterior grid of alpha and beta nsamp = 1000 samp_indices = np.unravel_index( np.random.choice(p.size, size=nsamp, p=p.ravel()/p.sum()), p.shape ) samp_A = A[samp_indices[1]] samp_B = B[samp_indices[0]] # add random jitter, see BDA3 p. 76 samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0]) samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0]) # Plot samples from the distribution of distributions Beta(alpha,beta), # that is, plot Beta(alpha,beta) using the posterior samples of alpha and beta fig = plt.figure(figsize=(8,10)) plt.subplot(2, 1, 1) plt.plot(x, beta.pdf(x[:,None], samp_A[:20], samp_B[:20]), linewidth=1) plt.yticks(()) plt.title(r'Posterior samples from the distribution of distributions ' r'Beta($\alpha$,$\beta$)') # The average of above distributions, is the predictive distribution for a new # theta, and also the prior distribution for theta_j. # Plot this. 
plt.subplot(2, 1, 2) plt.plot(x, np.mean(beta.pdf(x, samp_A[:,None], samp_B[:,None]), axis=0)) plt.yticks(()) plt.xlabel(r'$\theta$', fontsize=20) plt.title(r'Predictive distribution for a new $\theta$ ' r'and prior for $\theta_j$') # And finally compare the separate model and hierarchical model plt.figure(figsize=(8,10)) x = np.linspace(0, 1, 250) # first plot the separate model (same as above) plt.subplot(2, 1, 1) # note that for clarity only every 7th distribution is plotted plt.plot(x, beta.pdf(x[:,None], y[7:-1:7] + 1, n[7:-1:7] - y[7:-1:7] + 1), linewidth=1) # highlight the last line plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r') plt.yticks(()) plt.title('separate model') # And the hierarchical model. Note that these marginal posteriors for theta_j are # more narrow than in separate model case, due to borrowed information from # the other theta_j's. plt.subplot(2, 1, 2) # note that for clarity only every 7th distribution is plotted lines = plt.plot( x, np.mean( beta.pdf( x[:,None], y[7::7] + samp_A[:,None,None], n[7::7] - y[7::7] + samp_B[:,None,None] ), axis=0 ), linewidth=1, ) # highlight the last line lines[-1].set_linewidth(2) lines[-1].set_color('r') plt.yticks(()) plt.xlabel(r'$\theta$', fontsize=20) plt.title('hierarchical model') plt.show()
gpl-3.0
superbobry/hyperopt-sklearn
hpsklearn/vkmeans.py
6
2032
import numpy as np
from sklearn.cluster import KMeans


class ColumnKMeans(object):
    def __init__(self,
                 n_clusters,
                 init='k-means++',
                 n_init=10,
                 max_iter=300,
                 tol=1e-4,
                 precompute_distances=True,
                 verbose=0,
                 random_state=None,
                 copy_x=True,
                 n_jobs=1,
                 ):
        self.n_clusters = n_clusters
        self.init = init
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.output_dtype = None

    def fit(self, X):
        rows, cols = X.shape
        self.col_models = []
        for jj in range(cols):
            col_model = KMeans(
                n_clusters=self.n_clusters,
                init=self.init,
                n_init=self.n_init,
                max_iter=self.max_iter,
                tol=self.tol,
                precompute_distances=self.precompute_distances,
                verbose=self.verbose,
                random_state=self.random_state,
                copy_x=self.copy_x,
                n_jobs=self.n_jobs,
            )
            col_model.fit(X[:, jj:jj + 1])
            self.col_models.append(col_model)

    def transform(self, X):
        rows, cols = X.shape
        if self.output_dtype is None:
            output_dtype = X.dtype  # XXX
        else:
            output_dtype = self.output_dtype
        rval = np.empty(
            (rows, cols, self.n_clusters),
            dtype=output_dtype)
        for jj in range(cols):
            Xj = X[:, jj:jj + 1]
            dists = self.col_models[jj].transform(Xj)
            feats = np.exp(-(dists ** 2))
            # -- normalize features by row
            rval[:, jj, :] = feats / (feats.sum(axis=1)[:, None])
        assert np.all(np.isfinite(rval))
        return rval.reshape((rows, cols * self.n_clusters))
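# --- Illustrative usage sketch (not part of the original module) -------------
# ColumnKMeans fits one KMeans per input column and re-encodes every value as
# normalized soft cluster memberships, so an (n_rows, n_cols) matrix becomes
# (n_rows, n_cols * n_clusters).  The random data below only illustrates the
# shapes.
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(100, 3)

ckm = ColumnKMeans(n_clusters=4, random_state=0)
ckm.fit(X)
features = ckm.transform(X)
print(features.shape)  # -> (100, 12)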
bsd-3-clause
rhyolight/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py
69
42655
""" A module for finding, managing, and using fonts across platforms. This module provides a single :class:`FontManager` instance that can be shared across backends and platforms. The :func:`findfont` function returns the best TrueType (TTF) font file in the local or system font path that matches the specified :class:`FontProperties` instance. The :class:`FontManager` also handles Adobe Font Metrics (AFM) font files for use by the PostScript backend. The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1) font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_. Future versions may implement the Level 2 or 2.1 specifications. Experimental support is included for using `fontconfig <http://www.fontconfig.org>`_ on Unix variant plaforms (Linux, OS X, Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has the advantage that it is the standard way to look up fonts on X11 platforms, so if a font is installed, it is much more likely to be found. """ """ KNOWN ISSUES - documentation - font variant is untested - font stretch is incomplete - font size is incomplete - font size_adjust is incomplete - default font algorithm needs improvement and testing - setWeights function needs improvement - 'light' is an invalid weight value, remove it. - update_fonts not implemented Authors : John Hunter <[email protected]> Paul Barrett <[email protected]> Michael Droettboom <[email protected]> Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005) License : matplotlib license (PSF compatible) The font directory code is from ttfquery, see license/LICENSE_TTFQUERY. """ import os, sys, glob try: set except NameError: from sets import Set as set import matplotlib from matplotlib import afm from matplotlib import ft2font from matplotlib import rcParams, get_configdir from matplotlib.cbook import is_string_like from matplotlib.fontconfig_pattern import \ parse_fontconfig_pattern, generate_fontconfig_pattern try: import cPickle as pickle except ImportError: import pickle USE_FONTCONFIG = False verbose = matplotlib.verbose font_scalings = { 'xx-small' : 0.579, 'x-small' : 0.694, 'small' : 0.833, 'medium' : 1.0, 'large' : 1.200, 'x-large' : 1.440, 'xx-large' : 1.728, 'larger' : 1.2, 'smaller' : 0.833, None : 1.0} stretch_dict = { 'ultra-condensed' : 100, 'extra-condensed' : 200, 'condensed' : 300, 'semi-condensed' : 400, 'normal' : 500, 'semi-expanded' : 600, 'expanded' : 700, 'extra-expanded' : 800, 'ultra-expanded' : 900} weight_dict = { 'ultralight' : 100, 'light' : 200, 'normal' : 400, 'regular' : 400, 'book' : 400, 'medium' : 500, 'roman' : 500, 'semibold' : 600, 'demibold' : 600, 'demi' : 600, 'bold' : 700, 'heavy' : 800, 'extra bold' : 800, 'black' : 900} font_family_aliases = set([ 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace', 'sans']) # OS Font paths MSFolders = \ r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders' MSFontDirectories = [ r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts', r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts'] X11FontDirectories = [ # an old standard installation point "/usr/X11R6/lib/X11/fonts/TTF/", # here is the new standard location for fonts "/usr/share/fonts/", # documented as a good place to install new fonts "/usr/local/share/fonts/", # common application, not really useful "/usr/lib/openoffice/share/fonts/truetype/", ] OSXFontDirectories = [ "/Library/Fonts/", "/Network/Library/Fonts/", "/System/Library/Fonts/" ] if not USE_FONTCONFIG: home = os.environ.get('HOME') if 
home is not None: # user fonts on OSX path = os.path.join(home, 'Library', 'Fonts') OSXFontDirectories.append(path) path = os.path.join(home, '.fonts') X11FontDirectories.append(path) def get_fontext_synonyms(fontext): """ Return a list of file extensions extensions that are synonyms for the given file extension *fileext*. """ return {'ttf': ('ttf', 'otf'), 'otf': ('ttf', 'otf'), 'afm': ('afm',)}[fontext] def win32FontDirectory(): """ Return the user-specified font directory for Win32. This is looked up from the registry key:: \\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts If the key is not found, $WINDIR/Fonts will be returned. """ try: import _winreg except ImportError: pass # Fall through to default else: try: user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders) try: try: return _winreg.QueryValueEx(user, 'Fonts')[0] except OSError: pass # Fall through to default finally: _winreg.CloseKey(user) except OSError: pass # Fall through to default return os.path.join(os.environ['WINDIR'], 'Fonts') def win32InstalledFonts(directory=None, fontext='ttf'): """ Search for fonts in the specified font directory, or use the system directories if none given. A list of TrueType font filenames are returned by default, or AFM fonts if *fontext* == 'afm'. """ import _winreg if directory is None: directory = win32FontDirectory() fontext = get_fontext_synonyms(fontext) key, items = None, {} for fontdir in MSFontDirectories: try: local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir) except OSError: continue if not local: files = [] for ext in fontext: files.extend(glob.glob(os.path.join(directory, '*.'+ext))) return files try: for j in range(_winreg.QueryInfoKey(local)[1]): try: key, direc, any = _winreg.EnumValue( local, j) if not os.path.dirname(direc): direc = os.path.join(directory, direc) direc = os.path.abspath(direc).lower() if os.path.splitext(direc)[1][1:] in fontext: items[direc] = 1 except EnvironmentError: continue except WindowsError: continue return items.keys() finally: _winreg.CloseKey(local) return None def OSXFontDirectory(): """ Return the system font directories for OS X. This is done by starting at the list of hardcoded paths in :attr:`OSXFontDirectories` and returning all nested directories within them. """ fontpaths = [] def add(arg,directory,files): fontpaths.append(directory) for fontdir in OSXFontDirectories: try: if os.path.isdir(fontdir): os.path.walk(fontdir, add, None) except (IOError, OSError, TypeError, ValueError): pass return fontpaths def OSXInstalledFonts(directory=None, fontext='ttf'): """ Get list of font files on OS X - ignores font suffix by default. """ if directory is None: directory = OSXFontDirectory() fontext = get_fontext_synonyms(fontext) files = [] for path in directory: if fontext is None: files.extend(glob.glob(os.path.join(path,'*'))) else: for ext in fontext: files.extend(glob.glob(os.path.join(path, '*.'+ext))) files.extend(glob.glob(os.path.join(path, '*.'+ext.upper()))) return files def x11FontDirectory(): """ Return the system font directories for X11. This is done by starting at the list of hardcoded paths in :attr:`X11FontDirectories` and returning all nested directories within them. 
""" fontpaths = [] def add(arg,directory,files): fontpaths.append(directory) for fontdir in X11FontDirectories: try: if os.path.isdir(fontdir): os.path.walk(fontdir, add, None) except (IOError, OSError, TypeError, ValueError): pass return fontpaths def get_fontconfig_fonts(fontext='ttf'): """ Grab a list of all the fonts that are being tracked by fontconfig by making a system call to ``fc-list``. This is an easy way to grab all of the fonts the user wants to be made available to applications, without needing knowing where all of them reside. """ try: import commands except ImportError: return {} fontext = get_fontext_synonyms(fontext) fontfiles = {} status, output = commands.getstatusoutput("fc-list file") if status == 0: for line in output.split('\n'): fname = line.split(':')[0] if (os.path.splitext(fname)[1][1:] in fontext and os.path.exists(fname)): fontfiles[fname] = 1 return fontfiles def findSystemFonts(fontpaths=None, fontext='ttf'): """ Search for fonts in the specified font paths. If no paths are given, will use a standard set of system paths, as well as the list of fonts tracked by fontconfig if fontconfig is installed and available. A list of TrueType fonts are returned by default with AFM fonts as an option. """ fontfiles = {} fontexts = get_fontext_synonyms(fontext) if fontpaths is None: if sys.platform == 'win32': fontdir = win32FontDirectory() fontpaths = [fontdir] # now get all installed fonts directly... for f in win32InstalledFonts(fontdir): base, ext = os.path.splitext(f) if len(ext)>1 and ext[1:].lower() in fontexts: fontfiles[f] = 1 else: fontpaths = x11FontDirectory() # check for OS X & load its fonts if present if sys.platform == 'darwin': for f in OSXInstalledFonts(fontext=fontext): fontfiles[f] = 1 for f in get_fontconfig_fonts(fontext): fontfiles[f] = 1 elif isinstance(fontpaths, (str, unicode)): fontpaths = [fontpaths] for path in fontpaths: files = [] for ext in fontexts: files.extend(glob.glob(os.path.join(path, '*.'+ext))) files.extend(glob.glob(os.path.join(path, '*.'+ext.upper()))) for fname in files: fontfiles[os.path.abspath(fname)] = 1 return [fname for fname in fontfiles.keys() if os.path.exists(fname)] def weight_as_number(weight): """ Return the weight property as a numeric value. String values are converted to their corresponding numeric value. """ if isinstance(weight, str): try: weight = weight_dict[weight.lower()] except KeyError: weight = 400 elif weight in range(100, 1000, 100): pass else: raise ValueError, 'weight not a valid integer' return weight class FontEntry(object): """ A class for storing Font properties. It is used when populating the font lookup dictionary. """ def __init__(self, fname ='', name ='', style ='normal', variant='normal', weight ='normal', stretch='normal', size ='medium', ): self.fname = fname self.name = name self.style = style self.variant = variant self.weight = weight self.stretch = stretch try: self.size = str(float(size)) except ValueError: self.size = size def ttfFontProperty(font): """ A function for populating the :class:`FontKey` by extracting information from the TrueType font file. *font* is a :class:`FT2Font` instance. 
""" name = font.family_name # Styles are: italic, oblique, and normal (default) sfnt = font.get_sfnt() sfnt2 = sfnt.get((1,0,0,2)) sfnt4 = sfnt.get((1,0,0,4)) if sfnt2: sfnt2 = sfnt2.lower() else: sfnt2 = '' if sfnt4: sfnt4 = sfnt4.lower() else: sfnt4 = '' if sfnt4.find('oblique') >= 0: style = 'oblique' elif sfnt4.find('italic') >= 0: style = 'italic' elif sfnt2.find('regular') >= 0: style = 'normal' elif font.style_flags & ft2font.ITALIC: style = 'italic' else: style = 'normal' # Variants are: small-caps and normal (default) # !!!! Untested if name.lower() in ['capitals', 'small-caps']: variant = 'small-caps' else: variant = 'normal' # Weights are: 100, 200, 300, 400 (normal: default), 500 (medium), # 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black) # lighter and bolder are also allowed. weight = None for w in weight_dict.keys(): if sfnt4.find(w) >= 0: weight = w break if not weight: if font.style_flags & ft2font.BOLD: weight = 700 else: weight = 400 weight = weight_as_number(weight) # Stretch can be absolute and relative # Absolute stretches are: ultra-condensed, extra-condensed, condensed, # semi-condensed, normal, semi-expanded, expanded, extra-expanded, # and ultra-expanded. # Relative stretches are: wider, narrower # Child value is: inherit # !!!! Incomplete if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \ sfnt4.find('cond') >= 0: stretch = 'condensed' elif sfnt4.find('demi cond') >= 0: stretch = 'semi-condensed' elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0: stretch = 'expanded' else: stretch = 'normal' # Sizes can be absolute and relative. # Absolute sizes are: xx-small, x-small, small, medium, large, x-large, # and xx-large. # Relative sizes are: larger, smaller # Length value is an absolute font size, e.g. 12pt # Percentage values are in 'em's. Most robust specification. # !!!! Incomplete if font.scalable: size = 'scalable' else: size = str(float(font.get_fontsize())) # !!!! Incomplete size_adjust = None return FontEntry(font.fname, name, style, variant, weight, stretch, size) def afmFontProperty(fontpath, font): """ A function for populating a :class:`FontKey` instance by extracting information from the AFM font file. *font* is a class:`AFM` instance. """ name = font.get_familyname() # Styles are: italic, oblique, and normal (default) if font.get_angle() != 0 or name.lower().find('italic') >= 0: style = 'italic' elif name.lower().find('oblique') >= 0: style = 'oblique' else: style = 'normal' # Variants are: small-caps and normal (default) # !!!! Untested if name.lower() in ['capitals', 'small-caps']: variant = 'small-caps' else: variant = 'normal' # Weights are: 100, 200, 300, 400 (normal: default), 500 (medium), # 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black) # lighter and bolder are also allowed. weight = weight_as_number(font.get_weight().lower()) # Stretch can be absolute and relative # Absolute stretches are: ultra-condensed, extra-condensed, condensed, # semi-condensed, normal, semi-expanded, expanded, extra-expanded, # and ultra-expanded. # Relative stretches are: wider, narrower # Child value is: inherit # !!!! Incomplete stretch = 'normal' # Sizes can be absolute and relative. # Absolute sizes are: xx-small, x-small, small, medium, large, x-large, # and xx-large. # Relative sizes are: larger, smaller # Length value is an absolute font size, e.g. 12pt # Percentage values are in 'em's. Most robust specification. # All AFM fonts are apparently scalable. size = 'scalable' # !!!! 
Incomplete size_adjust = None return FontEntry(fontpath, name, style, variant, weight, stretch, size) def createFontList(fontfiles, fontext='ttf'): """ A function to create a font lookup list. The default is to create a list of TrueType fonts. An AFM font list can optionally be created. """ fontlist = [] # Add fonts from list of known font files. seen = {} for fpath in fontfiles: verbose.report('createFontDict: %s' % (fpath), 'debug') fname = os.path.split(fpath)[1] if fname in seen: continue else: seen[fname] = 1 if fontext == 'afm': try: fh = open(fpath, 'r') except: verbose.report("Could not open font file %s" % fpath) continue try: try: font = afm.AFM(fh) finally: fh.close() except RuntimeError: verbose.report("Could not parse font file %s"%fpath) continue prop = afmFontProperty(fpath, font) else: try: font = ft2font.FT2Font(str(fpath)) except RuntimeError: verbose.report("Could not open font file %s"%fpath) continue except UnicodeError: verbose.report("Cannot handle unicode filenames") #print >> sys.stderr, 'Bad file is', fpath continue try: prop = ttfFontProperty(font) except: continue fontlist.append(prop) return fontlist class FontProperties(object): """ A class for storing and manipulating font properties. The font properties are those described in the `W3C Cascading Style Sheet, Level 1 <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font specification. The six properties are: - family: A list of font names in decreasing order of priority. The items may include a generic font family name, either 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'. In that case, the actual font to be used will be looked up from the associated rcParam in :file:`matplotlibrc`. - style: Either 'normal', 'italic' or 'oblique'. - variant: Either 'normal' or 'small-caps'. - stretch: A numeric value in the range 0-1000 or one of 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded' or 'ultra-expanded' - weight: A numeric value in the range 0-1000 or one of 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black' - size: Either an relative value of 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or an absolute font size, e.g. 12 The default font property for TrueType fonts (as specified in the default :file:`matplotlibrc` file) is:: sans-serif, normal, normal, normal, normal, scalable. Alternatively, a font may be specified using an absolute path to a .ttf file, by using the *fname* kwarg. The preferred usage of font sizes is to use the relative values, e.g. 'large', instead of absolute font sizes, e.g. 12. This approach allows all text sizes to be made larger or smaller based on the font manager's default font size, i.e. by using the :meth:`FontManager.set_default_size` method. This class will also accept a `fontconfig <http://www.fontconfig.org/>`_ pattern, if it is the only argument provided. See the documentation on `fontconfig patterns <http://www.fontconfig.org/fontconfig-user.html>`_. This support does not require fontconfig to be installed. We are merely borrowing its pattern syntax for use here. Note that matplotlib's internal font manager and fontconfig use a different algorithm to lookup fonts, so the results of the same pattern may be different in matplotlib than in other applications that use fontconfig. 
""" def __init__(self, family = None, style = None, variant= None, weight = None, stretch= None, size = None, fname = None, # if this is set, it's a hardcoded filename to use _init = None # used only by copy() ): self._family = None self._slant = None self._variant = None self._weight = None self._stretch = None self._size = None self._file = None # This is used only by copy() if _init is not None: self.__dict__.update(_init.__dict__) return if is_string_like(family): # Treat family as a fontconfig pattern if it is the only # parameter provided. if (style is None and variant is None and weight is None and stretch is None and size is None and fname is None): self.set_fontconfig_pattern(family) return self.set_family(family) self.set_style(style) self.set_variant(variant) self.set_weight(weight) self.set_stretch(stretch) self.set_file(fname) self.set_size(size) def _parse_fontconfig_pattern(self, pattern): return parse_fontconfig_pattern(pattern) def __hash__(self): l = self.__dict__.items() l.sort() return hash(repr(l)) def __str__(self): return self.get_fontconfig_pattern() def get_family(self): """ Return a list of font names that comprise the font family. """ if self._family is None: family = rcParams['font.family'] if is_string_like(family): return [family] return family return self._family def get_name(self): """ Return the name of the font that best matches the font properties. """ return ft2font.FT2Font(str(findfont(self))).family_name def get_style(self): """ Return the font style. Values are: 'normal', 'italic' or 'oblique'. """ if self._slant is None: return rcParams['font.style'] return self._slant get_slant = get_style def get_variant(self): """ Return the font variant. Values are: 'normal' or 'small-caps'. """ if self._variant is None: return rcParams['font.variant'] return self._variant def get_weight(self): """ Set the font weight. Options are: A numeric value in the range 0-1000 or one of 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black' """ if self._weight is None: return rcParams['font.weight'] return self._weight def get_stretch(self): """ Return the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'. """ if self._stretch is None: return rcParams['font.stretch'] return self._stretch def get_size(self): """ Return the font size. """ if self._size is None: return rcParams['font.size'] return self._size def get_size_in_points(self): if self._size is not None: try: return float(self._size) except ValueError: pass default_size = fontManager.get_default_size() return default_size * font_scalings.get(self._size) def get_file(self): """ Return the filename of the associated font. """ return self._file def get_fontconfig_pattern(self): """ Get a fontconfig pattern suitable for looking up the font as specified with fontconfig's ``fc-match`` utility. See the documentation on `fontconfig patterns <http://www.fontconfig.org/fontconfig-user.html>`_. This support does not require fontconfig to be installed or support for it to be enabled. We are merely borrowing its pattern syntax for use here. """ return generate_fontconfig_pattern(self) def set_family(self, family): """ Change the font family. May be either an alias (generic name is CSS parlance), such as: 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace', or a real font name. 
""" if family is None: self._family = None else: if is_string_like(family): family = [family] self._family = family set_name = set_family def set_style(self, style): """ Set the font style. Values are: 'normal', 'italic' or 'oblique'. """ if style not in ('normal', 'italic', 'oblique', None): raise ValueError("style must be normal, italic or oblique") self._slant = style set_slant = set_style def set_variant(self, variant): """ Set the font variant. Values are: 'normal' or 'small-caps'. """ if variant not in ('normal', 'small-caps', None): raise ValueError("variant must be normal or small-caps") self._variant = variant def set_weight(self, weight): """ Set the font weight. May be either a numeric value in the range 0-1000 or one of 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black' """ if weight is not None: try: weight = int(weight) if weight < 0 or weight > 1000: raise ValueError() except ValueError: if weight not in weight_dict: raise ValueError("weight is invalid") self._weight = weight def set_stretch(self, stretch): """ Set the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded' or 'ultra-expanded', or a numeric value in the range 0-1000. """ if stretch is not None: try: stretch = int(stretch) if stretch < 0 or stretch > 1000: raise ValueError() except ValueError: if stretch not in stretch_dict: raise ValueError("stretch is invalid") self._stretch = stretch def set_size(self, size): """ Set the font size. Either an relative value of 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or an absolute font size, e.g. 12. """ if size is not None: try: size = float(size) except ValueError: if size is not None and size not in font_scalings: raise ValueError("size is invalid") self._size = size def set_file(self, file): """ Set the filename of the fontfile to use. In this case, all other properties will be ignored. """ self._file = file def set_fontconfig_pattern(self, pattern): """ Set the properties by parsing a fontconfig *pattern*. See the documentation on `fontconfig patterns <http://www.fontconfig.org/fontconfig-user.html>`_. This support does not require fontconfig to be installed or support for it to be enabled. We are merely borrowing its pattern syntax for use here. """ for key, val in self._parse_fontconfig_pattern(pattern).items(): if type(val) == list: getattr(self, "set_" + key)(val[0]) else: getattr(self, "set_" + key)(val) def copy(self): """Return a deep copy of self""" return FontProperties(_init = self) def ttfdict_to_fnames(d): """ flatten a ttfdict to all the filenames it contains """ fnames = [] for named in d.values(): for styled in named.values(): for variantd in styled.values(): for weightd in variantd.values(): for stretchd in weightd.values(): for fname in stretchd.values(): fnames.append(fname) return fnames def pickle_dump(data, filename): """ Equivalent to pickle.dump(data, open(filename, 'w')) but closes the file to prevent filehandle leakage. """ fh = open(filename, 'w') try: pickle.dump(data, fh) finally: fh.close() def pickle_load(filename): """ Equivalent to pickle.load(open(filename, 'r')) but closes the file to prevent filehandle leakage. 
""" fh = open(filename, 'r') try: data = pickle.load(fh) finally: fh.close() return data class FontManager: """ On import, the :class:`FontManager` singleton instance creates a list of TrueType fonts based on the font properties: name, style, variant, weight, stretch, and size. The :meth:`findfont` method does a nearest neighbor search to find the font that most closely matches the specification. If no good enough match is found, a default font is returned. """ def __init__(self, size=None, weight='normal'): self.__default_weight = weight self.default_size = size paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'), os.path.join(rcParams['datapath'], 'fonts', 'afm')] # Create list of font paths for pathname in ['TTFPATH', 'AFMPATH']: if pathname in os.environ: ttfpath = os.environ[pathname] if ttfpath.find(';') >= 0: #win32 style paths.extend(ttfpath.split(';')) elif ttfpath.find(':') >= 0: # unix style paths.extend(ttfpath.split(':')) else: paths.append(ttfpath) verbose.report('font search path %s'%(str(paths))) # Load TrueType fonts and create font dictionary. self.ttffiles = findSystemFonts(paths) + findSystemFonts() for fname in self.ttffiles: verbose.report('trying fontname %s' % fname, 'debug') if fname.lower().find('vera.ttf')>=0: self.defaultFont = fname break else: # use anything self.defaultFont = self.ttffiles[0] self.ttflist = createFontList(self.ttffiles) if rcParams['pdf.use14corefonts']: # Load only the 14 PDF core fonts. These fonts do not need to be # embedded; every PDF viewing application is required to have them: # Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique, # Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique, # Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol, # ZapfDingbats. afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts') afmfiles = findSystemFonts(afmpath, fontext='afm') self.afmlist = createFontList(afmfiles, fontext='afm') else: self.afmfiles = findSystemFonts(paths, fontext='afm') + \ findSystemFonts(fontext='afm') self.afmlist = createFontList(self.afmfiles, fontext='afm') self.ttf_lookup_cache = {} self.afm_lookup_cache = {} def get_default_weight(self): """ Return the default font weight. """ return self.__default_weight def get_default_size(self): """ Return the default font size. """ if self.default_size is None: return rcParams['font.size'] return self.default_size def set_default_weight(self, weight): """ Set the default font weight. The initial value is 'normal'. """ self.__default_weight = weight def set_default_size(self, size): """ Set the default font size in points. The initial value is set by ``font.size`` in rc. """ self.default_size = size def update_fonts(self, filenames): """ Update the font dictionary with new font files. Currently not implemented. """ # !!!! Needs implementing raise NotImplementedError # Each of the scoring functions below should return a value between # 0.0 (perfect match) and 1.0 (terrible match) def score_family(self, families, family2): """ Returns a match score between the list of font families in *families* and the font family name *family2*. An exact match anywhere in the list returns 0.0. A match by generic font name will return 0.1. No match will return 1.0. """ for i, family1 in enumerate(families): if family1.lower() in font_family_aliases: if family1 == 'sans': family1 == 'sans-serif' options = rcParams['font.' 
+ family1] if family2 in options: idx = options.index(family2) return 0.1 * (float(idx) / len(options)) elif family1.lower() == family2.lower(): return 0.0 return 1.0 def score_style(self, style1, style2): """ Returns a match score between *style1* and *style2*. An exact match returns 0.0. A match between 'italic' and 'oblique' returns 0.1. No match returns 1.0. """ if style1 == style2: return 0.0 elif style1 in ('italic', 'oblique') and \ style2 in ('italic', 'oblique'): return 0.1 return 1.0 def score_variant(self, variant1, variant2): """ Returns a match score between *variant1* and *variant2*. An exact match returns 0.0, otherwise 1.0. """ if variant1 == variant2: return 0.0 else: return 1.0 def score_stretch(self, stretch1, stretch2): """ Returns a match score between *stretch1* and *stretch2*. The result is the absolute value of the difference between the CSS numeric values of *stretch1* and *stretch2*, normalized between 0.0 and 1.0. """ try: stretchval1 = int(stretch1) except ValueError: stretchval1 = stretch_dict.get(stretch1, 500) try: stretchval2 = int(stretch2) except ValueError: stretchval2 = stretch_dict.get(stretch2, 500) return abs(stretchval1 - stretchval2) / 1000.0 def score_weight(self, weight1, weight2): """ Returns a match score between *weight1* and *weight2*. The result is the absolute value of the difference between the CSS numeric values of *weight1* and *weight2*, normalized between 0.0 and 1.0. """ try: weightval1 = int(weight1) except ValueError: weightval1 = weight_dict.get(weight1, 500) try: weightval2 = int(weight2) except ValueError: weightval2 = weight_dict.get(weight2, 500) return abs(weightval1 - weightval2) / 1000.0 def score_size(self, size1, size2): """ Returns a match score between *size1* and *size2*. If *size2* (the size specified in the font file) is 'scalable', this function always returns 0.0, since any font size can be generated. Otherwise, the result is the absolute distance between *size1* and *size2*, normalized so that the usual range of font sizes (6pt - 72pt) will lie between 0.0 and 1.0. """ if size2 == 'scalable': return 0.0 # Size value should have already been try: sizeval1 = float(size1) except ValueError: sizeval1 = self.default_size * font_scalings(size1) try: sizeval2 = float(size2) except ValueError: return 1.0 return abs(sizeval1 - sizeval2) / 72.0 def findfont(self, prop, fontext='ttf'): """ Search the font list for the font that most closely matches the :class:`FontProperties` *prop*. :meth:`findfont` performs a nearest neighbor search. Each font is given a similarity score to the target font properties. The first font with the highest score is returned. If no matches below a certain threshold are found, the default font (usually Vera Sans) is returned. The result is cached, so subsequent lookups don't have to perform the O(n) nearest neighbor search. See the `W3C Cascading Style Sheet, Level 1 <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation for a description of the font finding algorithm. 
""" debug = False if prop is None: return self.defaultFont if is_string_like(prop): prop = FontProperties(prop) fname = prop.get_file() if fname is not None: verbose.report('findfont returning %s'%fname, 'debug') return fname if fontext == 'afm': font_cache = self.afm_lookup_cache fontlist = self.afmlist else: font_cache = self.ttf_lookup_cache fontlist = self.ttflist cached = font_cache.get(hash(prop)) if cached: return cached best_score = 1e64 best_font = None for font in fontlist: # Matching family should have highest priority, so it is multiplied # by 10.0 score = \ self.score_family(prop.get_family(), font.name) * 10.0 + \ self.score_style(prop.get_style(), font.style) + \ self.score_variant(prop.get_variant(), font.variant) + \ self.score_weight(prop.get_weight(), font.weight) + \ self.score_stretch(prop.get_stretch(), font.stretch) + \ self.score_size(prop.get_size(), font.size) if score < best_score: best_score = score best_font = font if score == 0: break if best_font is None or best_score >= 10.0: verbose.report('findfont: Could not match %s. Returning %s' % (prop, self.defaultFont)) result = self.defaultFont else: verbose.report('findfont: Matching %s to %s (%s) with score of %f' % (prop, best_font.name, best_font.fname, best_score)) result = best_font.fname font_cache[hash(prop)] = result return result _is_opentype_cff_font_cache = {} def is_opentype_cff_font(filename): """ Returns True if the given font is a Postscript Compact Font Format Font embedded in an OpenType wrapper. Used by the PostScript and PDF backends that can not subset these fonts. """ if os.path.splitext(filename)[1].lower() == '.otf': result = _is_opentype_cff_font_cache.get(filename) if result is None: fd = open(filename, 'rb') tag = fd.read(4) fd.close() result = (tag == 'OTTO') _is_opentype_cff_font_cache[filename] = result return result return False # The experimental fontconfig-based backend. if USE_FONTCONFIG and sys.platform != 'win32': import re def fc_match(pattern, fontext): import commands fontexts = get_fontext_synonyms(fontext) ext = "." + fontext status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern) if status == 0: for match in _fc_match_regex.finditer(output): file = match.group(1) if os.path.splitext(file)[1][1:] in fontexts: return file return None _fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"') _fc_match_cache = {} def findfont(prop, fontext='ttf'): if not is_string_like(prop): prop = prop.get_fontconfig_pattern() cached = _fc_match_cache.get(prop) if cached is not None: return cached result = fc_match(prop, fontext) if result is None: result = fc_match(':', fontext) _fc_match_cache[prop] = result return result else: _fmcache = os.path.join(get_configdir(), 'fontList.cache') fontManager = None def _rebuild(): global fontManager fontManager = FontManager() pickle_dump(fontManager, _fmcache) verbose.report("generated new fontManager") try: fontManager = pickle_load(_fmcache) fontManager.default_size = None verbose.report("Using fontManager instance from %s" % _fmcache) except: _rebuild() def findfont(prop, **kw): global fontManager font = fontManager.findfont(prop, **kw) if not os.path.exists(font): verbose.report("%s returned by pickled fontManager does not exist" % font) _rebuild() font = fontManager.findfont(prop, **kw) return font
agpl-3.0
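A hedged example of the lookup API defined in the font_manager module above: build a FontProperties instance and resolve it to a font file with findfont. The exact path returned depends on the fonts installed on the local system; if no good match is found, the default font (usually Vera Sans) is returned.

# Illustrative lookup; the resulting path depends on locally installed fonts.
from matplotlib.font_manager import FontProperties, findfont

prop = FontProperties(family='sans-serif', style='italic',
                      weight='bold', size='large')
print(findfont(prop))     # path of the best-scoring TTF, or the default font
print(prop.get_name())    # family name of the matched font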
opengeostat/pygslib
sandbox/vmodel.py
1
6603
# -*- coding: utf-8 -*- #!/usr/bin/env python # using naming on http://www.gslib.com/gslib_help/programs.html import subprocess import copy import pandas as pd import pygslib import numpy as np import os import matplotlib.pyplot as plt __vmodel_par = \ """ Parameters for VMODEL ********************* START OF PARAMETERS: {outfl} - file for variogram output {ndir} {nlag} - number of directions and lags {ivdir_} - azm, dip, lag distance (array with shape [ndir,3]) {nst} {c0} - nst, nugget effect {vst_} """ def vmodel(parameters, gslib_path = None, silent = False): """vmodel(parameters, gslib_path = None) Funtion to calculate variogram models using the vmodel.exe external gslib program. Parameters ---------- parameters : dict dictionary with parameters gslib_path : string (default None) absolute or relative path to gslib excecutable programs silent: boolean if true external GSLIB stdout text is printed Returns ------ pandas.DataFrame with variograms Example -------- TODO: Notes ------ The dictionary with parameters may be as follows:: parameters = { 'outfl' : str or None, # path to the output file or None (to use '_xxx_.out') 'nlag' : int, # number of lags 'ivdir' : 2D array of floats, # azm,dip,lag distance (array with shape [ndir,3]) 'c0' : float, # nugget effect 'it' : int, # list of structure types (array with shape [nst]) 'cc' : float, # list of structure variances (array with shape [nst]) 'ang1' : float, # list of structure 1st rotation (array with shape [nst]) 'ang2' : float, # list of structure 2nd rotation (array with shape [nst]) 'ang3' : float, # list of structure 3rd rotation (array with shape [nst]) 'a_hmax' : float, # list of structure maximum horizontal ranges (array with shape [nst]) 'a_hmin' : float, # list of structure minimum horizontal ranges (array with shape [nst]) 'a_vert' : float} # list of structure vertical ranges (array with shape [nst]) the parameters nst (number of structures) and ndir (number of directions) are theduced from arrays ivdir and it see http://www.gslib.com/gslib_help/vmodel.html for more information """ if gslib_path is None: if os.name == "posix": gslib_path = '~/gslib/vmodel' else: gslib_path = 'c:\\gslib\\vmodel.exe' mypar = copy.deepcopy(parameters) if mypar['outfl'] is None: mypar['outfl'] = '_xxx_.out' # handle parameter arrays ivdir = np.array (mypar['ivdir']) it = np.array (mypar['it'], dtype = int) cc = np.array (mypar['cc']) ang1 = np.array (mypar['ang1']) ang2 = np.array (mypar['ang2']) ang3 = np.array (mypar['ang3']) a_hmax = np.array (mypar['a_hmax']) a_hmin = np.array (mypar['a_hmin']) a_vert = np.array (mypar['a_vert']) assert (ivdir.ndim==2) assert (it.ndim==cc.ndim==ang1.ndim==ang2.ndim==ang3.ndim==a_hmax.ndim==a_hmin.ndim==a_vert.ndim==1) assert (it.shape[0]==cc.shape[0]==ang1.shape[0]==ang2.shape[0]==ang3.shape[0]==a_hmax.shape[0]==a_hmin.shape[0]==a_vert.shape[0]) mypar['ndir'] = ivdir.shape[0] mypar['nst'] = it.shape[0] assert (set(it).issubset(set([1,2,3,4,5]))) # is a correct variogram type? 
    mypar['ivdir_'] = pd.DataFrame.to_string(pd.DataFrame(ivdir), index=False, header=False)  # array to string

    # one block of (it, cc, ang1, ang2, ang3) / (a_hmax, a_hmin, a_vert) lines per structure
    mypar['vst_'] = ''
    for i in range(mypar['nst']):
        mypar['vst_'] = mypar['vst_'] + str(it[i]) + \
                        ' ' + str(cc[i]) + \
                        ' ' + str(ang1[i]) + \
                        ' ' + str(ang2[i]) + \
                        ' ' + str(ang3[i]) + \
                        ' - it,cc,ang1,ang2,ang3 \n' + \
                        ' ' + str(a_hmax[i]) + \
                        ' ' + str(a_hmin[i]) + \
                        ' ' + str(a_vert[i]) + \
                        ' - a_hmax, a_hmin, a_vert \n'

    par = __vmodel_par.format(**mypar)
    print(par)

    fpar = '_xxx_.par'
    with open(fpar, "w") as f:
        f.write(par)

    # call the gslib executable
    # this construction can be used in a loop for parallel execution
    p = subprocess.Popen([gslib_path, fpar],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    result = p.returncode
    p.wait()

    if p.returncode != 0:
        raise NameError('gslib vmodel NameError' + str(stderr.decode('utf-8')))

    if not silent:
        try:
            print(stdout.decode('utf-8'))
        except:
            print(stdout)

    # put results in pandas
    nvarg = 1
    ndir = mypar['ndir']
    nlag = mypar['nlag'] + 2
    ignore = np.arange(0, nvarg*ndir*nlag + ndir*nvarg, nlag + 1)  # list to ignore variogram headers

    # a) read resulting file
    vg = pd.read_csv(mypar['outfl'],
                     header=None,
                     skiprows=ignore,
                     delim_whitespace=True,
                     names=['Lag', 'average separation', 'var funct',
                            'number of directions', 'covariance ', 'correlation'])

    # b) add extra variables from headers
    vg['Variogram'] = np.repeat(range(nvarg), ndir*nlag)  # variogram number = row index on parameter['ivpar']
    vg['Direction'] = np.tile(np.repeat(range(ndir), nlag), nvarg)
    vg['tail'] = np.nan
    vg['head'] = np.nan
    vg['type'] = np.nan
    vg['cut'] = np.nan

    # clean a bit zeros and variogram at distance zero
    vg.loc[vg['correlation'] == 1, 'var funct'] = None

    vg = vg.set_index(['Variogram', 'Direction', 'Lag'])

    # prepare figure
    fig, ax = plt.subplots(figsize=(8, 6))
    for i in vg.index.levels[0]:
        for j in vg.index.levels[1]:
            vg.loc[i, j].plot(kind='line', x='average separation', y='var funct',
                              ax=ax, label='v{} d{}'.format(i, j))

    return vg, fig, ax
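A hedged usage sketch of the function above, following the parameter dictionary documented in its docstring: two directions, a nugget of 0.1 and two nested spherical structures. The gslib_path value is an assumption; it must point at a local GSLIB vmodel executable.

# Illustrative call only; requires a local GSLIB ``vmodel`` executable and the
# path below is an assumption about where it is installed.
parameters = {
    'outfl': None,                 # use the default '_xxx_.out'
    'nlag': 50,                    # number of lags per direction
    'ivdir': [[0.0, 0.0, 2.0],     # azm, dip, lag distance (one row per direction)
              [90.0, 0.0, 2.0]],
    'c0': 0.1,                     # nugget effect
    'it': [1, 1],                  # two spherical structures
    'cc': [0.5, 0.4],
    'ang1': [0.0, 0.0], 'ang2': [0.0, 0.0], 'ang3': [0.0, 0.0],
    'a_hmax': [30.0, 90.0], 'a_hmin': [20.0, 60.0], 'a_vert': [5.0, 15.0]}

vg, fig, ax = vmodel(parameters, gslib_path='c:\\gslib\\vmodel.exe', silent=True)
print(vg.head())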
mit
google-research/google-research
rllim/main_rllim_on_real_data.py
1
6988
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RL-LIM Experiments on real datasets. Understanding Black-box Model Predictions using RL-LIM. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import lightgbm import numpy as np import pandas as pd from sklearn import linear_model from rllim import data_loading from rllim import rllim from rllim import rllim_metrics def main(args): """Main function of RL-LIM for synthetic data experiments. Args: args: data_name, train_no, probe_no, test_no, seed, hyperparameters, network parameters """ # Problem specification problem = args.problem # The ratio between training and probe datasets train_rate = args.train_rate probe_rate = args.probe_rate dict_rate = {'train': train_rate, 'probe': probe_rate} # Random seed seed = args.seed # Network parameters parameters = dict() parameters['hidden_dim'] = args.hidden_dim parameters['iterations'] = args.iterations parameters['num_layers'] = args.num_layers parameters['batch_size'] = args.batch_size parameters['batch_size_inner'] = args.batch_size_inner parameters['lambda'] = args.hyper_lambda # Checkpoint file name checkpoint_file_name = args.checkpoint_file_name # Number of sample explanations n_exp = args.n_exp # Loads data data_loading.load_facebook_data(dict_rate, seed) print('Finished data loading.') # Preprocesses data # Normalization methods: either 'minmax' or 'standard' normalization = args.normalization # Extracts features and labels & normalizes features x_train, y_train, x_probe, _, x_test, y_test, col_names = \ data_loading.preprocess_data(normalization, 'train.csv', 'probe.csv', 'test.csv') print('Finished data preprocess.') # Trains black-box model # Initializes black-box model if problem == 'regression': bb_model = lightgbm.LGBMRegressor() elif problem == 'classification': bb_model = lightgbm.LGBMClassifier() # Trains black-box model bb_model = bb_model.fit(x_train, y_train) print('Finished black-box model training.') # Constructs auxiliary datasets if problem == 'regression': y_train_hat = bb_model.predict(x_train) y_probe_hat = bb_model.predict(x_probe) elif problem == 'classification': y_train_hat = bb_model.predict_proba(x_train)[:, 1] y_probe_hat = bb_model.predict_proba(x_probe)[:, 1] print('Finished auxiliary dataset construction.') # Trains interpretable baseline # Defines baseline baseline = linear_model.Ridge(alpha=1) # Trains baseline model baseline.fit(x_train, y_train_hat) print('Finished interpretable baseline training.') # Trains instance-wise weight estimator # Defines locally interpretable model interp_model = linear_model.Ridge(alpha=1) # Initializes RL-LIM rllim_class = rllim.Rllim(x_train, y_train_hat, x_probe, y_probe_hat, parameters, interp_model, baseline, checkpoint_file_name) # Trains RL-LIM rllim_class.rllim_train() print('Finished instance-wise weight estimator training.') # Interpretable inference # Trains locally interpretable models and 
output # instance-wise explanations (test_coef) and # interpretable predictions (test_y_fit) test_y_fit, test_coef = rllim_class.rllim_interpreter(x_train, y_train_hat, x_test, interp_model) print('Finished instance-wise predictions and local explanations.') # Overall performance mae = rllim_metrics.overall_performance_metrics(y_test, test_y_fit, metric='mae') print('Overall performance of RL-LIM in terms of MAE: ' + str(np.round(mae, 4))) # Black-box model predictions y_test_hat = bb_model.predict(x_test) # Fidelity in terms of MAE mae = rllim_metrics.fidelity_metrics(y_test_hat, test_y_fit, metric='mae') print('Fidelity of RL-LIM in terms of MAE: ' + str(np.round(mae, 4))) # Fidelity in terms of R2 Score r2 = rllim_metrics.fidelity_metrics(y_test_hat, test_y_fit, metric='r2') print('Fidelity of RL-LIM in terms of R2 Score: ' + str(np.round(r2, 4))) # Instance-wise explanations # Local explanations of n_exp samples local_explanations = test_coef[:n_exp, :] final_col_names = np.concatenate((np.asarray(['intercept']), col_names), axis=0) pd.DataFrame(data=local_explanations, index=range(n_exp), columns=final_col_names) if __name__ == '__main__': # Inputs for the main function parser = argparse.ArgumentParser() parser.add_argument( '--problem', help='regression or classification', default='regression', type=str) parser.add_argument( '--normalization', help='minmax or standard', default='minmax', type=str) parser.add_argument( '--train_rate', help='rate of training samples', default=0.9, type=float) parser.add_argument( '--probe_rate', help='rate of probe samples', default=0.1, type=float) parser.add_argument( '--seed', help='random seed', default=0, type=int) parser.add_argument( '--hyper_lambda', help='main hyper-parameter of RL-LIM (lambda)', default=1.0, type=float) parser.add_argument( '--hidden_dim', help='dimensions of hidden states', default=100, type=int) parser.add_argument( '--num_layers', help='number of network layers', default=5, type=int) parser.add_argument( '--iterations', help='number of iterations', default=2000, type=int) parser.add_argument( '--batch_size', help='number of batch size for RL', default=5000, type=int) parser.add_argument( '--batch_size_inner', help='number of batch size for inner iterations', default=10, type=int) parser.add_argument( '--n_exp', help='the number of sample explanations', default=5, type=int) parser.add_argument( '--checkpoint_file_name', help='file name for saving and loading the trained model', default='./tmp/model.ckpt', type=str) args_in = parser.parse_args() # Calls main function main(args_in)
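The __main__ block above is driven by argparse. Below is a hedged sketch of calling main() programmatically with the same defaults; the attribute names mirror the command-line flags defined above, and running it requires the rllim package plus the dataset fetched by data_loading.load_facebook_data.

# Illustrative programmatic invocation; equivalent to running the script with
# its default flags. Requires the rllim package and its dataset to be available.
from argparse import Namespace

args = Namespace(
    problem='regression', normalization='minmax',
    train_rate=0.9, probe_rate=0.1, seed=0,
    hyper_lambda=1.0, hidden_dim=100, num_layers=5,
    iterations=2000, batch_size=5000, batch_size_inner=10,
    n_exp=5, checkpoint_file_name='./tmp/model.ckpt')

main(args)   # data loading -> black-box -> baseline -> RL-LIM -> metrics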
apache-2.0
John-Keating/ThinkStats2
code/thinkstats2.py
68
68825
"""This file contains code for use with "Think Stats" and "Think Bayes", both by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division """This file contains class definitions for: Hist: represents a histogram (map from values to integer frequencies). Pmf: represents a probability mass function (map from values to probs). _DictWrapper: private parent class for Hist and Pmf. Cdf: represents a discrete cumulative distribution function Pdf: represents a continuous probability density function """ import bisect import copy import logging import math import random import re from collections import Counter from operator import itemgetter import thinkplot import numpy as np import pandas import scipy from scipy import stats from scipy import special from scipy import ndimage from io import open ROOT2 = math.sqrt(2) def RandomSeed(x): """Initialize the random and np.random generators. x: int seed """ random.seed(x) np.random.seed(x) def Odds(p): """Computes odds for a given probability. Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor. Note: when p=1, the formula for odds divides by zero, which is normally undefined. But I think it is reasonable to define Odds(1) to be infinity, so that's what this function does. p: float 0-1 Returns: float odds """ if p == 1: return float('inf') return p / (1 - p) def Probability(o): """Computes the probability corresponding to given odds. Example: o=2 means 2:1 odds in favor, or 2/3 probability o: float odds, strictly positive Returns: float probability """ return o / (o + 1) def Probability2(yes, no): """Computes the probability corresponding to given odds. Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability. yes, no: int or float odds in favor """ return yes / (yes + no) class Interpolator(object): """Represents a mapping between sorted sequences; performs linear interp. Attributes: xs: sorted list ys: sorted list """ def __init__(self, xs, ys): self.xs = xs self.ys = ys def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys) def Reverse(self, y): """Looks up y and returns the corresponding value of x.""" return self._Bisect(y, self.ys, self.xs) def _Bisect(self, x, xs, ys): """Helper function.""" if x <= xs[0]: return ys[0] if x >= xs[-1]: return ys[-1] i = bisect.bisect(xs, x) frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1]) y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1]) return y class _DictWrapper(object): """An object that contains a dictionary.""" def __init__(self, obj=None, label=None): """Initializes the distribution. 
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs label: string label """ self.label = label if label is not None else '_nolegend_' self.d = {} # flag whether the distribution is under a log transform self.log = False if obj is None: return if isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.label = label if label is not None else obj.label if isinstance(obj, dict): self.d.update(obj.items()) elif isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.d.update(obj.Items()) elif isinstance(obj, pandas.Series): self.d.update(obj.value_counts().iteritems()) else: # finally, treat it like a list self.d.update(Counter(obj)) if len(self) > 0 and isinstance(self, Pmf): self.Normalize() def __hash__(self): return id(self) def __str__(self): cls = self.__class__.__name__ return '%s(%s)' % (cls, str(self.d)) __repr__ = __str__ def __eq__(self, other): return self.d == other.d def __len__(self): return len(self.d) def __iter__(self): return iter(self.d) def iterkeys(self): """Returns an iterator over keys.""" return iter(self.d) def __contains__(self, value): return value in self.d def __getitem__(self, value): return self.d.get(value, 0) def __setitem__(self, value, prob): self.d[value] = prob def __delitem__(self, value): del self.d[value] def Copy(self, label=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. label: string label for the new Hist returns: new _DictWrapper with the same type """ new = copy.copy(self) new.d = copy.copy(self.d) new.label = label if label is not None else self.label return new def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. """ if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.items(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x) def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.items(): self.Set(x, math.exp(p - m)) def GetDict(self): """Gets the dictionary.""" return self.d def SetDict(self, d): """Sets the dictionary.""" self.d = d def Values(self): """Gets an unsorted sequence of values. Note: one source of confusion is that the keys of this dictionary are the values of the Hist/Pmf, and the values of the dictionary are frequencies/probabilities. """ return self.d.keys() def Items(self): """Gets an unsorted sequence of (value, freq/prob) pairs.""" return self.d.items() def Render(self, **options): """Generates a sequence of points suitable for plotting. 
Note: options are ignored Returns: tuple of (sorted value sequence, freq/prob sequence) """ if min(self.d.keys()) is np.nan: logging.warning('Hist: contains NaN, may not render correctly.') return zip(*sorted(self.Items())) def MakeCdf(self, label=None): """Makes a Cdf.""" label = label if label is not None else self.label return Cdf(self, label=label) def Print(self): """Prints the values and freqs/probs in ascending order.""" for val, prob in sorted(self.d.items()): print(val, prob) def Set(self, x, y=0): """Sets the freq/prob associated with the value x. Args: x: number value y: number freq or prob """ self.d[x] = y def Incr(self, x, term=1): """Increments the freq/prob associated with the value x. Args: x: number value term: how much to increment by """ self.d[x] = self.d.get(x, 0) + term def Mult(self, x, factor): """Scales the freq/prob associated with the value x. Args: x: number value factor: how much to multiply by """ self.d[x] = self.d.get(x, 0) * factor def Remove(self, x): """Removes a value. Throws an exception if the value is not there. Args: x: value to remove """ del self.d[x] def Total(self): """Returns the total of the frequencies/probabilities in the map.""" total = sum(self.d.values()) return total def MaxLike(self): """Returns the largest frequency/probability in the map.""" return max(self.d.values()) def Largest(self, n=10): """Returns the largest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=True)[:n] def Smallest(self, n=10): """Returns the smallest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=False)[:n] class Hist(_DictWrapper): """Represents a histogram, which is a map from values to frequencies. Values can be any hashable type; frequencies are integer counters. """ def Freq(self, x): """Gets the frequency associated with the value x. Args: x: number value Returns: int frequency """ return self.d.get(x, 0) def Freqs(self, xs): """Gets frequencies for a sequence of values.""" return [self.Freq(x) for x in xs] def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items(): if freq > other.Freq(val): return False return True def Subtract(self, other): """Subtracts the values in the given histogram from this histogram.""" for val, freq in other.Items(): self.Incr(val, -freq) class Pmf(_DictWrapper): """Represents a probability mass function. Values can be any hashable type; probabilities are floating-point. Pmfs are not necessarily normalized. """ def Prob(self, x, default=0): """Gets the probability associated with the value x. Args: x: number value default: value to return if the key is not there Returns: float probability """ return self.d.get(x, default) def Probs(self, xs): """Gets probabilities for a sequence of values.""" return [self.Prob(x) for x in xs] def Percentile(self, percentage): """Computes a percentile of a given Pmf. Note: this is not super efficient. If you are planning to compute more than a few percentiles, compute the Cdf. percentage: float 0-100 returns: value from the Pmf """ p = percentage / 100.0 total = 0 for val, prob in sorted(self.Items()): total += prob if total >= p: return val def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. 
x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbGreater(self, x) else: t = [prob for (val, prob) in self.d.items() if val > x] return sum(t) def ProbLess(self, x): """Probability that a sample from this Pmf is less than x. x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbLess(self, x) else: t = [prob for (val, prob) in self.d.items() if val < x] return sum(t) def __lt__(self, obj): """Less than. obj: number or _DictWrapper returns: float probability """ return self.ProbLess(obj) def __gt__(self, obj): """Greater than. obj: number or _DictWrapper returns: float probability """ return self.ProbGreater(obj) def __ge__(self, obj): """Greater than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self < obj) def __le__(self, obj): """Less than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self > obj) def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Normalize: Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('Normalize: total probability is zero.') #logging.warning('Normalize: total probability is zero.') #return total factor = fraction / total for x in self.d: self.d[x] *= factor return total def Random(self): """Chooses a random element from this PMF. Note: this is not very efficient. If you plan to call this more than a few times, consider converting to a CDF. Returns: float value from the Pmf """ target = random.random() total = 0.0 for x, p in self.d.items(): total += p if total >= target: return x # we shouldn't get here raise ValueError('Random: Pmf might not be normalized.') def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mean = 0.0 for x, p in self.d.items(): mean += p * x return mean def Var(self, mu=None): """Computes the variance of a PMF. mu: the point around which the variance is computed; if omitted, computes the mean returns: float variance """ if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.items(): var += p * (x - mu) ** 2 return var def Std(self, mu=None): """Computes the standard deviation of a PMF. mu: the point around which the variance is computed; if omitted, computes the mean returns: float standard deviation """ var = self.Var(mu) return math.sqrt(var) def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability """ _, val = max((prob, val) for val, prob in self.Items()) return val def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = self.MakeCdf() return cdf.CredibleInterval(percentage) def __add__(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf or a scalar returns: new Pmf """ try: return self.AddPmf(other) except AttributeError: return self.AddConstant(other) def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. 
other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf def __sub__(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.SubPmf(other) except AttributeError: return self.AddConstant(-other) def SubPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 - v2, p1 * p2) return pmf def __mul__(self, other): """Computes the Pmf of the product of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.MulPmf(other) except AttributeError: return self.MulConstant(other) def MulPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 * v2, p1 * p2) return pmf def MulConstant(self, other): """Computes the Pmf of the product of a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 * other, p1) return pmf def __div__(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.DivPmf(other) except AttributeError: return self.MulConstant(1/other) __truediv__ = __div__ def DivPmf(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 / v2, p1 * p2) return pmf def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.MakeCdf() return cdf.Max(k) class Joint(Pmf): """Represents a joint distribution. The values are sequences (usually tuples) """ def Marginal(self, i, label=None): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf def Conditional(self, i, j, val, label=None): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Assumes that the PMFs represent independent random variables. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint def MakeHistFromList(t, label=None): """Makes a histogram from an unsorted sequence of values. 
Args: t: sequence of numbers label: string label for this histogram Returns: Hist object """ return Hist(t, label=label) def MakeHistFromDict(d, label=None): """Makes a histogram from a map from values to frequencies. Args: d: dictionary that maps values to frequencies label: string label for this histogram Returns: Hist object """ return Hist(d, label) def MakePmfFromList(t, label=None): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this PMF Returns: Pmf object """ return Pmf(t, label=label) def MakePmfFromDict(d, label=None): """Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this PMF Returns: Pmf object """ return Pmf(d, label=label) def MakePmfFromItems(t, label=None): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs label: string label for this PMF Returns: Pmf object """ return Pmf(dict(t), label=label) def MakePmfFromHist(hist, label=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object label: string label Returns: Pmf object """ if label is None: label = hist.label return Pmf(hist, label=label) def MakeMixture(metapmf, label='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. label: string label for the new Pmf. Returns: Pmf object. """ mix = Pmf(label=label) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusize) n: number of values """ pmf = Pmf() for x in np.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf class Cdf(object): """Represents a cumulative distribution function. Attributes: xs: sequence of values ps: sequence of probabilities label: string used as a graph label. """ def __init__(self, obj=None, ps=None, label=None): """Initializes. If ps is provided, obj must be the corresponding list of values. 
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs ps: list of cumulative probabilities label: string label """ self.label = label if label is not None else '_nolegend_' if isinstance(obj, (_DictWrapper, Cdf, Pdf)): if not label: self.label = label if label is not None else obj.label if obj is None: # caller does not provide obj, make an empty Cdf self.xs = np.asarray([]) self.ps = np.asarray([]) if ps is not None: logging.warning("Cdf: can't pass ps without also passing xs.") return else: # if the caller provides xs and ps, just store them if ps is not None: if isinstance(ps, str): logging.warning("Cdf: ps can't be a string") self.xs = np.asarray(obj) self.ps = np.asarray(ps) return # caller has provided just obj, not ps if isinstance(obj, Cdf): self.xs = copy.copy(obj.xs) self.ps = copy.copy(obj.ps) return if isinstance(obj, _DictWrapper): dw = obj else: dw = Hist(obj) if len(dw) == 0: self.xs = np.asarray([]) self.ps = np.asarray([]) return xs, freqs = zip(*sorted(dw.Items())) self.xs = np.asarray(xs) self.ps = np.cumsum(freqs, dtype=np.float) self.ps /= self.ps[-1] def __str__(self): return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps)) __repr__ = __str__ def __len__(self): return len(self.xs) def __getitem__(self, x): return self.Prob(x) def __setitem__(self): raise UnimplementedMethodException() def __delitem__(self): raise UnimplementedMethodException() def __eq__(self, other): return np.all(self.xs == other.xs) and np.all(self.ps == other.ps) def Copy(self, label=None): """Returns a copy of this Cdf. label: string label for the new Cdf """ if label is None: label = self.label return Cdf(list(self.xs), list(self.ps), label=label) def MakePmf(self, label=None): """Makes a Pmf.""" if label is None: label = self.label return Pmf(self, label=label) def Values(self): """Returns a sorted list of values. """ return self.xs def Items(self): """Returns a sorted sequence of (value, probability) pairs. Note: in Python3, returns an iterator. """ a = self.ps b = np.roll(a, 1) b[0] = 0 return zip(self.xs, a-b) def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() # don't use +=, or else an int array + float yields int array new.xs = new.xs + term return new def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() # don't use *=, or else an int array * float yields int array new.xs = new.xs * factor return new def Prob(self, x): """Returns CDF(x), the probability that corresponds to value x. Args: x: number Returns: float probability """ if x < self.xs[0]: return 0.0 index = bisect.bisect(self.xs, x) p = self.ps[index-1] return p def Probs(self, xs): """Gets probabilities for a sequence of values. xs: any sequence that can be converted to NumPy array returns: NumPy array of cumulative probabilities """ xs = np.asarray(xs) index = np.searchsorted(self.xs, xs, side='right') ps = self.ps[index-1] ps[xs < self.xs[0]] = 0.0 return ps ProbArray = Probs def Value(self, p): """Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value """ if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') index = bisect.bisect_left(self.ps, p) return self.xs[index] def ValueArray(self, ps): """Returns InverseCDF(p), the value that corresponds to probability p. 
Args: ps: NumPy array of numbers in the range [0, 1] Returns: NumPy array of values """ ps = np.asarray(ps) if np.any(ps < 0) or np.any(ps > 1): raise ValueError('Probability p must be in range [0, 1]') index = np.searchsorted(self.ps, ps, side='left') return self.xs[index] def Percentile(self, p): """Returns the value that corresponds to percentile p. Args: p: number in the range [0, 100] Returns: number value """ return self.Value(p / 100.0) def PercentileRank(self, x): """Returns the percentile rank of the value x. x: potential value in the CDF returns: percentile rank in the range 0 to 100 """ return self.Prob(x) * 100.0 def Random(self): """Chooses a random value from this distribution.""" return self.Value(random.random()) def Sample(self, n): """Generates a random sample from this distribution. n: int length of the sample returns: NumPy array """ ps = np.random.random(n) return self.ValueArray(ps) def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval ConfidenceInterval = CredibleInterval def _Round(self, multiplier=1000.0): """ An entry is added to the cdf only if the percentile differs from the previous value in a significant digit, where the number of significant digits is determined by multiplier. The default is 1000, which keeps log10(1000) = 3 significant digits. """ # TODO(write this method) raise UnimplementedMethodException() def Render(self, **options): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Note: options are ignored Returns: tuple of (xs, ps) """ def interleave(a, b): c = np.empty(a.shape[0] + b.shape[0]) c[::2] = a c[1::2] = b return c a = np.array(self.xs) xs = interleave(a, a) shift_ps = np.roll(self.ps, 1) shift_ps[0] = 0 ps = interleave(shift_ps, self.ps) return xs, ps def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.Copy() cdf.ps **= k return cdf def MakeCdfFromItems(items, label=None): """Makes a cdf from an unsorted sequence of (value, frequency) pairs. Args: items: unsorted sequence of (value, frequency) pairs label: string label for this CDF Returns: cdf: list of (value, fraction) pairs """ return Cdf(dict(items), label=label) def MakeCdfFromDict(d, label=None): """Makes a CDF from a dictionary that maps values to frequencies. Args: d: dictionary that maps values to frequencies. label: string label for the data. Returns: Cdf object """ return Cdf(d, label=label) def MakeCdfFromList(seq, label=None): """Creates a CDF from an unsorted sequence. Args: seq: unsorted sequence of sortable values label: string label for the cdf Returns: Cdf object """ return Cdf(seq, label=label) def MakeCdfFromHist(hist, label=None): """Makes a CDF from a Hist object. Args: hist: Pmf.Hist object label: string label for the data. Returns: Cdf object """ if label is None: label = hist.label return Cdf(hist, label=label) def MakeCdfFromPmf(pmf, label=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object label: string label for the data. 
Returns: Cdf object """ if label is None: label = pmf.label return Cdf(pmf, label=label) class UnimplementedMethodException(Exception): """Exception if someone calls a method that should be overridden.""" class Suite(Pmf): """Represents a suite of hypotheses and their probabilities.""" def Update(self, data): """Updates each hypothesis based on the data. data: any representation of the data returns: the normalizing constant """ for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like) def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdateSet(self, dataset): """Updates each hypothesis based on the dataset. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: None """ for data in dataset: self.LogUpdate(data) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def LogLikelihood(self, data, hypo): """Computes the log likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob) def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo) def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds)) def MakeSuiteFromList(t, label=None): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this suite Returns: Suite object """ hist = MakeHistFromList(t, label=label) d = hist.GetDict() return MakeSuiteFromDict(d) def MakeSuiteFromHist(hist, label=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object label: string label Returns: Suite object """ if label is None: label = hist.label # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, label) def MakeSuiteFromDict(d, label=None): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this suite Returns: Suite object """ suite = Suite(label=label) suite.SetDict(d) suite.Normalize() return suite class Pdf(object): """Represents a probability density function (PDF).""" def Density(self, x): """Evaluates this Pdf at x. 
Returns: float or NumPy array of probability density """ raise UnimplementedMethodException() def GetLinspace(self): """Get a linspace for plotting. Not all subclasses of Pdf implement this. Returns: numpy array """ raise UnimplementedMethodException() def MakePmf(self, **options): """Makes a discrete version of this Pdf. options can include label: string low: low end of range high: high end of range n: number of places to evaluate Returns: new Pmf """ label = options.pop('label', '') xs, ds = self.Render(**options) return Pmf(dict(zip(xs, ds)), label=label) def Render(self, **options): """Generates a sequence of points suitable for plotting. If options includes low and high, it must also include n; in that case the density is evaluated an n locations between low and high, including both. If options includes xs, the density is evaluate at those location. Otherwise, self.GetLinspace is invoked to provide the locations. Returns: tuple of (xs, densities) """ low, high = options.pop('low', None), options.pop('high', None) if low is not None and high is not None: n = options.pop('n', 101) xs = np.linspace(low, high, n) else: xs = options.pop('xs', None) if xs is None: xs = self.GetLinspace() ds = self.Density(xs) return xs, ds def Items(self): """Generates a sequence of (value, probability) pairs. """ return zip(*self.Render()) class NormalPdf(Pdf): """Represents the PDF of a Normal distribution.""" def __init__(self, mu=0, sigma=1, label=None): """Constructs a Normal Pdf with given mu and sigma. mu: mean sigma: standard deviation label: string """ self.mu = mu self.sigma = sigma self.label = label if label is not None else '_nolegend_' def __str__(self): return 'NormalPdf(%f, %f)' % (self.mu, self.sigma) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = self.mu-3*self.sigma, self.mu+3*self.sigma return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.norm.pdf(xs, self.mu, self.sigma) class ExponentialPdf(Pdf): """Represents the PDF of an exponential distribution.""" def __init__(self, lam=1, label=None): """Constructs an exponential Pdf with given parameter. lam: rate parameter label: string """ self.lam = lam self.label = label if label is not None else '_nolegend_' def __str__(self): return 'ExponentialPdf(%f)' % (self.lam) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = 0, 5.0/self.lam return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.expon.pdf(xs, scale=1.0/self.lam) class EstimatedPdf(Pdf): """Represents a PDF estimated by KDE.""" def __init__(self, sample, label=None): """Estimates the density function based on a sample. sample: sequence of data label: string """ self.label = label if label is not None else '_nolegend_' self.kde = stats.gaussian_kde(sample) low = min(sample) high = max(sample) self.linspace = np.linspace(low, high, 101) def __str__(self): return 'EstimatedPdf(label=%s)' % str(self.label) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ return self.linspace def Density(self, xs): """Evaluates this Pdf at xs. 
returns: float or NumPy array of probability density """ return self.kde.evaluate(xs) def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total def PmfProbGreater(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 > v2: total += p1 * p2 return total def PmfProbEqual(pmf1, pmf2): """Probability that a value from pmf1 equals a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 == v2: total += p1 * p2 return total def RandomSum(dists): """Chooses a random value from each dist and returns the sum. dists: sequence of Pmf or Cdf objects returns: numerical sum """ total = sum(dist.Random() for dist in dists) return total def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = Pmf(RandomSum(dists) for i in range(n)) return pmf def EvalNormalPdf(x, mu, sigma): """Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density """ return stats.norm.pdf(x, mu, sigma) def MakeNormalPmf(mu, sigma, num_sigmas, n=201): """Makes a PMF discrete approx to a Normal distribution. mu: float mean sigma: float standard deviation num_sigmas: how many sigmas to extend in each direction n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() low = mu - num_sigmas * sigma high = mu + num_sigmas * sigma for x in np.linspace(low, high, n): p = EvalNormalPdf(x, mu, sigma) pmf.Set(x, p) pmf.Normalize() return pmf def EvalBinomialPmf(k, n, p): """Evaluates the binomial PMF. Returns the probabily of k successes in n trials with probability p. """ return stats.binom.pmf(k, n, p) def EvalHypergeomPmf(k, N, K, n): """Evaluates the hypergeometric PMF. Returns the probabily of k successes in n trials from a population N with K successes in it. """ return stats.hypergeom.pmf(k, N, K, n) def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / special.gamma(k+1) def MakePoissonPmf(lam, high, step=1): """Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf """ pmf = Pmf() for k in range(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf def EvalExponentialPdf(x, lam): """Computes the exponential PDF. 
x: value lam: parameter lambda in events per unit time returns: float probability density """ return lam * math.exp(-lam * x) def EvalExponentialCdf(x, lam): """Evaluates CDF of the exponential distribution with parameter lam.""" return 1 - math.exp(-lam * x) def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in np.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf def StandardNormalCdf(x): """Evaluates the CDF of the standard Normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution #Cumulative_distribution_function Args: x: float Returns: float """ return (math.erf(x / ROOT2) + 1) / 2 def EvalNormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the normal distribution. Args: x: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.cdf(x, loc=mu, scale=sigma) def EvalNormalCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.ppf(p, loc=mu, scale=sigma) def EvalLognormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the lognormal distribution. x: float or sequence mu: mean parameter sigma: standard deviation parameter Returns: float or sequence """ return stats.lognorm.cdf(x, loc=mu, scale=sigma) def RenderExpoCdf(lam, low, high, n=101): """Generates sequences of xs and ps for an exponential CDF. lam: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = 1 - np.exp(-lam * xs) #ps = stats.expon.cdf(xs, scale=1.0/lam) return xs, ps def RenderNormalCdf(mu, sigma, low, high, n=101): """Generates sequences of xs and ps for a Normal CDF. mu: parameter sigma: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = stats.norm.cdf(xs, mu, sigma) return xs, ps def RenderParetoCdf(xmin, alpha, low, high, n=50): """Generates sequences of xs and ps for a Pareto CDF. xmin: parameter alpha: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ if low < xmin: low = xmin xs = np.linspace(low, high, n) ps = 1 - (xs / xmin) ** -alpha #ps = stats.pareto.cdf(xs, scale=xmin, b=alpha) return xs, ps class Beta(object): """Represents a Beta distribution. See http://en.wikipedia.org/wiki/Beta_distribution """ def __init__(self, alpha=1, beta=1, label=None): """Initializes a Beta distribution.""" self.alpha = alpha self.beta = beta self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails def Mean(self): """Computes the mean of this distribution.""" return self.alpha / (self.alpha + self.beta) def Random(self): """Generates a random variate from this distribution.""" return random.betavariate(self.alpha, self.beta) def Sample(self, n): """Generates a random sample from this distribution. 
n: int sample size """ size = n, return np.random.beta(self.alpha, self.beta, size) def EvalPdf(self, x): """Evaluates the PDF at x.""" return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1) def MakePmf(self, steps=101, label=None): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in range(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = Pmf(dict(zip(xs, probs)), label=label) return pmf def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in range(steps)] ps = [special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf class Dirichlet(object): """Represents a Dirichlet distribution. See http://en.wikipedia.org/wiki/Dirichlet_distribution """ def __init__(self, n, conc=1, label=None): """Initializes a Dirichlet distribution. n: number of dimensions conc: concentration parameter (smaller yields more concentration) label: string label """ if n < 2: raise ValueError('A Dirichlet distribution with ' 'n<2 makes no sense') self.n = n self.params = np.ones(n, dtype=np.float) * conc self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = np.random.gamma(self.params) return p / p.sum() def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod() def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = np.log(x[:m]) * data return y.sum() def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha) def PredictivePmf(self, xs, label=None): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return Pmf(zip(xs, ps), label=label) def BinomialCoef(n, k): """Compute the binomial coefficient "n choose k". n: number of trials k: number of successes Returns: float """ return scipy.misc.comb(n, k) def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k) def NormalProbability(ys, jitter=0.0): """Generates data for a normal probability plot. 
ys: sequence of values jitter: float magnitude of jitter added to the ys returns: numpy arrays xs, ys """ n = len(ys) xs = np.random.normal(0, 1, n) xs.sort() if jitter: ys = Jitter(ys, jitter) else: ys = np.array(ys) ys.sort() return xs, ys def Jitter(values, jitter=0.5): """Jitters the values by adding a uniform variate in (-jitter, jitter). values: sequence jitter: scalar magnitude of jitter returns: new numpy array """ n = len(values) return np.random.uniform(-jitter, +jitter, n) + values def NormalProbabilityPlot(sample, fit_color='0.8', **options): """Makes a normal probability plot with a fitted line. sample: sequence of numbers fit_color: color string for the fitted line options: passed along to Plot """ xs, ys = NormalProbability(sample) mean, var = MeanVar(sample) std = math.sqrt(var) fit = FitLine(xs, mean, std) thinkplot.Plot(*fit, color=fit_color, label='model') xs, ys = NormalProbability(sample) thinkplot.Plot(xs, ys, **options) def Mean(xs): """Computes mean. xs: sequence of values returns: float mean """ return np.mean(xs) def Var(xs, mu=None, ddof=0): """Computes variance. xs: sequence of values mu: option known mean ddof: delta degrees of freedom returns: float """ xs = np.asarray(xs) if mu is None: mu = xs.mean() ds = xs - mu return np.dot(ds, ds) / (len(xs) - ddof) def Std(xs, mu=None, ddof=0): """Computes standard deviation. xs: sequence of values mu: option known mean ddof: delta degrees of freedom returns: float """ var = Var(xs, mu, ddof) return math.sqrt(var) def MeanVar(xs, ddof=0): """Computes mean and variance. Based on http://stackoverflow.com/questions/19391149/ numpy-mean-and-variance-from-single-function xs: sequence of values ddof: delta degrees of freedom returns: pair of float, mean and var """ xs = np.asarray(xs) mean = xs.mean() s2 = Var(xs, mean, ddof) return mean, s2 def Trim(t, p=0.01): """Trims the largest and smallest elements of t. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: sequence of values """ n = int(p * len(t)) t = sorted(t)[n:-n] return t def TrimmedMean(t, p=0.01): """Computes the trimmed mean of a sequence of numbers. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: float """ t = Trim(t, p) return Mean(t) def TrimmedMeanVar(t, p=0.01): """Computes the trimmed mean and variance of a sequence of numbers. Side effect: sorts the list. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: float """ t = Trim(t, p) mu, var = MeanVar(t) return mu, var def CohenEffectSize(group1, group2): """Compute Cohen's d. group1: Series or NumPy array group2: Series or NumPy array returns: float """ diff = group1.mean() - group2.mean() n1, n2 = len(group1), len(group2) var1 = group1.var() var2 = group2.var() pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2) d = diff / math.sqrt(pooled_var) return d def Cov(xs, ys, meanx=None, meany=None): """Computes Cov(X, Y). Args: xs: sequence of values ys: sequence of values meanx: optional float mean of xs meany: optional float mean of ys Returns: Cov(X, Y) """ xs = np.asarray(xs) ys = np.asarray(ys) if meanx is None: meanx = np.mean(xs) if meany is None: meany = np.mean(ys) cov = np.dot(xs-meanx, ys-meany) / len(xs) return cov def Corr(xs, ys): """Computes Corr(X, Y). 
Args: xs: sequence of values ys: sequence of values Returns: Corr(X, Y) """ xs = np.asarray(xs) ys = np.asarray(ys) meanx, varx = MeanVar(xs) meany, vary = MeanVar(ys) corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary) return corr def SerialCorr(series, lag=1): """Computes the serial correlation of a series. series: Series lag: integer number of intervals to shift returns: float correlation """ xs = series[lag:] ys = series.shift(lag)[lag:] corr = Corr(xs, ys) return corr def SpearmanCorr(xs, ys): """Computes Spearman's rank correlation. Args: xs: sequence of values ys: sequence of values Returns: float Spearman's correlation """ xranks = pandas.Series(xs).rank() yranks = pandas.Series(ys).rank() return Corr(xranks, yranks) def MapToRanks(t): """Returns a list of ranks corresponding to the elements in t. Args: t: sequence of numbers Returns: list of integer ranks, starting at 1 """ # pair up each value with its index pairs = enumerate(t) # sort by value sorted_pairs = sorted(pairs, key=itemgetter(1)) # pair up each pair with its rank ranked = enumerate(sorted_pairs) # sort by index resorted = sorted(ranked, key=lambda trip: trip[1][0]) # extract the ranks ranks = [trip[0]+1 for trip in resorted] return ranks def LeastSquares(xs, ys): """Computes a linear least squares fit for ys as a function of xs. Args: xs: sequence of values ys: sequence of values Returns: tuple of (intercept, slope) """ meanx, varx = MeanVar(xs) meany = Mean(ys) slope = Cov(xs, ys, meanx, meany) / varx inter = meany - slope * meanx return inter, slope def FitLine(xs, inter, slope): """Fits a line to the given data. xs: sequence of x returns: tuple of numpy arrays (sorted xs, fit ys) """ fit_xs = np.sort(xs) fit_ys = inter + slope * fit_xs return fit_xs, fit_ys def Residuals(xs, ys, inter, slope): """Computes residuals for a linear fit with parameters inter and slope. Args: xs: independent variable ys: dependent variable inter: float intercept slope: float slope Returns: list of residuals """ xs = np.asarray(xs) ys = np.asarray(ys) res = ys - (inter + slope * xs) return res def CoefDetermination(ys, res): """Computes the coefficient of determination (R^2) for given residuals. Args: ys: dependent variable res: residuals Returns: float coefficient of determination """ return 1 - Var(res) / Var(ys) def CorrelatedGenerator(rho): """Generates standard normal variates with serial correlation. rho: target coefficient of correlation Returns: iterable """ x = random.gauss(0, 1) yield x sigma = math.sqrt(1 - rho**2) while True: x = random.gauss(x * rho, sigma) yield x def CorrelatedNormalGenerator(mu, sigma, rho): """Generates normal variates with serial correlation. mu: mean of variate sigma: standard deviation of variate rho: target coefficient of correlation Returns: iterable """ for x in CorrelatedGenerator(rho): yield x * sigma + mu def RawMoment(xs, k): """Computes the kth raw moment of xs. """ return sum(x**k for x in xs) / len(xs) def CentralMoment(xs, k): """Computes the kth central moment of xs. """ mean = RawMoment(xs, 1) return sum((x - mean)**k for x in xs) / len(xs) def StandardizedMoment(xs, k): """Computes the kth standardized moment of xs. """ var = CentralMoment(xs, 2) std = math.sqrt(var) return CentralMoment(xs, k) / std**k def Skewness(xs): """Computes skewness. """ return StandardizedMoment(xs, 3) def Median(xs): """Computes the median (50th percentile) of a sequence. 
xs: sequence or anything else that can initialize a Cdf returns: float """ cdf = Cdf(xs) return cdf.Value(0.5) def IQR(xs): """Computes the interquartile of a sequence. xs: sequence or anything else that can initialize a Cdf returns: pair of floats """ cdf = Cdf(xs) return cdf.Value(0.25), cdf.Value(0.75) def PearsonMedianSkewness(xs): """Computes the Pearson median skewness. """ median = Median(xs) mean = RawMoment(xs, 1) var = CentralMoment(xs, 2) std = math.sqrt(var) gp = 3 * (mean - median) / std return gp class FixedWidthVariables(object): """Represents a set of variables in a fixed width file.""" def __init__(self, variables, index_base=0): """Initializes. variables: DataFrame index_base: are the indices 0 or 1 based? Attributes: colspecs: list of (start, end) index tuples names: list of string variable names """ self.variables = variables # note: by default, subtract 1 from colspecs self.colspecs = variables[['start', 'end']] - index_base # convert colspecs to a list of pair of int self.colspecs = self.colspecs.astype(np.int).values.tolist() self.names = variables['name'] def ReadFixedWidth(self, filename, **options): """Reads a fixed width ASCII file. filename: string filename returns: DataFrame """ df = pandas.read_fwf(filename, colspecs=self.colspecs, names=self.names, **options) return df def ReadStataDct(dct_file, **options): """Reads a Stata dictionary file. dct_file: string filename options: dict of options passed to open() returns: FixedWidthVariables object """ type_map = dict(byte=int, int=int, long=int, float=float, double=float) var_info = [] for line in open(dct_file, **options): match = re.search( r'_column\(([^)]*)\)', line) if match: start = int(match.group(1)) t = line.split() vtype, name, fstring = t[1:4] name = name.lower() if vtype.startswith('str'): vtype = str else: vtype = type_map[vtype] long_desc = ' '.join(t[4:]).strip('"') var_info.append((start, vtype, name, fstring, long_desc)) columns = ['start', 'type', 'name', 'fstring', 'desc'] variables = pandas.DataFrame(var_info, columns=columns) # fill in the end column by shifting the start column variables['end'] = variables.start.shift(-1) variables.loc[len(variables)-1, 'end'] = 0 dct = FixedWidthVariables(variables, index_base=1) return dct def Resample(xs, n=None): """Draw a sample from xs with the same length as xs. xs: sequence n: sample size (default: len(xs)) returns: NumPy array """ if n is None: n = len(xs) return np.random.choice(xs, n, replace=True) def SampleRows(df, nrows, replace=False): """Choose a sample of rows from a DataFrame. df: DataFrame nrows: number of rows replace: whether to sample with replacement returns: DataDf """ indices = np.random.choice(df.index, nrows, replace=replace) sample = df.loc[indices] return sample def ResampleRows(df): """Resamples rows from a DataFrame. df: DataFrame returns: DataFrame """ return SampleRows(df, len(df), replace=True) def ResampleRowsWeighted(df, column='finalwgt'): """Resamples a DataFrame using probabilities proportional to given column. df: DataFrame column: string column name to use as weights returns: DataFrame """ weights = df[column] cdf = Cdf(dict(weights)) indices = cdf.Sample(len(weights)) sample = df.loc[indices] return sample def PercentileRow(array, p): """Selects the row from a sorted array that maps to percentile p. 
p: float 0--100 returns: NumPy array (one row) """ rows, cols = array.shape index = int(rows * p / 100) return array[index,] def PercentileRows(ys_seq, percents): """Given a collection of lines, selects percentiles along vertical axis. For example, if ys_seq contains simulation results like ys as a function of time, and percents contains (5, 95), the result would be a 90% CI for each vertical slice of the simulation results. ys_seq: sequence of lines (y values) percents: list of percentiles (0-100) to select returns: list of NumPy arrays, one for each percentile """ nrows = len(ys_seq) ncols = len(ys_seq[0]) array = np.zeros((nrows, ncols)) for i, ys in enumerate(ys_seq): array[i,] = ys array = np.sort(array, axis=0) rows = [PercentileRow(array, p) for p in percents] return rows def Smooth(xs, sigma=2, **options): """Smooths a NumPy array with a Gaussian filter. xs: sequence sigma: standard deviation of the filter """ return ndimage.filters.gaussian_filter1d(xs, sigma, **options) class HypothesisTest(object): """Represents a hypothesis test.""" def __init__(self, data): """Initializes. data: data in whatever form is relevant """ self.data = data self.MakeModel() self.actual = self.TestStatistic(data) self.test_stats = None self.test_cdf = None def PValue(self, iters=1000): """Computes the distribution of the test statistic and p-value. iters: number of iterations returns: float p-value """ self.test_stats = [self.TestStatistic(self.RunModel()) for _ in range(iters)] self.test_cdf = Cdf(self.test_stats) count = sum(1 for x in self.test_stats if x >= self.actual) return count / iters def MaxTestStat(self): """Returns the largest test statistic seen during simulations. """ return max(self.test_stats) def PlotCdf(self, label=None): """Draws a Cdf with vertical lines at the observed test stat. """ def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') VertLine(self.actual) thinkplot.Cdf(self.test_cdf, label=label) def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ raise UnimplementedMethodException() def MakeModel(self): """Build a model of the null hypothesis. """ pass def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ raise UnimplementedMethodException() def main(): pass if __name__ == '__main__': main()
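# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It exercises only the Pmf/Cdf API defined above; the values are
# made up and `_demo_pmf_cdf` is a hypothetical helper name.
def _demo_pmf_cdf():
    """Builds a small Pmf, normalizes it, and queries the derived Cdf."""
    # Pmf accepts a dict of value -> weight (as MakePmfFromDict does above).
    pmf = Pmf({1: 2, 2: 3, 3: 5}, label='demo')
    pmf.Normalize()                        # scale so the probabilities sum to 1

    print('mean', pmf.Mean())              # 0.2*1 + 0.3*2 + 0.5*3 = 2.3
    cdf = pmf.MakeCdf()
    print('P(x <= 2)', cdf.Prob(2))        # 0.5
    print('median', cdf.Value(0.5))        # 2

    # For repeated percentile queries, working from the Cdf is cheaper than
    # calling Pmf.Percentile, as the Pmf.Percentile docstring above suggests.
    low, high = pmf.CredibleInterval(90)
    print('90% credible interval', low, high)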
gpl-3.0
Eomys/MoSQITo
mosqito/validations/sharpness/validation_sharpness_din.py
1
8440
# -*- coding: utf-8 -*- """ Created on Tue Dec 15 16:36:37 2020 @author: wantysal """ # Third party imports import numpy as np import matplotlib.pyplot as plt # Local application imports from mosqito.functions.sharpness.comp_sharpness import comp_sharpness from mosqito.functions.shared.load import load # Signals and results from DIN 45692_2009E, chapter 6 broadband = np.zeros((20), dtype=dict) broadband[0] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_250.wav", "type": "Broad-band", "S": 2.70, } broadband[1] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_350.wav", "S": 2.74, } broadband[2] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_450.wav", "S": 2.78, } broadband[3] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_570.wav", "S": 2.85, } broadband[4] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_700.wav", "S": 2.91, } broadband[5] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_840.wav", "S": 2.96, } broadband[6] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_1000.wav", "S": 3.05, } broadband[7] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_1170.wav", "S": 3.12, } broadband[8] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_1370.wav", "S": 3.20, } broadband[9] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_1600.wav", "S": 3.30, } broadband[10] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_1850.wav", "S": 3.42, } broadband[11] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_2150.wav", "S": 3.53, } broadband[12] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_2500.wav", "S": 3.69, } broadband[13] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_2900.wav", "S": 3.89, } broadband[14] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_3400.wav", "S": 4.12, } broadband[15] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_4000.wav", "S": 4.49, } broadband[16] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_4800.wav", "S": 5.04, } broadband[17] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_5800.wav", "S": 5.69, } broadband[18] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_7000.wav", "S": 6.47, } broadband[19] = { "data_file": r".\mosqito\validations\sharpness\data\broadband_8500.wav", "S": 7.46, } # Test signal as input for sharpness (from DIN 45692) narrowband = np.zeros((21), dtype=dict) narrowband[0] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_250.wav", "type": "Narrow-band", "S": 0.38, } narrowband[1] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_350.wav", "S": 0.49, } narrowband[2] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_450.wav", "S": 0.6, } narrowband[3] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_570.wav", "S": 0.71, } narrowband[4] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_700.wav", "S": 0.82, } narrowband[5] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_840.wav", "S": 0.93, } narrowband[6] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_1000.wav", "S": 1.00, } narrowband[7] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_1170.wav", "S": 1.13, } narrowband[8] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_1370.wav", "S": 1.26, } narrowband[9] = 
{ "data_file": r".\mosqito\validations\sharpness\data\narrowband_1600.wav", "S": 1.35, } narrowband[10] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_1850.wav", "S": 1.49, } narrowband[11] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_2150.wav", "S": 1.64, } narrowband[12] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_2500.wav", "S": 1.78, } narrowband[13] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_2900.wav", "S": 2.06, } narrowband[14] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_3400.wav", "S": 2.40, } narrowband[15] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_4000.wav", "S": 2.82, } narrowband[16] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_4800.wav", "S": 3.48, } narrowband[17] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_5800.wav", "S": 4.43, } narrowband[18] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_7000.wav", "S": 5.52, } narrowband[19] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_8500.wav", "S": 6.81, } narrowband[20] = { "data_file": r".\mosqito\validations\sharpness\data\narrowband_10500.wav", "S": 8.55, } def validation_sharpness(noise): """Test function for the script sharpness_din Test function for the script sharpness_din with .wav filesas input. The input files are provided by DIN 45692_2009E The compliance is assessed according to chapter 6 of the standard. One .png compliance plot is generated. Parameters ---------- None Outputs ------- None """ sharpness = np.zeros((len(noise))) reference = np.zeros((len(noise))) for i in range(len(noise)): # Load signal sig, fs = load(True, noise[i]["data_file"], calib=1) # Compute sharpness S = comp_sharpness(True, sig, fs, method="din") sharpness[i] = S["values"] # Load reference value reference[i] = noise[i]["S"] noise_type = noise[0]["type"] check_compliance(sharpness, reference, noise_type) def check_compliance(sharpness, reference, noise_type): """Check the compliance of sharpness calc. to DIN 45692 The compliance is assessed according to chapter 6 of the standard DIN 45692_2009E. One .png compliance plot is generated. 
    Parameters
    ----------
    sharpness : numpy.array
        computed sharpness values
    reference : numpy.array
        reference sharpness values
    noise_type : str
        label of the noise family ("Broad-band" or "Narrow-band"), used in
        the plot title and output file name

    Outputs
    -------
    tst : bool
        Compliance to the reference data
    """
    plt.figure()

    # Frequency bark axis
    barks = np.arange(2.5, len(sharpness) + 2.5, 1)

    # Test for DIN 45692_2009E conformance (chapter 6)
    S = sharpness
    tstS = (S >= np.amin([reference * 0.95, reference - 0.05], axis=0)).all() and (
        S <= np.amax([reference * 1.05, reference + 0.05], axis=0)
    ).all()

    # Tolerance curves definition
    tol_low = np.amin([reference * 0.95, reference - 0.05], axis=0)
    tol_high = np.amax([reference * 1.05, reference + 0.05], axis=0)

    # Plot tolerance curves
    plt.plot(
        barks, tol_low, color="red", linestyle="solid", label="tolerance", linewidth=1
    )
    plt.plot(barks, tol_high, color="red", linestyle="solid", linewidth=1)

    if tstS:
        plt.text(
            0.5,
            0.5,
            "Test passed ",
            horizontalalignment="center",
            verticalalignment="center",
            transform=plt.gca().transAxes,
            bbox=dict(facecolor="green", alpha=0.3),
        )
    else:
        plt.text(
            0.5,
            0.5,
            "Test not passed",
            horizontalalignment="center",
            verticalalignment="center",
            transform=plt.gca().transAxes,
            bbox=dict(facecolor="red", alpha=0.3),
        )

    # Plot the calculated sharpness
    plt.plot(barks, sharpness, label="MOSQITO")
    plt.title("Sharpness of " + noise_type + " noises", fontsize=10)
    plt.legend()
    plt.xlabel("Center frequency [bark]")
    plt.ylabel("Sharpness [acum]")
    plt.savefig(
        "./mosqito/validations/sharpness/output/"
        + "validation_sharpness_"
        + noise_type
        + "_noise"
        + ".png",
        format="png",
    )
    plt.clf()


# Test of the function
if __name__ == "__main__":
    # generate compliance plots for broadband and narrowband noise
    validation_sharpness(broadband)
    validation_sharpness(narrowband)
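# --- Illustrative sketch (editor's addition, not part of the original script).
# It isolates the DIN 45692 chapter 6 acceptance test used by check_compliance
# above, without any plotting: a value passes when it lies within +/-5 % of the
# reference or within +/-0.05 acum, whichever tolerance is wider. The helper
# name `_din_45692_tolerance_ok` is hypothetical; it reuses the `numpy` import
# from the top of this script.
def _din_45692_tolerance_ok(sharpness, reference):
    """Returns True if every computed value meets the DIN 45692 tolerance."""
    sharpness = np.asarray(sharpness)
    reference = np.asarray(reference)
    tol_low = np.amin([reference * 0.95, reference - 0.05], axis=0)
    tol_high = np.amax([reference * 1.05, reference + 0.05], axis=0)
    return bool(np.all((sharpness >= tol_low) & (sharpness <= tol_high)))


# Example: a computed sharpness of 3.05 acum against a 3.00 acum reference
# passes, since the tolerance band there is [2.85, 3.15]:
# _din_45692_tolerance_ok([3.05], [3.00])  -> True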
apache-2.0
cpcloud/arrow
python/pyarrow/tests/test_array.py
1
30472
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections import datetime import pickle import pytest import struct import sys import numpy as np import pandas as pd import pandas.util.testing as tm try: import pickle5 except ImportError: pickle5 = None import pyarrow as pa from pyarrow.pandas_compat import get_logical_type import pyarrow.formatting as fmt def test_total_bytes_allocated(): assert pa.total_allocated_bytes() == 0 def test_getitem_NULL(): arr = pa.array([1, None, 2]) assert arr[1] is pa.NULL def test_constructor_raises(): # This could happen by wrong capitalization. # ARROW-2638: prevent calling extension class constructors directly with pytest.raises(TypeError): pa.Array([1, 2]) def test_list_format(): arr = pa.array([[1], None, [2, 3, None]]) result = fmt.array_format(arr) expected = """\ [ [ 1 ], null, [ 2, 3, null ] ]""" assert result == expected def test_string_format(): arr = pa.array([u'', None, u'foo']) result = fmt.array_format(arr) expected = """\ [ "", null, "foo" ]""" assert result == expected def test_long_array_format(): arr = pa.array(range(100)) result = fmt.array_format(arr, window=2) expected = """\ [ 0, 1, ... 
98, 99 ]""" assert result == expected def test_to_numpy_zero_copy(): arr = pa.array(range(10)) old_refcount = sys.getrefcount(arr) np_arr = arr.to_numpy() np_arr[0] = 1 assert arr[0] == 1 assert sys.getrefcount(arr) == old_refcount arr = None import gc gc.collect() # Ensure base is still valid assert np_arr.base is not None expected = np.arange(10) expected[0] = 1 np.testing.assert_array_equal(np_arr, expected) def test_to_numpy_unsupported_types(): # ARROW-2871: Some primitive types are not yet supported in to_numpy bool_arr = pa.array([True, False, True]) with pytest.raises(NotImplementedError): bool_arr.to_numpy() null_arr = pa.array([None, None, None]) with pytest.raises(NotImplementedError): null_arr.to_numpy() def test_to_pandas_zero_copy(): import gc arr = pa.array(range(10)) for i in range(10): np_arr = arr.to_pandas() assert sys.getrefcount(np_arr) == 2 np_arr = None # noqa assert sys.getrefcount(arr) == 2 for i in range(10): arr = pa.array(range(10)) np_arr = arr.to_pandas() arr = None gc.collect() # Ensure base is still valid # Because of py.test's assert inspection magic, if you put getrefcount # on the line being examined, it will be 1 higher than you expect base_refcount = sys.getrefcount(np_arr.base) assert base_refcount == 2 np_arr.sum() def test_array_getitem(): arr = pa.array(range(10, 15)) lst = arr.to_pylist() for idx in range(-len(arr), len(arr)): assert arr[idx].as_py() == lst[idx] for idx in range(-2 * len(arr), -len(arr)): with pytest.raises(IndexError): arr[idx] for idx in range(len(arr), 2 * len(arr)): with pytest.raises(IndexError): arr[idx] def test_array_slice(): arr = pa.array(range(10)) sliced = arr.slice(2) expected = pa.array(range(2, 10)) assert sliced.equals(expected) sliced2 = arr.slice(2, 4) expected2 = pa.array(range(2, 6)) assert sliced2.equals(expected2) # 0 offset assert arr.slice(0).equals(arr) # Slice past end of array assert len(arr.slice(len(arr))) == 0 with pytest.raises(IndexError): arr.slice(-1) # Test slice notation assert arr[2:].equals(arr.slice(2)) assert arr[2:5].equals(arr.slice(2, 3)) assert arr[-5:].equals(arr.slice(len(arr) - 5)) with pytest.raises(IndexError): arr[::-1] with pytest.raises(IndexError): arr[::2] n = len(arr) for start in range(-n * 2, n * 2): for stop in range(-n * 2, n * 2): assert arr[start:stop].to_pylist() == arr.to_pylist()[start:stop] def test_array_iter(): arr = pa.array(range(10)) for i, j in zip(range(10), arr): assert i == j assert isinstance(arr, collections.Iterable) def test_struct_array_slice(): # ARROW-2311: slicing nested arrays needs special care ty = pa.struct([pa.field('a', pa.int8()), pa.field('b', pa.float32())]) arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5}, {'a': 5, 'b': 6.5}] def test_array_factory_invalid_type(): arr = np.array([datetime.timedelta(1), datetime.timedelta(2)]) with pytest.raises(ValueError): pa.array(arr) def test_array_ref_to_ndarray_base(): arr = np.array([1, 2, 3]) refcount = sys.getrefcount(arr) arr2 = pa.array(arr) # noqa assert sys.getrefcount(arr) == (refcount + 1) def test_array_eq_raises(): # ARROW-2150: we are raising when comparing arrays until we define the # behavior to either be elementwise comparisons or data equality arr1 = pa.array([1, 2, 3], type=pa.int32()) arr2 = pa.array([1, 2, 3], type=pa.int32()) with pytest.raises(NotImplementedError): arr1 == arr2 def test_array_from_buffers(): values_buf = pa.py_buffer(np.int16([4, 5, 6, 7])) nulls_buf = pa.py_buffer(np.uint8([0b00001101])) arr = 
pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf]) assert arr.type == pa.int16() assert arr.to_pylist() == [4, None, 6, 7] arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf]) assert arr.type == pa.int16() assert arr.to_pylist() == [4, 5, 6, 7] arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf], offset=1) assert arr.type == pa.int16() assert arr.to_pylist() == [None, 6, 7] with pytest.raises(TypeError): pa.Array.from_buffers(pa.int16(), 3, [u'', u''], offset=1) with pytest.raises(NotImplementedError): pa.Array.from_buffers(pa.list_(pa.int16()), 4, [None, values_buf]) def test_dictionary_from_numpy(): indices = np.repeat([0, 1, 2], 2) dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) mask = np.array([False, False, True, False, False, False]) d1 = pa.DictionaryArray.from_arrays(indices, dictionary) d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask) for i in range(len(indices)): assert d1[i].as_py() == dictionary[indices[i]] if mask[i]: assert d2[i] is pa.NULL else: assert d2[i].as_py() == dictionary[indices[i]] def test_dictionary_from_boxed_arrays(): indices = np.repeat([0, 1, 2], 2) dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) iarr = pa.array(indices) darr = pa.array(dictionary) d1 = pa.DictionaryArray.from_arrays(iarr, darr) for i in range(len(indices)): assert d1[i].as_py() == dictionary[indices[i]] def test_dictionary_from_arrays_boundscheck(): indices1 = pa.array([0, 1, 2, 0, 1, 2]) indices2 = pa.array([0, -1, 2]) indices3 = pa.array([0, 1, 2, 3]) dictionary = pa.array(['foo', 'bar', 'baz']) # Works fine pa.DictionaryArray.from_arrays(indices1, dictionary) with pytest.raises(pa.ArrowException): pa.DictionaryArray.from_arrays(indices2, dictionary) with pytest.raises(pa.ArrowException): pa.DictionaryArray.from_arrays(indices3, dictionary) # If we are confident that the indices are "safe" we can pass safe=False to # disable the boundschecking pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False) def test_dictionary_with_pandas(): indices = np.repeat([0, 1, 2], 2) dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) mask = np.array([False, False, True, False, False, False]) d1 = pa.DictionaryArray.from_arrays(indices, dictionary) d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask) pandas1 = d1.to_pandas() ex_pandas1 = pd.Categorical.from_codes(indices, categories=dictionary) tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1)) pandas2 = d2.to_pandas() ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1, indices), categories=dictionary) tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2)) def test_list_from_arrays(): offsets_arr = np.array([0, 2, 5, 8], dtype='i4') offsets = pa.array(offsets_arr, type='int32') pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] values = pa.array(pyvalues, type='binary') result = pa.ListArray.from_arrays(offsets, values) expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]]) assert result.equals(expected) # With nulls offsets = [0, None, 2, 6] values = ['a', 'b', 'c', 'd', 'e', 'f'] result = pa.ListArray.from_arrays(offsets, values) expected = pa.array([values[:2], None, values[2:]]) assert result.equals(expected) # Another edge case offsets2 = [0, 2, None, 6] result = pa.ListArray.from_arrays(offsets2, values) expected = pa.array([values[:2], values[2:], None]) assert result.equals(expected) def test_union_from_dense(): binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') int64 = pa.array([1, 
2, 3], type='int64') types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32') result = pa.UnionArray.from_dense(types, value_offsets, [binary, int64]) assert result.to_pylist() == [b'a', 1, b'c', b'b', 2, 3, b'd'] def test_union_from_sparse(): binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'], type='binary') int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64') types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') result = pa.UnionArray.from_sparse(types, [binary, int64]) assert result.to_pylist() == [b'a', 1, b'b', b'c', 2, 3, b'd'] def test_union_array_slice(): # ARROW-2314 arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()), [pa.array(["a", "b", "c", "d"]), pa.array([1, 2, 3, 4])]) assert arr[1:].to_pylist() == ["b", 3, 4] binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') int64 = pa.array([1, 2, 3], type='int64') types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32') arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64]) lst = arr.to_pylist() for i in range(len(arr)): for j in range(i, len(arr)): assert arr[i:j].to_pylist() == lst[i:j] def test_string_from_buffers(): array = pa.array(["a", None, "b", "c"]) buffers = array.buffers() copied = pa.StringArray.from_buffers( len(array), buffers[1], buffers[2], buffers[0], array.null_count, array.offset) assert copied.to_pylist() == ["a", None, "b", "c"] copied = pa.StringArray.from_buffers( len(array), buffers[1], buffers[2], buffers[0]) assert copied.to_pylist() == ["a", None, "b", "c"] sliced = array[1:] buffers = sliced.buffers() copied = pa.StringArray.from_buffers( len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset) assert copied.to_pylist() == [None, "b", "c"] assert copied.null_count == 1 # Slice but exclude all null entries so that we don't need to pass # the null bitmap. 
sliced = array[2:] buffers = sliced.buffers() copied = pa.StringArray.from_buffers( len(sliced), buffers[1], buffers[2], None, -1, sliced.offset) assert copied.to_pylist() == ["b", "c"] assert copied.null_count == 0 def _check_cast_case(case, safe=True): in_data, in_type, out_data, out_type = case in_arr = pa.array(in_data, type=in_type) casted = in_arr.cast(out_type, safe=safe) expected = pa.array(out_data, type=out_type) assert casted.equals(expected) def test_cast_integers_safe(): safe_cases = [ (np.array([0, 1, 2, 3], dtype='i1'), 'int8', np.array([0, 1, 2, 3], dtype='i4'), pa.int32()), (np.array([0, 1, 2, 3], dtype='i1'), 'int8', np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()), (np.array([0, 1, 2, 3], dtype='i1'), 'int8', np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), (np.array([0, 1, 2, 3], dtype='i1'), 'int8', np.array([0, 1, 2, 3], dtype='f8'), pa.float64()) ] for case in safe_cases: _check_cast_case(case) unsafe_cases = [ (np.array([50000], dtype='i4'), 'int32', 'int16'), (np.array([70000], dtype='i4'), 'int32', 'uint16'), (np.array([-1], dtype='i4'), 'int32', 'uint16'), (np.array([50000], dtype='u2'), 'uint16', 'int16') ] for in_data, in_type, out_type in unsafe_cases: in_arr = pa.array(in_data, type=in_type) with pytest.raises(pa.ArrowInvalid): in_arr.cast(out_type) def test_cast_column(): arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])] col = pa.column('foo', arrays) target = pa.float64() casted = col.cast(target) expected = pa.column('foo', [x.cast(target) for x in arrays]) assert casted.equals(expected) def test_cast_integers_unsafe(): # We let NumPy do the unsafe casting unsafe_cases = [ (np.array([50000], dtype='i4'), 'int32', np.array([50000], dtype='i2'), pa.int16()), (np.array([70000], dtype='i4'), 'int32', np.array([70000], dtype='u2'), pa.uint16()), (np.array([-1], dtype='i4'), 'int32', np.array([-1], dtype='u2'), pa.uint16()), (np.array([50000], dtype='u2'), pa.uint16(), np.array([50000], dtype='i2'), pa.int16()) ] for case in unsafe_cases: _check_cast_case(case, safe=False) def test_cast_timestamp_unit(): # ARROW-1680 val = datetime.datetime.now() s = pd.Series([val]) s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York') us_with_tz = pa.timestamp('us', tz='America/New_York') arr = pa.Array.from_pandas(s_nyc, type=us_with_tz) # ARROW-1906 assert arr.type == us_with_tz arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us')) assert arr[0].as_py() == s_nyc[0] assert arr2[0].as_py() == s[0] # Disallow truncation arr = pa.array([123123], type='int64').cast(pa.timestamp('ms')) expected = pa.array([123], type='int64').cast(pa.timestamp('s')) target = pa.timestamp('s') with pytest.raises(ValueError): arr.cast(target) result = arr.cast(target, safe=False) assert result.equals(expected) def test_cast_signed_to_unsigned(): safe_cases = [ (np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(), np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), (np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(), np.array([0, 1, 2, 3], dtype='u2'), pa.uint16()) ] for case in safe_cases: _check_cast_case(case) def test_unique_simple(): cases = [ (pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])), (pa.array(['foo', None, 'bar', 'foo']), pa.array(['foo', 'bar'])) ] for arr, expected in cases: result = arr.unique() assert result.equals(expected) result = pa.column("column", arr).unique() assert result.equals(expected) result = pa.chunked_array([arr]).unique() assert result.equals(expected) def test_dictionary_encode_simple(): cases = [ (pa.array([1, 2, 3, None, 1, 2, 3]), 
pa.DictionaryArray.from_arrays( pa.array([0, 1, 2, None, 0, 1, 2], type='int32'), [1, 2, 3])), (pa.array(['foo', None, 'bar', 'foo']), pa.DictionaryArray.from_arrays( pa.array([0, None, 1, 0], type='int32'), ['foo', 'bar'])) ] for arr, expected in cases: result = arr.dictionary_encode() assert result.equals(expected) result = pa.column("column", arr).dictionary_encode() assert result.data.chunk(0).equals(expected) result = pa.chunked_array([arr]).dictionary_encode() assert result.chunk(0).equals(expected) def test_cast_time32_to_int(): arr = pa.array(np.array([0, 1, 2], dtype='int32'), type=pa.time32('s')) expected = pa.array([0, 1, 2], type='i4') result = arr.cast('i4') assert result.equals(expected) def test_cast_time64_to_int(): arr = pa.array(np.array([0, 1, 2], dtype='int64'), type=pa.time64('us')) expected = pa.array([0, 1, 2], type='i8') result = arr.cast('i8') assert result.equals(expected) def test_cast_timestamp_to_int(): arr = pa.array(np.array([0, 1, 2], dtype='int64'), type=pa.timestamp('us')) expected = pa.array([0, 1, 2], type='i8') result = arr.cast('i8') assert result.equals(expected) def test_cast_date32_to_int(): arr = pa.array([0, 1, 2], type='i4') result1 = arr.cast('date32') result2 = result1.cast('i4') expected1 = pa.array([ datetime.date(1970, 1, 1), datetime.date(1970, 1, 2), datetime.date(1970, 1, 3) ]).cast('date32') assert result1.equals(expected1) assert result2.equals(arr) def test_cast_date64_to_int(): arr = pa.array(np.array([0, 1, 2], dtype='int64'), type=pa.date64()) expected = pa.array([0, 1, 2], type='i8') result = arr.cast('i8') assert result.equals(expected) pickle_test_parametrize = pytest.mark.parametrize( ('data', 'typ'), [ ([True, False, True, True], pa.bool_()), ([1, 2, 4, 6], pa.int64()), ([1.0, 2.5, None], pa.float64()), (['a', None, 'b'], pa.string()), ([], None), ([[1, 2], [3]], pa.list_(pa.int64())), ([['a'], None, ['b', 'c']], pa.list_(pa.string())), ([(1, 'a'), (2, 'c'), None], pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())])) ] ) @pickle_test_parametrize def test_array_pickle(data, typ): # Allocate here so that we don't have any Arrow data allocated. # This is needed to ensure that allocator tests can be reliable. 
array = pa.array(data, type=typ) for proto in range(0, pickle.HIGHEST_PROTOCOL + 1): result = pickle.loads(pickle.dumps(array, proto)) assert array.equals(result) @pickle_test_parametrize def test_array_pickle5(data, typ): # Test zero-copy pickling with protocol 5 (PEP 574) picklemod = pickle5 or pickle if pickle5 is None and picklemod.HIGHEST_PROTOCOL < 5: pytest.skip("need pickle5 package or Python 3.8+") array = pa.array(data, type=typ) addresses = [buf.address if buf is not None else 0 for buf in array.buffers()] for proto in range(5, pickle.HIGHEST_PROTOCOL + 1): buffers = [] pickled = picklemod.dumps(array, proto, buffer_callback=buffers.append) result = picklemod.loads(pickled, buffers=buffers) assert array.equals(result) result_addresses = [buf.address if buf is not None else 0 for buf in result.buffers()] assert result_addresses == addresses @pytest.mark.parametrize( 'narr', [ np.arange(10, dtype=np.int64), np.arange(10, dtype=np.int32), np.arange(10, dtype=np.int16), np.arange(10, dtype=np.int8), np.arange(10, dtype=np.uint64), np.arange(10, dtype=np.uint32), np.arange(10, dtype=np.uint16), np.arange(10, dtype=np.uint8), np.arange(10, dtype=np.float64), np.arange(10, dtype=np.float32), np.arange(10, dtype=np.float16), ] ) def test_to_numpy_roundtrip(narr): arr = pa.array(narr) assert narr.dtype == arr.to_numpy().dtype np.testing.assert_array_equal(narr, arr.to_numpy()) np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy()) np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy()) np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy()) @pytest.mark.parametrize( ('type', 'expected'), [ (pa.null(), 'empty'), (pa.bool_(), 'bool'), (pa.int8(), 'int8'), (pa.int16(), 'int16'), (pa.int32(), 'int32'), (pa.int64(), 'int64'), (pa.uint8(), 'uint8'), (pa.uint16(), 'uint16'), (pa.uint32(), 'uint32'), (pa.uint64(), 'uint64'), (pa.float16(), 'float16'), (pa.float32(), 'float32'), (pa.float64(), 'float64'), (pa.date32(), 'date'), (pa.date64(), 'date'), (pa.binary(), 'bytes'), (pa.binary(length=4), 'bytes'), (pa.string(), 'unicode'), (pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'), (pa.decimal128(18, 3), 'decimal'), (pa.timestamp('ms'), 'datetime'), (pa.timestamp('us', 'UTC'), 'datetimetz'), (pa.time32('s'), 'time'), (pa.time64('us'), 'time') ] ) def test_logical_type(type, expected): assert get_logical_type(type) == expected def test_array_uint64_from_py_over_range(): arr = pa.array([2 ** 63], type=pa.uint64()) expected = pa.array(np.array([2 ** 63], dtype='u8')) assert arr.equals(expected) def test_array_conversions_no_sentinel_values(): arr = np.array([1, 2, 3, 4], dtype='int8') refcount = sys.getrefcount(arr) arr2 = pa.array(arr) # noqa assert sys.getrefcount(arr) == (refcount + 1) assert arr2.type == 'int8' arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'), type='float32') assert arr3.type == 'float32' assert arr3.null_count == 0 def test_array_from_numpy_datetimeD(): arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]') result = pa.array(arr) expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32()) assert result.equals(expected) def test_array_from_py_float32(): data = [[1.2, 3.4], [9.0, 42.0]] t = pa.float32() arr1 = pa.array(data[0], type=t) arr2 = pa.array(data, type=pa.list_(t)) expected1 = np.array(data[0], dtype=np.float32) expected2 = pd.Series([np.array(data[0], dtype=np.float32), np.array(data[1], dtype=np.float32)]) assert arr1.type == t assert arr1.equals(pa.array(expected1)) assert 
arr2.equals(pa.array(expected2)) def test_array_from_numpy_ascii(): arr = np.array(['abcde', 'abc', ''], dtype='|S5') arrow_arr = pa.array(arr) assert arrow_arr.type == 'binary' expected = pa.array(['abcde', 'abc', ''], type='binary') assert arrow_arr.equals(expected) mask = np.array([False, True, False]) arrow_arr = pa.array(arr, mask=mask) expected = pa.array(['abcde', None, ''], type='binary') assert arrow_arr.equals(expected) # Strided variant arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2] mask = np.array([False, True, False] * 5)[::2] arrow_arr = pa.array(arr, mask=mask) expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''], type='binary') assert arrow_arr.equals(expected) # 0 itemsize arr = np.array(['', '', ''], dtype='|S0') arrow_arr = pa.array(arr) expected = pa.array(['', '', ''], type='binary') assert arrow_arr.equals(expected) def test_array_from_numpy_unicode(): dtypes = ['<U5', '>U5'] for dtype in dtypes: arr = np.array(['abcde', 'abc', ''], dtype=dtype) arrow_arr = pa.array(arr) assert arrow_arr.type == 'utf8' expected = pa.array(['abcde', 'abc', ''], type='utf8') assert arrow_arr.equals(expected) mask = np.array([False, True, False]) arrow_arr = pa.array(arr, mask=mask) expected = pa.array(['abcde', None, ''], type='utf8') assert arrow_arr.equals(expected) # Strided variant arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2] mask = np.array([False, True, False] * 5)[::2] arrow_arr = pa.array(arr, mask=mask) expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''], type='utf8') assert arrow_arr.equals(expected) # 0 itemsize arr = np.array(['', '', ''], dtype='<U0') arrow_arr = pa.array(arr) expected = pa.array(['', '', ''], type='utf8') assert arrow_arr.equals(expected) def test_buffers_primitive(): a = pa.array([1, 2, None, 4], type=pa.int16()) buffers = a.buffers() assert len(buffers) == 2 null_bitmap = buffers[0].to_pybytes() assert 1 <= len(null_bitmap) <= 64 # XXX this is varying assert bytearray(null_bitmap)[0] == 0b00001011 # Slicing does not affect the buffers but the offset a_sliced = a[1:] buffers = a_sliced.buffers() a_sliced.offset == 1 assert len(buffers) == 2 null_bitmap = buffers[0].to_pybytes() assert 1 <= len(null_bitmap) <= 64 # XXX this is varying assert bytearray(null_bitmap)[0] == 0b00001011 assert struct.unpack('hhxxh', buffers[1].to_pybytes()) == (1, 2, 4) a = pa.array(np.int8([4, 5, 6])) buffers = a.buffers() assert len(buffers) == 2 # No null bitmap from Numpy int array assert buffers[0] is None assert struct.unpack('3b', buffers[1].to_pybytes()) == (4, 5, 6) a = pa.array([b'foo!', None, b'bar!!']) buffers = a.buffers() assert len(buffers) == 3 null_bitmap = buffers[0].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00000101 offsets = buffers[1].to_pybytes() assert struct.unpack('4i', offsets) == (0, 4, 4, 9) values = buffers[2].to_pybytes() assert values == b'foo!bar!!' 
def test_buffers_nested(): a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64())) buffers = a.buffers() assert len(buffers) == 4 # The parent buffers null_bitmap = buffers[0].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00000101 offsets = buffers[1].to_pybytes() assert struct.unpack('4i', offsets) == (0, 2, 2, 6) # The child buffers null_bitmap = buffers[2].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00110111 values = buffers[3].to_pybytes() assert struct.unpack('qqq8xqq', values) == (1, 2, 3, 4, 5) a = pa.array([(42, None), None, (None, 43)], type=pa.struct([pa.field('a', pa.int8()), pa.field('b', pa.int16())])) buffers = a.buffers() assert len(buffers) == 5 # The parent buffer null_bitmap = buffers[0].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00000101 # The child buffers: 'a' null_bitmap = buffers[1].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00000001 values = buffers[2].to_pybytes() assert struct.unpack('bxx', values) == (42,) # The child buffers: 'b' null_bitmap = buffers[3].to_pybytes() assert bytearray(null_bitmap)[0] == 0b00000100 values = buffers[4].to_pybytes() assert struct.unpack('4xh', values) == (43,) def test_invalid_tensor_constructor_repr(): # ARROW-2638: prevent calling extension class constructors directly with pytest.raises(TypeError): repr(pa.Tensor([1])) def test_invalid_tensor_construction(): with pytest.raises(TypeError): pa.Tensor() def test_struct_array_flatten(): ty = pa.struct([pa.field('x', pa.int16()), pa.field('y', pa.float32())]) a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) xs, ys = a.flatten() assert xs.type == pa.int16() assert ys.type == pa.float32() assert xs.to_pylist() == [1, 3, 5] assert ys.to_pylist() == [2.5, 4.5, 6.5] xs, ys = a[1:].flatten() assert xs.to_pylist() == [3, 5] assert ys.to_pylist() == [4.5, 6.5] a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty) xs, ys = a.flatten() assert xs.to_pylist() == [1, None, 3] assert ys.to_pylist() == [2.5, None, 4.5] xs, ys = a[1:].flatten() assert xs.to_pylist() == [None, 3] assert ys.to_pylist() == [None, 4.5] a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty) xs, ys = a.flatten() assert xs.to_pylist() == [1, 2, None] assert ys.to_pylist() == [None, 3.5, 4.5] xs, ys = a[1:].flatten() assert xs.to_pylist() == [2, None] assert ys.to_pylist() == [3.5, 4.5] a = pa.array([(1, None), None, (None, 2.5)], type=ty) xs, ys = a.flatten() assert xs.to_pylist() == [1, None, None] assert ys.to_pylist() == [None, None, 2.5] xs, ys = a[1:].flatten() assert xs.to_pylist() == [None, None] assert ys.to_pylist() == [None, 2.5] def test_nested_dictionary_array(): dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr) assert list_arr.to_pylist() == [['a', 'b'], ['a']] dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr) assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a'] @pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's']) def test_timestamp_units_from_list(unit): x = np.datetime64('2017-01-01 01:01:01.111111111', unit) a1 = pa.array([x]) a2 = pa.array([x], type=pa.timestamp(unit)) assert a1.type == a2.type assert a1.type.unit == unit assert a1[0] == a2[0]
apache-2.0
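A minimal standalone sketch of the buffers round trip exercised by test_array_from_buffers above, assuming only that pyarrow is importable as pa; the helper name reconstruct_int16 is illustrative and not part of the test module.

# Hedged sketch: rebuild an int16 Array from raw buffers, mirroring
# test_array_from_buffers above. `reconstruct_int16` is an illustrative name.
import numpy as np
import pyarrow as pa

def reconstruct_int16(values, validity_byte=None):
    """Build a pa.int16() Array from values plus an optional one-byte validity bitmap."""
    values_buf = pa.py_buffer(np.int16(values))
    nulls_buf = (pa.py_buffer(np.uint8([validity_byte]))
                 if validity_byte is not None else None)
    return pa.Array.from_buffers(pa.int16(), len(values), [nulls_buf, values_buf])

# 0b00001101 marks element 1 as null, exactly as in the test above.
assert reconstruct_int16([4, 5, 6, 7], 0b00001101).to_pylist() == [4, None, 6, 7]
assert reconstruct_int16([4, 5, 6, 7]).to_pylist() == [4, 5, 6, 7]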
james-jz-zheng/jjzz
ml_test.py
1
4779
import yahoo_finance as yhf
from sklearn import *
import os.path, os
import pickle
import numpy as np
import datetime as dt
import pandas as pd

def next_biz_day(d):
    nd = d+dt.timedelta(days=1)
    return nd if nd.weekday() in range(5) else next_biz_day(nd)

def prev_biz_day(d):
    pd = d-dt.timedelta(days=1)
    return pd if pd.weekday() in range(5) else prev_biz_day(pd)

def get_raw(s_name, start, end):
    FILE_PATH = 'C:\\Temp' # os.environ.get('TEMP')
    file_name = FILE_PATH + '\\' + s_name + start + end + '.txt'
    if os.path.isfile(file_name):
        with open(file_name,'r') as f:
            raw = pickle.load(f)
    else:
        raw = yhf.Share(s_name).get_historical(start,end)
        with open(file_name,'w') as f:
            pickle.dump(raw, f)
    return raw

def get_s(s_name, start, end, field):
    return [float(i[field]) for i in get_raw(s_name, start, end)]

def get_diff(arr):
    return [0] + [2.0*(arr[i+1] - arr[i])/(arr[i+1] + arr[i]) for i in range(len(arr) - 1)]

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-1.0 * z))

def nomalize(arr):
    x = np.array(arr)
    min, max = x[np.argmin(x)], x[np.argmax(x)]
    return ((x - min) / (max - min))*2.0 -1

def average(arr, ndays):
    a = [[arr[0]] * i + arr[:-i] if i>0 else arr for i in range(ndays)]
    k = np.zeros_like(a[0])
    for i in range(ndays):
        k += np.array(a[i])
    return np.array(k) / float(ndays)

def ave_n(n):
    return lambda x:average(x, n)

def merge_fs(fs):
    return fs[0] if len(fs) == 1 else lambda *args: (merge_fs(fs[1:]))(fs[0](*args))

# --- Run parameters ---
x_names = 'MSFT|AAPL|GOOG|FB|INTC|AMZN|BIDU'.split('|')
y_name = 'BIDU'
percentage_for_training = 0.95
se_dates = [dt.datetime(*d) for d in [(2013,1,3), (2017,4,3)]]
input_start,   input_end   = [d.strftime('%Y-%m-%d') for d in se_dates]
se_dates = [next_biz_day(d) for d in se_dates]
predict_start, predict_end = [d.strftime('%Y-%m-%d') for d in se_dates]

# training dataset selection
lwfs = [
    # label,   weight, methods
    ('Close',  5.0,    [get_s,            get_diff, nomalize, sigmoid]),
    ('Open',   3.0,    [get_s,            get_diff, nomalize, sigmoid]),
    ('High',   1.0,    [get_s,            get_diff, nomalize, sigmoid]),
    ('Low',    1.0,    [get_s,            get_diff, nomalize, sigmoid]),
    ('Volume', 1.0,    [get_s,                      nomalize, sigmoid]),
    ('Close',  1.0,    [get_s, ave_n(5),  get_diff, nomalize, sigmoid]),
    ('Open',   1.0,    [get_s, ave_n(5),  get_diff, nomalize, sigmoid]),
    ('Close',  0.5,    [get_s, ave_n(10), get_diff, nomalize, sigmoid]),
]

train_X_all = zip(*[w*(merge_fs(fs)(i, input_start, input_end, l)) for i in x_names for l,w,fs in lwfs])
train_Y_all = [1 if i>0 else 0 for i in get_diff(get_s(y_name, predict_start, predict_end, 'Close'))]

print "Running for input X({0}) and Y({1})...".format(len(train_X_all), len(train_Y_all))
if len(train_X_all) != len(train_Y_all):
    raise Exception("### Uneven input X({0}) and Y({1}), please Check!!!".format(len(train_X_all), len(train_Y_all)))

n_train_data = int(len(train_X_all)*percentage_for_training)
train_X, train_Y = train_X_all[:n_train_data], train_Y_all[:n_train_data]
test_X,  test_Y  = train_X_all[n_train_data:], train_Y_all[n_train_data:]

# fit and predict
def fit_and_predict(sklnr, train_X, train_Y, test_X):
    sklnr.fit(train_X ,train_Y)
    out_Y = sklnr.predict(test_X)
    actual_vs_predict = zip(*[test_Y, out_Y])
    matched_count = [1 if i[0]==i[1] else 0 for i in actual_vs_predict]
    accuracy = 1.0*sum(matched_count)/len(matched_count)
    print 'Accuracy: {0}% Train({1}):Test({2}) - Model: {3}'.format(
        int(accuracy*1000)/10.0,
        len(train_Y),
        len(test_Y),
        str(sklnr).replace('\n',' '))

# choose different learners
learner = [
        naive_bayes.GaussianNB(),
        linear_model.SGDClassifier(),
        svm.SVC(),
        tree.DecisionTreeClassifier(),
        ensemble.RandomForestClassifier(),
        # neural_network.MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(10, 2), random_state=1)
    ]

# run
for l in learner:
    fit_and_predict(l, train_X, train_Y, test_X)
gpl-3.0
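The feature pipeline in ml_test.py above hinges on its merge_fs composition helper; a small self-contained sketch of how it chains transforms, where double and head3 are purely illustrative stand-ins for get_s/get_diff/nomalize.

# Hedged sketch of the merge_fs composition used in ml_test.py above:
# merge_fs([f, g, h])(x) applies f first, then g, then h.
def merge_fs(fs):
    return fs[0] if len(fs) == 1 else lambda *args: (merge_fs(fs[1:]))(fs[0](*args))

double = lambda xs: [2 * v for v in xs]   # illustrative stand-in transform
head3 = lambda xs: xs[:3]                 # illustrative stand-in transform

pipeline = merge_fs([double, head3])
assert pipeline([1, 2, 3, 4]) == [2, 4, 6]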
tum-camp/survival-support-vector-machine
survival/io/arffread.py
1
1824
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy

from scipy.io.arff import loadarff as scipy_loadarff

import pandas

__all__ = ["loadarff"]


def _to_pandas(data, meta):
    data_dict = {}
    attrnames = meta.names()
    for name in attrnames:
        tp, attr_format = meta[name]
        if tp == "nominal":
            raw = []
            for b in data[name]:
                # replace missing values with NaN
                if b == b'?':
                    raw.append(numpy.nan)
                else:
                    raw.append(b.decode())

            data_dict[name] = pandas.Categorical(raw, categories=attr_format, ordered=False)
        else:
            arr = data[name]
            p = pandas.Series(arr, dtype=arr.dtype)
            data_dict[name] = p

    # currently, this step converts all pandas.Categorical columns back to pandas.Series
    return pandas.DataFrame(data_dict)


def loadarff(filename):
    """Load ARFF file

    Parameters
    ----------
    filename : string
        Path to ARFF file

    Returns
    -------
    data_frame : :class:`pandas.DataFrame`
        DataFrame containing data of ARFF file
    """
    data, meta = scipy_loadarff(filename)
    return _to_pandas(data, meta)
gpl-3.0
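A hedged usage sketch for the loadarff wrapper above; the ARFF text, the temporary-file handling, and the assumption that the module is importable as survival.io.arffread are all illustrative.

# Hedged usage sketch for loadarff above; assumes the package installs as `survival`.
import os
import tempfile

from survival.io.arffread import loadarff

ARFF_TEXT = """@relation toy
@attribute age numeric
@attribute outcome {yes,no}
@data
61,yes
52,no
"""

fd, path = tempfile.mkstemp(suffix=".arff")
with os.fdopen(fd, "w") as f:
    f.write(ARFF_TEXT)

frame = loadarff(path)   # pandas.DataFrame: float 'age' column, categorical 'outcome'
assert list(frame.columns) == ["age", "outcome"]
os.remove(path)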
aleksandr-bakanov/astropy
astropy/visualization/wcsaxes/core.py
2
30823
# Licensed under a 3-clause BSD style license - see LICENSE.rst from functools import partial from collections import defaultdict import numpy as np from matplotlib import rcParams from matplotlib.artist import Artist from matplotlib.axes import Axes, subplot_class_factory from matplotlib.transforms import Affine2D, Bbox, Transform import astropy.units as u from astropy.coordinates import SkyCoord, BaseCoordinateFrame from astropy.wcs import WCS from astropy.wcs.wcsapi import BaseHighLevelWCS from .transforms import CoordinateTransform from .coordinates_map import CoordinatesMap from .utils import get_coord_meta, transform_contour_set_inplace from .frame import RectangularFrame, RectangularFrame1D from .wcsapi import IDENTITY, transform_coord_meta_from_wcs __all__ = ['WCSAxes', 'WCSAxesSubplot'] VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle'] class _WCSAxesArtist(Artist): """This is a dummy artist to enforce the correct z-order of axis ticks, tick labels, and gridlines. FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder and then renders them in sequence. For normal Matplotlib axes, the ticks, tick labels, and gridlines are included in this list of artists and hence are automatically drawn in the correct order. However, ``WCSAxes`` disables the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders ersatz ticks, labels, and gridlines by explicitly calling the functions ``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc. This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels, and gridlines in the standary way.""" def draw(self, renderer, *args, **kwargs): self.axes.draw_wcsaxes(renderer) class WCSAxes(Axes): """ The main axes class that can be used to show world coordinates from a WCS. Parameters ---------- fig : `~matplotlib.figure.Figure` The figure to add the axes to rect : list The position of the axes in the figure in relative units. Should be given as ``[left, bottom, width, height]``. wcs : :class:`~astropy.wcs.WCS`, optional The WCS for the data. If this is specified, ``transform`` cannot be specified. transform : `~matplotlib.transforms.Transform`, optional The transform for the data. If this is specified, ``wcs`` cannot be specified. coord_meta : dict, optional A dictionary providing additional metadata when ``transform`` is specified. This should include the keys ``type``, ``wrap``, and ``unit``. Each of these should be a list with as many items as the dimension of the WCS. The ``type`` entries should be one of ``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should give, for the longitude, the angle at which the coordinate wraps (and `None` otherwise), and the ``unit`` should give the unit of the coordinates as :class:`~astropy.units.Unit` instances. This can optionally also include a ``format_unit`` entry giving the units to use for the tick labels (if not specified, this defaults to ``unit``). transData : `~matplotlib.transforms.Transform`, optional Can be used to override the default data -> pixel mapping. slices : tuple, optional For WCS transformations with more than two dimensions, we need to choose which dimensions are being shown in the 2D image. The slice should contain one ``x`` entry, one ``y`` entry, and the rest of the values should be integers indicating the slice through the data. 
The order of the items in the slice should be the same as the order of the dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means that the first WCS dimension (last Numpy dimension) will be sliced at an index of 50, the second WCS and Numpy dimension will be shown on the x axis, and the final WCS dimension (first Numpy dimension) will be shown on the y-axis (and therefore the data will be plotted using ``data[:, :, 50].transpose()``) frame_class : type, optional The class for the frame, which should be a subclass of :class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a :class:`~astropy.visualization.wcsaxes.frame.RectangularFrame` """ def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None, transData=None, slices=None, frame_class=None, **kwargs): """ """ super().__init__(fig, rect, **kwargs) self._bboxes = [] if frame_class is not None: self.frame_class = frame_class elif (wcs is not None and (wcs.pixel_n_dim == 1 or (slices is not None and 'y' not in slices))): self.frame_class = RectangularFrame1D else: self.frame_class = RectangularFrame if not (transData is None): # User wants to override the transform for the final # data->pixel mapping self.transData = transData self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta) self._hide_parent_artists() self.format_coord = self._display_world_coords self._display_coords_index = 0 fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs) self.patch = self.coords.frame.patch self._wcsaxesartist = _WCSAxesArtist() self.add_artist(self._wcsaxesartist) self._drawn = False def _display_world_coords(self, x, y): if not self._drawn: return "" if self._display_coords_index == -1: return f"{x} {y} (pixel)" pixel = np.array([x, y]) coords = self._all_coords[self._display_coords_index] world = coords._transform.transform(np.array([pixel]))[0] coord_strings = [] for idx, coord in enumerate(coords): if coord.coord_index is not None: coord_strings.append(coord.format_coord(world[coord.coord_index], format='ascii')) coord_string = ' '.join(coord_strings) if self._display_coords_index == 0: system = "world" else: system = f"world, overlay {self._display_coords_index}" coord_string = f"{coord_string} ({system})" return coord_string def _set_cursor_prefs(self, event, **kwargs): if event.key == 'w': self._display_coords_index += 1 if self._display_coords_index + 1 > len(self._all_coords): self._display_coords_index = -1 def _hide_parent_artists(self): # Turn off spines and current axes for s in self.spines.values(): s.set_visible(False) self.xaxis.set_visible(False) if self.frame_class is not RectangularFrame1D: self.yaxis.set_visible(False) # We now overload ``imshow`` because we need to make sure that origin is # set to ``lower`` for all images, which means that we need to flip RGB # images. def imshow(self, X, *args, **kwargs): """ Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`. If an RGB image is passed as a PIL object, it will be flipped vertically and ``origin`` will be set to ``lower``, since WCS transformations - like FITS files - assume that the origin is the lower left pixel of the image (whereas RGB images have the origin in the top left). All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`. """ origin = kwargs.pop('origin', 'lower') # plt.imshow passes origin as None, which we should default to lower. 
if origin is None: origin = 'lower' elif origin == 'upper': raise ValueError("Cannot use images with origin='upper' in WCSAxes.") # To check whether the image is a PIL image we can check if the data # has a 'getpixel' attribute - this is what Matplotlib's AxesImage does try: from PIL.Image import Image, FLIP_TOP_BOTTOM except ImportError: # We don't need to worry since PIL is not installed, so user cannot # have passed RGB image. pass else: if isinstance(X, Image) or hasattr(X, 'getpixel'): X = X.transpose(FLIP_TOP_BOTTOM) return super().imshow(X, *args, origin=origin, **kwargs) def contour(self, *args, **kwargs): """ Plot contours. This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour` which applies the transform (if specified) to all contours in one go for performance rather than to each contour line individually. All positional and keyword arguments are the same as for :meth:`~matplotlib.axes.Axes.contour`. """ # In Matplotlib, when calling contour() with a transform, each # individual path in the contour map is transformed separately. However, # this is much too slow for us since each call to the transforms results # in an Astropy coordinate transformation, which has a non-negligible # overhead - therefore a better approach is to override contour(), call # the Matplotlib one with no transform, then apply the transform in one # go to all the segments that make up the contour map. transform = kwargs.pop('transform', None) cset = super().contour(*args, **kwargs) if transform is not None: # The transform passed to self.contour will normally include # a transData component at the end, but we can remove that since # we are already working in data space. transform = transform - self.transData transform_contour_set_inplace(cset, transform) return cset def contourf(self, *args, **kwargs): """ Plot filled contours. This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf` which applies the transform (if specified) to all contours in one go for performance rather than to each contour line individually. All positional and keyword arguments are the same as for :meth:`~matplotlib.axes.Axes.contourf`. """ # See notes for contour above. transform = kwargs.pop('transform', None) cset = super().contourf(*args, **kwargs) if transform is not None: # The transform passed to self.contour will normally include # a transData component at the end, but we can remove that since # we are already working in data space. transform = transform - self.transData transform_contour_set_inplace(cset, transform) return cset def plot_coord(self, *args, **kwargs): """ Plot `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` objects onto the axes. The first argument to :meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a coordinate, which will then be converted to the first two parameters to `matplotlib.axes.Axes.plot`. All other arguments are the same as `matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword argument will be created based on the coordinate. Parameters ---------- coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinate object to plot on the axes. This is converted to the first two arguments to `matplotlib.axes.Axes.plot`. See Also -------- matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it. """ if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)): # Extract the frame from the first argument. 
frame0 = args[0] if isinstance(frame0, SkyCoord): frame0 = frame0.frame native_frame = self._transform_pixel2world.frame_out # Transform to the native frame of the plot frame0 = frame0.transform_to(native_frame) plot_data = [] for coord in self.coords: if coord.coord_type == 'longitude': plot_data.append(frame0.spherical.lon.to_value(u.deg)) elif coord.coord_type == 'latitude': plot_data.append(frame0.spherical.lat.to_value(u.deg)) else: raise NotImplementedError("Coordinates cannot be plotted with this " "method because the WCS does not represent longitude/latitude.") if 'transform' in kwargs.keys(): raise TypeError("The 'transform' keyword argument is not allowed," " as it is automatically determined by the input coordinate frame.") transform = self.get_transform(native_frame) kwargs.update({'transform': transform}) args = tuple(plot_data) + args[1:] return super().plot(*args, **kwargs) def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None): """ Reset the current Axes, to use a new WCS object. """ # Here determine all the coordinate axes that should be shown. if wcs is None and transform is None: self.wcs = IDENTITY else: # We now force call 'set', which ensures the WCS object is # consistent, which will only be important if the WCS has been set # by hand. For example if the user sets a celestial WCS by hand and # forgets to set the units, WCS.wcs.set() will do this. if wcs is not None: # Check if the WCS object is an instance of `astropy.wcs.WCS` # This check is necessary as only `astropy.wcs.WCS` supports # wcs.set() method if isinstance(wcs, WCS): wcs.wcs.set() if isinstance(wcs, BaseHighLevelWCS): wcs = wcs.low_level_wcs self.wcs = wcs # If we are making a new WCS, we need to preserve the path object since # it may already be used by objects that have been plotted, and we need # to continue updating it. CoordinatesMap will create a new frame # instance, but we can tell that instance to keep using the old path. if hasattr(self, 'coords'): previous_frame = {'path': self.coords.frame._path, 'color': self.coords.frame.get_color(), 'linewidth': self.coords.frame.get_linewidth()} else: previous_frame = {'path': None} if self.wcs is not None: transform, coord_meta = transform_coord_meta_from_wcs(self.wcs, self.frame_class, slices=slices) self.coords = CoordinatesMap(self, transform=transform, coord_meta=coord_meta, frame_class=self.frame_class, previous_frame_path=previous_frame['path']) self._transform_pixel2world = transform if previous_frame['path'] is not None: self.coords.frame.set_color(previous_frame['color']) self.coords.frame.set_linewidth(previous_frame['linewidth']) self._all_coords = [self.coords] # Common default settings for Rectangular Frame for ind, pos in enumerate(coord_meta.get('default_axislabel_position', ['b', 'l'])): self.coords[ind].set_axislabel_position(pos) for ind, pos in enumerate(coord_meta.get('default_ticklabel_position', ['b', 'l'])): self.coords[ind].set_ticklabel_position(pos) for ind, pos in enumerate(coord_meta.get('default_ticks_position', ['bltr', 'bltr'])): self.coords[ind].set_ticks_position(pos) if rcParams['axes.grid']: self.grid() def draw_wcsaxes(self, renderer): if not self.axison: return # Here need to find out range of all coordinates, and update range for # each coordinate axis. For now, just assume it covers the whole sky. self._bboxes = [] # This generates a structure like [coords][axis] = [...] 
ticklabels_bbox = defaultdict(partial(defaultdict, list)) ticks_locs = defaultdict(partial(defaultdict, list)) visible_ticks = [] for coords in self._all_coords: coords.frame.update() for coord in coords: coord._draw_grid(renderer) for coords in self._all_coords: for coord in coords: coord._draw_ticks(renderer, bboxes=self._bboxes, ticklabels_bbox=ticklabels_bbox[coord], ticks_locs=ticks_locs[coord]) visible_ticks.extend(coord.ticklabels.get_visible_axes()) for coords in self._all_coords: for coord in coords: coord._draw_axislabels(renderer, bboxes=self._bboxes, ticklabels_bbox=ticklabels_bbox, ticks_locs=ticks_locs[coord], visible_ticks=visible_ticks) self.coords.frame.draw(renderer) def draw(self, renderer, inframe=False): # In Axes.draw, the following code can result in the xlim and ylim # values changing, so we need to force call this here to make sure that # the limits are correct before we update the patch. locator = self.get_axes_locator() if locator: pos = locator(self, renderer) self.apply_aspect(pos) else: self.apply_aspect() if self._axisbelow is True: self._wcsaxesartist.set_zorder(0.5) elif self._axisbelow is False: self._wcsaxesartist.set_zorder(2.5) else: # 'line': above patches, below lines self._wcsaxesartist.set_zorder(1.5) # We need to make sure that that frame path is up to date self.coords.frame._update_patch_path() super().draw(renderer, inframe=inframe) self._drawn = True # MATPLOTLIB_LT_30: The ``kwargs.pop('label', None)`` is to ensure # compatibility with Matplotlib 2.x (which has label) and 3.x (which has # xlabel). While these are meant to be a single positional argument, # Matplotlib internally sometimes specifies e.g. set_xlabel(xlabel=...). def set_xlabel(self, xlabel=None, labelpad=1, **kwargs): if xlabel is None: xlabel = kwargs.pop('label', None) if xlabel is None: raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'") for coord in self.coords: if 'b' in coord.axislabels.get_visible_axes(): coord.set_axislabel(xlabel, minpad=labelpad, **kwargs) break def set_ylabel(self, ylabel=None, labelpad=1, **kwargs): if ylabel is None: ylabel = kwargs.pop('label', None) if ylabel is None: raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'") if self.frame_class is RectangularFrame1D: return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs) for coord in self.coords: if 'l' in coord.axislabels.get_visible_axes(): coord.set_axislabel(ylabel, minpad=labelpad, **kwargs) break def get_xlabel(self): for coord in self.coords: if 'b' in coord.axislabels.get_visible_axes(): return coord.get_axislabel() def get_ylabel(self): if self.frame_class is RectangularFrame1D: return super().get_ylabel() for coord in self.coords: if 'l' in coord.axislabels.get_visible_axes(): return coord.get_axislabel() def get_coords_overlay(self, frame, coord_meta=None): # Here we can't use get_transform because that deals with # pixel-to-pixel transformations when passing a WCS object. 
if isinstance(frame, WCS): transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class) else: transform = self._get_transform_no_transdata(frame) if coord_meta is None: coord_meta = get_coord_meta(frame) coords = CoordinatesMap(self, transform=transform, coord_meta=coord_meta, frame_class=self.frame_class) self._all_coords.append(coords) # Common settings for overlay coords[0].set_axislabel_position('t') coords[1].set_axislabel_position('r') coords[0].set_ticklabel_position('t') coords[1].set_ticklabel_position('r') self.overlay_coords = coords return coords def get_transform(self, frame): """ Return a transform from the specified frame to display coordinates. This does not include the transData transformation Parameters ---------- frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str The ``frame`` parameter can have several possible types: * :class:`~astropy.wcs.WCS` instance: assumed to be a transformation from pixel to world coordinates, where the world coordinates are the same as those in the WCS transformation used for this ``WCSAxes`` instance. This is used for example to show contours, since this involves plotting an array in pixel coordinates that are not the final data coordinate and have to be transformed to the common world coordinate system first. * :class:`~matplotlib.transforms.Transform` instance: it is assumed to be a transform to the world coordinates that are part of the WCS used to instantiate this ``WCSAxes`` instance. * ``'pixel'`` or ``'world'``: return a transformation that allows users to plot in pixel/data coordinates (essentially an identity transform) and ``world`` (the default world-to-pixel transformation used to instantiate the ``WCSAxes`` instance). * ``'fk5'`` or ``'galactic'``: return a transformation from the specified frame to the pixel/data coordinates. * :class:`~astropy.coordinates.BaseCoordinateFrame` instance. """ return self._get_transform_no_transdata(frame).inverted() + self.transData def _get_transform_no_transdata(self, frame): """ Return a transform from data to the specified frame """ if isinstance(frame, WCS): transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class) transform_world2pixel = transform.inverted() if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in: return self._transform_pixel2world + transform_world2pixel else: return (self._transform_pixel2world + CoordinateTransform(self._transform_pixel2world.frame_out, transform_world2pixel.frame_in) + transform_world2pixel) elif frame == 'pixel': return Affine2D() elif isinstance(frame, Transform): return self._transform_pixel2world + frame else: if frame == 'world': return self._transform_pixel2world else: coordinate_transform = CoordinateTransform(self._transform_pixel2world.frame_out, frame) if coordinate_transform.same_frames: return self._transform_pixel2world else: return self._transform_pixel2world + coordinate_transform def get_tightbbox(self, renderer, *args, **kwargs): # FIXME: we should determine what to do with the extra arguments here. # Note that the expected signature of this method is different in # Matplotlib 3.x compared to 2.x. if not self.get_visible(): return bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)] if bb: _bbox = Bbox.union(bb) return _bbox else: return self.get_window_extent(renderer) def grid(self, b=None, axis='both', *, which='major', **kwargs): """ Plot gridlines for both coordinates. 
Standard matplotlib appearance options (color, alpha, etc.) can be passed as keyword arguments. This behaves like `matplotlib.axes.Axes` except that if no arguments are specified, the grid is shown rather than toggled. Parameters ---------- b : bool Whether to show the gridlines. """ if not hasattr(self, 'coords'): return if which != 'major': raise NotImplementedError('Plotting the grid for the minor ticks is ' 'not supported.') if axis == 'both': self.coords.grid(draw_grid=b, **kwargs) elif axis == 'x': self.coords[0].grid(draw_grid=b, **kwargs) elif axis == 'y': self.coords[1].grid(draw_grid=b, **kwargs) else: raise ValueError('axis should be one of x/y/both') def tick_params(self, axis='both', **kwargs): """ Method to set the tick and tick label parameters in the same way as the :meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib. This is provided for convenience, but the recommended API is to use :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`, :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`, :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`, :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`, and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`. Parameters ---------- axis : int or str, optional Which axis to apply the parameters to. This defaults to 'both' but this can also be set to an `int` or `str` that refers to the axis to apply it to, following the valid values that can index ``ax.coords``. Note that ``'x'`` and ``'y``' are also accepted in the case of rectangular axes. which : {'both', 'major', 'minor'}, optional Which ticks to apply the settings to. By default, setting are applied to both major and minor ticks. Note that if ``'minor'`` is specified, only the length of the ticks can be set currently. direction : {'in', 'out'}, optional Puts ticks inside the axes, or outside the axes. length : float, optional Tick length in points. width : float, optional Tick width in points. color : color, optional Tick color (accepts any valid Matplotlib color) pad : float, optional Distance in points between tick and label. labelsize : float or str, optional Tick label font size in points or as a string (e.g., 'large'). labelcolor : color, optional Tick label color (accepts any valid Matplotlib color) colors : color, optional Changes the tick color and the label color to the same value (accepts any valid Matplotlib color). bottom, top, left, right : bool, optional Where to draw the ticks. Note that this can only be given if a specific coordinate is specified via the ``axis`` argument, and it will not work correctly if the frame is not rectangular. labelbottom, labeltop, labelleft, labelright : bool, optional Where to draw the tick labels. Note that this can only be given if a specific coordinate is specified via the ``axis`` argument, and it will not work correctly if the frame is not rectangular. grid_color : color, optional The color of the grid lines (accepts any valid Matplotlib color). grid_alpha : float, optional Transparency of grid lines: 0 (transparent) to 1 (opaque). grid_linewidth : float, optional Width of grid lines in points. grid_linestyle : str, optional The style of the grid lines (accepts any valid Matplotlib line style). 
""" if not hasattr(self, 'coords'): # Axes haven't been fully initialized yet, so just ignore, as # Axes.__init__ calls this method return if axis == 'both': for pos in ('bottom', 'left', 'top', 'right'): if pos in kwargs: raise ValueError(f"Cannot specify {pos}= when axis='both'") if 'label' + pos in kwargs: raise ValueError(f"Cannot specify label{pos}= when axis='both'") for coord in self.coords: coord.tick_params(**kwargs) elif axis in self.coords: self.coords[axis].tick_params(**kwargs) elif axis in ('x', 'y') and self.frame_class is RectangularFrame: spine = 'b' if axis == 'x' else 'l' for coord in self.coords: if spine in coord.axislabels.get_visible_axes(): coord.tick_params(**kwargs) # In the following, we put the generated subplot class in a temporary class and # we then inherit it - if we don't do this, the generated class appears to # belong in matplotlib, not in WCSAxes, from the API's point of view. class WCSAxesSubplot(subplot_class_factory(WCSAxes)): """ A subclass class for WCSAxes """ pass
bsd-3-clause
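The WCSAxes class above is normally reached through Matplotlib's projection machinery rather than instantiated directly; a minimal hedged sketch follows, with a made-up 2D celestial header (none of the numbers come from real data).

# Hedged sketch: create a WCSAxes via the projection= interface; header values
# below are illustrative only.
import matplotlib.pyplot as plt
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [83.6, 22.0]
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.cdelt = [-0.01, 0.01]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wcs)     # yields a WCSAxes subplot
ax.grid(True, color="gray", linestyle="dotted")   # draws the curvilinear world grid
ax.set_xlabel("Right Ascension")
ax.set_ylabel("Declination")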
Eric89GXL/scikit-learn
sklearn/decomposition/pca.py
4
25539
""" Principal Component Analysis """ # Author: Alexandre Gramfort <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Denis A. Engemann <[email protected]> # # License: BSD 3 clause from math import log, sqrt import warnings import numpy as np from scipy import linalg from scipy.special import gammaln from ..base import BaseEstimator, TransformerMixin from ..utils import array2d, check_random_state, as_float_array from ..utils import atleast2d_or_csr from ..utils import deprecated from ..utils.extmath import (fast_logdet, safe_sparse_dot, randomized_svd, fast_dot) def _assess_dimension_(spectrum, rank, n_samples, n_features): """Compute the likelihood of a rank ``rank`` dataset The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. Parameters ---------- spectrum: array of shape (n) data spectrum rank: int, tested rank value n_samples: int, number of samples dim: int, embedding/empirical dimension Returns ------- ll: float, The log-likelihood Notes ----- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` """ if rank > len(spectrum): raise ValueError("The tested rank cannot exceed the rank of the" " dataset") pu = -rank * log(2.) for i in range(rank): pu += (gammaln((n_features - i) / 2.) - log(np.pi) * (n_features - i) / 2.) pl = np.sum(np.log(spectrum[:rank])) pl = -pl * n_samples / 2. if rank == n_features: pv = 0 v = 1 else: v = np.sum(spectrum[rank:]) / (n_features - rank) pv = -np.log(v) * n_samples * (n_features - rank) / 2. m = n_features * rank - rank * (rank + 1.) / 2. pp = log(2. * np.pi) * (m + rank + 1.) / 2. pa = 0. spectrum_ = spectrum.copy() spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, len(spectrum)): pa += log((spectrum[i] - spectrum[j]) * (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples) ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2. return ll def _infer_dimension_(spectrum, n_samples, n_features): """Infers the dimension of a dataset of shape (n_samples, n_features) The dataset is described by its spectrum `spectrum`. """ n_spectrum = len(spectrum) ll = np.empty(n_spectrum) for rank in range(n_spectrum): ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features) return ll.argmax() class PCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) Linear dimensionality reduction using Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. This implementation uses the scipy.linalg implementation of the singular value decomposition. It only works for dense arrays and is not scalable to large dimensional data. The time complexity of this implementation is ``O(n ** 3)`` assuming n ~ n_samples ~ n_features. Parameters ---------- n_components : int, None or string Number of components to keep. if n_components is not set all components are kept:: n_components == min(n_samples, n_features) if n_components == 'mle', Minka\'s MLE is used to guess the dimension if ``0 < n_components < 1``, select the number of components such that the amount of variance that needs to be explained is greater than the percentage specified by n_components copy : bool If False, data passed to fit are overwritten and running fit(X).transform(X) will not yield the expected results, use fit_transform(X) instead. 
whiten : bool, optional When True (False by default) the `components_` vectors are divided by n_samples times singular values to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometime improve the predictive accuracy of the downstream estimators by making there data respect some hard-wired assumptions. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. \ k is not set then all components are stored and the sum of explained \ variances is equal to 1.0 `n_components_` : int The estimated number of components. Relevant when n_components is set to 'mle' or a number between 0 and 1 to select using explained variance. `noise_variance_` : float The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. It is required to computed the estimated data covariance and score samples. Notes ----- For n_components='mle', this class uses the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` Implements the probabilistic PCA model from: M. Tipping and C. Bishop, Probabilistic Principal Component Analysis, Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622 via the score and score_samples methods. See http://www.miketipping.com/papers/met-mppca.pdf Due to implementation subtleties of the Singular Value Decomposition (SVD), which is used in this implementation, running fit twice on the same matrix can lead to principal components with signs flipped (change in direction). For this reason, it is important to always use the same estimator object to transform data in a consistent fashion. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import PCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = PCA(n_components=2) >>> pca.fit(X) PCA(copy=True, n_components=2, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- ProbabilisticPCA RandomizedPCA KernelPCA SparsePCA TruncatedSVD """ def __init__(self, n_components=None, copy=True, whiten=False): self.n_components = n_components self.copy = copy self.whiten = whiten def fit(self, X, y=None): """Fit the model with X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ self._fit(X) return self def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. 
Returns ------- X_new : array-like, shape (n_samples, n_components) """ U, S, V = self._fit(X) U = U[:, :self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0]) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[:self.n_components_] return U def _fit(self, X): """ Fit the model on X Parameters ---------- X: array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- U, s, V : ndarrays The SVD of the input data, copied and centered when requested. """ X = array2d(X) n_samples, n_features = X.shape X = as_float_array(X, copy=self.copy) # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ U, S, V = linalg.svd(X, full_matrices=False) explained_variance_ = (S ** 2) / n_samples explained_variance_ratio_ = (explained_variance_ / explained_variance_.sum()) if self.whiten: components_ = V / (S[:, np.newaxis] / sqrt(n_samples)) else: components_ = V n_components = self.n_components if n_components is None: n_components = n_features elif n_components == 'mle': if n_samples < n_features: raise ValueError("n_components='mle' is only supported " "if n_samples >= n_features") n_components = _infer_dimension_(explained_variance_, n_samples, n_features) if 0 < n_components < 1.0: # number of components for which the cumulated explained variance # percentage is superior to the desired threshold ratio_cumsum = explained_variance_ratio_.cumsum() n_components = np.sum(ratio_cumsum < n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < n_features: self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. # store n_samples to revert whitening when getting covariance self.n_samples_ = n_samples self.components_ = components_[:n_components] self.explained_variance_ = explained_variance_[:n_components] explained_variance_ratio_ = explained_variance_ratio_[:n_components] self.explained_variance_ratio_ = explained_variance_ratio_ self.n_components_ = n_components return (U, S, V) def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances. Returns ------- cov : array, shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. 
""" n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[::len(precision) + 1] += 1. / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision def transform(self, X): """Apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = array2d(X) if self.mean_ is not None: X = X - self.mean_ X_transformed = fast_dot(X, self.components_.T) return X_transformed def inverse_transform(self, X): """Transform data back to its original space, i.e., return an input X_original whose transform would be X Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation as transform. """ return fast_dot(X, self.components_) + self.mean_ def score_samples(self, X): """Return the log-likelihood of each sample See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X: array, shape(n_samples, n_features) The data. Returns ------- ll: array, shape (n_samples,) Log-likelihood of each sample under the current model """ Xr = X - self.mean_ n_features = X.shape[1] log_like = np.zeros(X.shape[0]) precision = self.get_precision() log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) log_like -= .5 * (n_features * log(2. * np.pi) - fast_logdet(precision)) return log_like def score(self, X, y=None): """Return the average log-likelihood of all samples See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X: array, shape(n_samples, n_features) The data. Returns ------- ll: float Average log-likelihood of the samples under the current model """ return np.mean(self.score_samples(X)) @deprecated("ProbabilisticPCA will be removed in 0.16. WARNING: the covariance" " estimation was previously incorrect, your output might be different " " than under the previous versions. Use PCA that implements score" " and score_samples. 
To work with homoscedastic=False, you should use" " FactorAnalysis.") class ProbabilisticPCA(PCA): """Additional layer on top of PCA that adds a probabilistic evaluation""" __doc__ += PCA.__doc__ def fit(self, X, y=None, homoscedastic=True): """Additionally to PCA.fit, learns a covariance model Parameters ---------- X : array of shape(n_samples, n_features) The data to fit homoscedastic : bool, optional, If True, average variance across remaining dimensions """ PCA.fit(self, X) n_samples, n_features = X.shape n_components = self.n_components if n_components is None: n_components = n_features explained_variance = self.explained_variance_.copy() if homoscedastic: explained_variance -= self.noise_variance_ # Make the low rank part of the estimated covariance self.covariance_ = np.dot(self.components_[:n_components].T * explained_variance, self.components_[:n_components]) if n_features == n_components: delta = 0. elif homoscedastic: delta = self.noise_variance_ else: Xr = X - self.mean_ Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_) delta = (Xr ** 2).mean(axis=0) / (n_features - n_components) # Add delta to the diagonal without extra allocation self.covariance_.flat[::n_features + 1] += delta return self def score(self, X, y=None): """Return a score associated to new data Parameters ---------- X: array of shape(n_samples, n_features) The data to test Returns ------- ll: array of shape (n_samples), log-likelihood of each row of X under the current model """ Xr = X - self.mean_ n_features = X.shape[1] log_like = np.zeros(X.shape[0]) self.precision_ = linalg.inv(self.covariance_) log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1) log_like -= .5 * (fast_logdet(self.covariance_) + n_features * log(2. * np.pi)) return log_like class RandomizedPCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) using randomized SVD Linear dimensionality reduction using approximated Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. Parameters ---------- n_components : int, optional Maximum number of components to keep. When not given or None, this is set to n_features (the second dimension of the training data). copy : bool If False, data passed to fit are overwritten and running fit(X).transform(X) will not yield the expected results, use fit_transform(X) instead. iterated_power : int, optional Number of iterations for the power method. 3 by default. whiten : bool, optional When True (False by default) the `components_` vectors are divided by the singular values to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometime improve the predictive accuracy of the downstream estimators by making their data respect some hard-wired assumptions. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. 
If n_components is not set then all components are stored and the sum of explained variances is equal to 1.0 Examples -------- >>> import numpy as np >>> from sklearn.decomposition import RandomizedPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = RandomizedPCA(n_components=2) >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE RandomizedPCA(copy=True, iterated_power=3, n_components=2, random_state=None, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- PCA ProbabilisticPCA TruncatedSVD References ---------- .. [Halko2009] `Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909)` .. [MRT] `A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert` Notes ----- This class supports sparse matrix input for backward compatibility, but actually computes a truncated SVD instead of a PCA in that case (i.e. no centering is performed). This support is deprecated; use the class TruncatedSVD for sparse matrix support. """ def __init__(self, n_components=None, copy=True, iterated_power=3, whiten=False, random_state=None): self.n_components = n_components self.copy = copy self.iterated_power = iterated_power self.whiten = whiten self.mean_ = None self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ self._fit(X) return self def _fit(self, X): """Fit the model to the data X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : ndarray, shape (n_samples, n_features) The input data, copied, centered and whitened when requested. """ random_state = check_random_state(self.random_state) if hasattr(X, 'todense'): warnings.warn("Sparse matrix support is deprecated" " and will be dropped in 0.16." " Use TruncatedSVD instead.", DeprecationWarning) else: # not a sparse matrix, ensure this is a 2D array X = np.atleast_2d(as_float_array(X, copy=self.copy)) n_samples = X.shape[0] if not hasattr(X, 'todense'): # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components U, S, V = randomized_svd(X, n_components, n_iter=self.iterated_power, random_state=random_state) self.explained_variance_ = exp_var = (S ** 2) / n_samples self.explained_variance_ratio_ = exp_var / exp_var.sum() if self.whiten: self.components_ = V / S[:, np.newaxis] * sqrt(n_samples) else: self.components_ = V return X def transform(self, X, y=None): """Apply dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ # XXX remove scipy.sparse support here in 0.16 X = atleast2d_or_csr(X) if self.mean_ is not None: X = X - self.mean_ X = safe_sparse_dot(X, self.components_.T) return X def fit_transform(self, X, y=None): """Apply dimensionality reduction on X.
Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = self._fit(atleast2d_or_csr(X)) X = safe_sparse_dot(X, self.components_.T) return X def inverse_transform(self, X, y=None): """Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform. """ # XXX remove scipy.sparse support here in 0.16 X_original = safe_sparse_dot(X, self.components_) if self.mean_ is not None: X_original = X_original + self.mean_ return X_original
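The whitening and scoring behaviour documented above is easiest to see on data with a known low-rank structure. A minimal usage sketch (assuming a scikit-learn release of this vintage, where RandomizedPCA has not yet been removed; the toy data below is made up for illustration):

import numpy as np
from sklearn.decomposition import PCA, RandomizedPCA

rng = np.random.RandomState(0)
# 300 samples that mostly live in a 2-D subspace of a 10-D space
X = rng.randn(300, 2).dot(rng.randn(2, 10)) + 0.05 * rng.randn(300, 10)

pca = PCA(n_components=2, whiten=True)
X_white = pca.fit_transform(X)
# Whitened scores have approximately unit component-wise variance.
print(X_white.std(axis=0))
# Average log-likelihood of the data under the probabilistic PCA model.
print(pca.score(X))

# The randomized solver reports a very similar variance split on this
# nearly rank-2 data.
rpca = RandomizedPCA(n_components=2, random_state=0).fit(X)
print(pca.explained_variance_ratio_)
print(rpca.explained_variance_ratio_)

With whiten=True the projected components come out with roughly unit variance, which is what the docstring's remark about "hard-wired assumptions" of downstream estimators refers to.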
bsd-3-clause
peterfpeterson/mantid
Framework/PythonInterface/mantid/plots/scales.py
3
5597
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + # This file is part of the mantid package """ Defines a set of custom axis scales """ from mantid.plots.utility import mpl_version_info from matplotlib.scale import ScaleBase from matplotlib.ticker import (AutoLocator, NullFormatter, NullLocator, ScalarFormatter) from matplotlib.transforms import Transform import numpy as np class PowerScale(ScaleBase): """Scales the data using a power-law scaling: x^gamma """ # Name required by register_scale. Use this is set_*scale # commands name = 'power' def __init__(self, _axis, **kwargs): """ Any keyword arguments passed to ``set_xscale`` and ``set_yscale`` will be passed along to the scale's constructor. gamma: The power used to scale the data. """ if mpl_version_info() > (3,): super(PowerScale, self).__init__(_axis) else: super(PowerScale, self).__init__() gamma = kwargs.pop("gamma", None) if gamma is None: raise ValueError("power scale must specify gamma value") self._gamma = float(gamma) def get_transform(self): """ Return a PowerTransform that does the actual scaling """ return PowerScale.PowerTransform(self._gamma) def set_default_locators_and_formatters(self, axis): """ Set the locators and formatters to automatic and scalar """ axis.set_major_locator(AutoLocator()) axis.set_major_formatter(ScalarFormatter()) axis.set_minor_locator(NullLocator()) axis.set_minor_formatter(NullFormatter()) def limit_range_for_scale(self, vmin, vmax, minpos): """ Limit the domain to positive values for even or fractional indices """ if not self._gamma.is_integer() or self._gamma % 2 == 0: if not np.isfinite(minpos): minpos = 1e-300 # This value should rarely if ever # end up with a visible effect. return (minpos if vmin <= 0 else vmin, minpos if vmax <= 0 else vmax) else: return vmin, vmax class PowerTransform(Transform): # There are two value members that must be defined. # ``input_dims`` and ``output_dims`` specify number of input # dimensions and output dimensions to the transformation. # These are used by the transformation framework to do some # error checking and prevent incompatible transformations from # being connected together. When defining transforms for a # scale, which are, by definition, separable and have only one # dimension, these members should always be set to 1. input_dims = 1 output_dims = 1 is_separable = True has_inverse = True def __init__(self, gamma): super(PowerScale.PowerTransform, self).__init__() self._gamma = gamma def transform_non_affine(self, a): """Apply the transform to the given data array """ with np.errstate(divide="ignore", invalid="ignore"): out = np.power(a, self._gamma) if not self._gamma.is_integer(): # negative numbers to power of fractions are undefined # clip them to 0 out[a <= 0] = 0 return out def inverted(self): """ Return the type responsible for inverting the transform """ return PowerScale.InvertedPowerTransform(self._gamma) class InvertedPowerTransform(Transform): input_dims = 1 output_dims = 1 is_separable = True has_inverse = True def __init__(self, gamma): super(PowerScale.InvertedPowerTransform, self).__init__() self._gamma = gamma def transform_non_affine(self, a): if not self._gamma.is_integer() or self._gamma % 2 == 0: with np.errstate(divide="ignore", invalid="ignore"): out = np.power(a, 1. 
/ self._gamma) # clip negative values to 0 out[a <= 0] = 0 else: # negative numbers to power of non-integers are undefined and np.power # returns nan. In the case where we have a fractional power with # an odd denominator we can write the power as # a^(1/b) = -((-a)^(1/b)) for a < 0 negative_indices = (a < 0.) if np.any(negative_indices): out = np.copy(a) np.negative(a, where=negative_indices, out=out) np.power(out, 1. / self._gamma, out=out) np.negative(out, where=negative_indices, out=out) else: out = np.power(a, 1. / self._gamma) return out def inverted(self): return PowerScale.PowerTransform(self._gamma) class SquareScale(PowerScale): # Convenience type for square scaling name = 'square' def __init__(self, axis, **kwargs): kwargs['gamma'] = 2 super(SquareScale, self).__init__(axis, **kwargs)
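Because gamma is read from the keyword arguments of set_xscale/set_yscale, using these scales outside Mantid Workbench only requires registering them with matplotlib. A minimal sketch (hypothetical standalone usage; it assumes mantid.plots.scales is importable, whereas inside Mantid the registration is handled for you):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.scale import register_scale

from mantid.plots.scales import PowerScale, SquareScale

# Make the 'power' and 'square' names known to set_xscale/set_yscale.
register_scale(PowerScale)
register_scale(SquareScale)

x = np.linspace(0, 10, 100)
fig, ax = plt.subplots()
ax.plot(x, x ** 2)
ax.set_yscale('power', gamma=0.5)   # gamma is forwarded to PowerScale.__init__
# ax.set_yscale('square')           # convenience subclass with gamma fixed at 2
plt.show()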
gpl-3.0
pompiduskus/scikit-learn
examples/hetero_feature_union.py
288
6236
""" ============================================= Feature Union with Heterogeneous Data Sources ============================================= Datasets can often contain components of that require different feature extraction and processing pipelines. This scenario might occur when: 1. Your dataset consists of heterogeneous data types (e.g. raster images and text captions) 2. Your dataset is stored in a Pandas DataFrame and different columns require different processing pipelines. This example demonstrates how to use :class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing different types of features. We use the 20-newsgroups dataset and compute standard bag-of-words features for the subject line and body in separate pipelines as well as ad hoc features on the body. We combine them (with weights) using a FeatureUnion and finally train a classifier on the combined set of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Author: Matt Terry <[email protected]> # # License: BSD 3 clause from __future__ import print_function import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_20newsgroups from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. >> len(data[key]) == n_samples Please note that this is the opposite convention to sklearn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, posts): return [{'length': len(text), 'num_sentences': text.count('.')} for text in posts] class SubjectBodyExtractor(BaseEstimator, TransformerMixin): """Extract the subject & body from a usenet post in a single pass. Takes a sequence of strings and produces a dict of sequences. Keys are `subject` and `body`. 
""" def fit(self, x, y=None): return self def transform(self, posts): features = np.recarray(shape=(len(posts),), dtype=[('subject', object), ('body', object)]) for i, text in enumerate(posts): headers, _, bod = text.partition('\n\n') bod = strip_newsgroup_footer(bod) bod = strip_newsgroup_quoting(bod) features['body'][i] = bod prefix = 'Subject:' sub = '' for line in headers.split('\n'): if line.startswith(prefix): sub = line[len(prefix):] break features['subject'][i] = sub return features pipeline = Pipeline([ # Extract the subject & body ('subjectbody', SubjectBodyExtractor()), # Use FeatureUnion to combine the features from subject and body ('union', FeatureUnion( transformer_list=[ # Pipeline for pulling features from the post's subject line ('subject', Pipeline([ ('selector', ItemSelector(key='subject')), ('tfidf', TfidfVectorizer(min_df=50)), ])), # Pipeline for standard bag-of-words model for body ('body_bow', Pipeline([ ('selector', ItemSelector(key='body')), ('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from post's body ('body_stats', Pipeline([ ('selector', ItemSelector(key='body')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights={ 'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0, }, )), # Use a SVC classifier on the combined features ('svc', SVC(kernel='linear')), ]) # limit the list of categories to make running this exmaple faster. categories = ['alt.atheism', 'talk.religion.misc'] train = fetch_20newsgroups(random_state=1, subset='train', categories=categories, ) test = fetch_20newsgroups(random_state=1, subset='test', categories=categories, ) pipeline.fit(train.data, train.target) y = pipeline.predict(test.data) print(classification_report(y, test.target))
bsd-3-clause