repo_name | path | copies | size | content | license
---|---|---|---|---|---
zorojean/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`. Since the weights
follow the estimator order (`lr`, `rf`, `gnb`), the predicted probabilities
of the `GaussianNB` count 5 times as much as those of the other classifiers
when the averaged probability is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
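# For reference, the soft-vote probabilities below are just a weighted mean of
# the individual predict_proba outputs (illustrative sketch, not part of the
# original example; the weights follow the estimator order lr, rf, gnb):
#
#   p_lr, p_rf, p_gnb = (c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3))
#   p_avg = (1 * p_lr + 1 * p_rf + 5 * p_gnb) / (1 + 1 + 5)
#   # p_avg agrees with eclf.fit(X, y).predict_proba(X) up to floating point error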
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'RandomForestClassifier\nweight 1',
'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/scipy/interpolate/fitpack2.py | 12 | 61523 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
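A minimal illustrative check of the ``ext`` modes (an added sketch, not part
of the original example):
>>> spl_zeros = UnivariateSpline(x, y, ext='zeros')
>>> float(spl_zeros(5.0))  # 5.0 lies outside [-3, 3], so 0.0 is returned
0.0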
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
if not np.isfinite(x).all() or not np.isfinite(y).all():
raise ValueError("x and y array must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not np.isfinite(w).all()):
raise ValueError("Input must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing lsq spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not np.isfinite(w).all() or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
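Examples
--------
A minimal sketch over a constant surface on the unit square (an added
illustration; assumes the spline was built with `RectBivariateSpline`):
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 1., 11)
>>> z = np.ones((11, 11))
>>> rbs = RectBivariateSpline(x, y, z)
>>> abs(rbs.integral(0., 1., 0., 1.) - 1.0) < 1e-8
True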
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
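Examples
--------
A minimal usage sketch on scattered points (an added illustration, not from
the original docstring):
>>> from scipy.interpolate import SmoothBivariateSpline
>>> xg, yg = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
>>> x, y = xg.ravel(), yg.ravel()
>>> z = x**2 + y**2
>>> spl = SmoothBivariateSpline(x, y, z, s=0.1)
>>> val = spl(0.5, 0.5)  # close to 0.5, the true value of x**2 + y**2 there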
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was too small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothSphereBivariateSpline :
to create a BivariateSpline through the given points
LSQSphereBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| gpl-3.0 |
sanja7s/SR_Twitter | src_CAPITAL/Soc_Sem_Capital_VOL.py | 1 | 14174 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
analyze assortativity of the graphs in terms of sentiment
'''
from igraph import *
import networkx as nx
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
from scipy.stats.stats import pearsonr
import pandas as pd
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
f_in_user_labels = "usr_num_CVs.tab"
##################
f_in_user_taxons = "user_taxons.tab"
f_in_user_concepts = "user_concepts.tab"
f_in_user_entities = "user_entities.tab"
f_in_num_tweets = "usr_num_tweets.tab"
#########################
#
f_in_user_sentiment = "user_sentiment.tab"
#
# mention graph
#########################
f_in_graph = "threshold_mention_graphs/directed_threshold0.tab"
f_in_graph_weights = "mention_graph_weights.dat"
f_out_sent_mention_graph = "directed_threshold0_sent_val.tab"
IN_DIR = "../../../DATA/CAPITAL/"
f_out_mention = "sentiment_assortativity_mention_2.txt"
#########################
soc_capital = 'node_degree.dat'
sem_capital = 'user_entities.tab'
os.chdir(IN_DIR)
# one time call
def save_node_degree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('node_degree.dat', 'w')
for v in G.vs:
d = G.degree(v.index)
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_MUTUAL_degree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
G.to_undirected(mode='mutual')
not_connected_nodes = G.vs(_degree_eq=0)
to_delete_vertices = not_connected_nodes
print len(to_delete_vertices)
G.delete_vertices(to_delete_vertices)
summary(G)
fo = open('mutual degree', 'w')
for v in G.vs:
d = G.degree(v.index)
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_MUTUAL_weighted_degree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
G.to_undirected(mode='mutual',combine_edges=sum)
not_connected_nodes = G.vs(_degree_eq=0)
to_delete_vertices = not_connected_nodes
print len(to_delete_vertices)
G.delete_vertices(to_delete_vertices)
summary(G)
fo = open('mutual weighted degree', 'w')
for v in G.vs:
d = G.strength(v.index,weights='weight')
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_indegree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('indegree', 'w')
for v in G.vs:
d = G.degree(v.index,mode=IN)
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_outdegree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('outdegree', 'w')
for v in G.vs:
d = G.degree(v.index,mode=OUT)
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_weighted_degree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('weighted_node_degree.dat', 'w')
for v in G.vs:
d = G.strength(v.index, weights='weight', loops=False)
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_weighted_outdegree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('weighted outdegree', 'w')
for v in G.vs:
d = G.strength(v.index,mode=OUT, weights='weight')
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
# one time call
def save_node_weighted_indegree():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('weighted indegree', 'w')
for v in G.vs:
d = G.strength(v.index,mode=IN, weights='weight')
n = v['name']
fo.write(str(n) + '\t' + str(d) + '\n')
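# The readers below load the capital measures written by the save_* helpers
# above. Plain capital files are tab-separated "<node_id>\t<value>" lines; the
# sentiment file carries an extra middle column that is ignored, and Burt's
# index is loaded as a pandas DataFrame from BI_indexR_full.txt.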
def read_soc_capital(soc_capital=soc_capital):
cap = defaultdict(int)
if soc_capital == 'Burt\'s index':
return pd.read_csv('BI_indexR_full.txt',\
encoding='utf-8', delim_whitespace=1)
f = open(soc_capital, "r")
for line in f:
(n,d) = line.split('\t')
cap[int(n)] = float(d)
return cap
def read_sem_capital(sem_capital=sem_capital):
cap = defaultdict(int)
if sem_capital == 'concepts':
f = open("CVs","r")
else:
f = open(sem_capital, "r")
for line in f:
try:
if sem_capital != 'sentiment':
(n,d) = line.split('\t')
if sem_capital == 'status inconsistency':
cap[int(n)] = float(d)
else:
cap[int(n)] = int(d)
else:
(n,nd,d) = line.split('\t')
cap[int(n)] = float(d)
except ValueError:
pass
return cap
def social_capital_vs_sem(soc='weighted degree',sem='entities'):
soc_cap = read_soc_capital(soc)
sem_cap = read_sem_capital(sem)
max_soc_cap = max(soc_cap.values())
max_sem_cap = max(sem_cap.values())
print max_sem_cap, max_soc_cap
coef_sem = 100
coef_soc = 10
soca = []
sema = []
cap = np.zeros((max_soc_cap+1,max_sem_cap+1))
for n in soc_cap:
if n in sem_cap:
v1 = soc_cap[n] / coef_soc
v2 = sem_cap[n] / coef_sem
soca.append(soc_cap[n])
sema.append(sem_cap[n])
cap[v1][v2] += 1
print soc, sem
soca = np.array(soca)
sema = np.array(sema)
print pearsonr(soca, sema)
plot_capitals_seaborn(soca, sema, name_soc=soc, name_sem=sem)
def social_capital_vs_sentiment(soc='weighted degree'):
soc_cap = read_soc_capital(soc)
sem_cap = read_sem_capital('sentiment')
max_soc_cap = max(soc_cap.values())
print max_soc_cap
coef_soc = 50
cap = defaultdict(int)
soca = []
sema = []
for n in soc_cap:
if n in sem_cap:
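# v1 scales the social capital down by coef_soc; v2 rounds the sentiment
# score up to the nearest 0.05 before tallying co-occurrence counts in cap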
v1 = soc_cap[n] / coef_soc
v2 = math.ceil(sem_cap[n] * 20) / 20
soca.append(soc_cap[n])
sema.append(sem_cap[n])
if v1 in cap:
cap[v1][v2] += 1
else:
cap[v1] = defaultdict(float)
cap[v1][v2] += 1
soca = np.array(soca)
sema = np.array(sema)
print pearsonr(soca, sema)
plot_sentiment_capital_seaborn(soca, sema, name_soc=soc)
def social_capital_vs_status_inconsistency(soc='weighted degree'):
soc_cap = read_soc_capital(soc)
sem_cap = read_sem_capital('status inconsistency')
max_soc_cap = max(soc_cap.values())
print max_soc_cap
soca = []
sema = []
coef_soc = 50
cap = defaultdict(int)
for n in soc_cap:
if n in sem_cap:
v1 = soc_cap[n] / coef_soc
v2 = math.ceil(sem_cap[n] * 20) / 20
soca.append(soc_cap[n])
sema.append(sem_cap[n])
if v1 in cap:
cap[v1][v2] += 1
else:
cap[v1] = defaultdict(float)
cap[v1][v2] += 1
soca = np.array(soca)
sema = np.array(sema)
print pearsonr(soca, sema)
plot_status_inconsistency_capital(cap, coef_soc, name_soc=soc)
def plot_status_inconsistency_capital(cap, coef_soc, name_soc='deg'):
x = []
y = []
vol = []
for i in cap:
for j in cap[i]:
if cap[i][j] > 0 and i < 16:
x.append(i*coef_soc)
y.append(j)
vol.append(cap[i][j]*7)
#print cap
plt.clf()
plt.scatter(y,x,s=vol, c='darkblue', edgecolors='none',alpha=0.4)
plt.tight_layout()
plt.ylabel('social capital:' + name_soc )
#plt.yscale('log')
plt.xlim(-1,1)
plt.xlabel('status inconsistency')
plt.savefig(name_soc + 'status inconsistency v7.png')
#plt.show()
def plot_sentiment_capital(cap, coef_soc, name_soc='deg'):
x = []
y = []
vol = []
for i in cap:
for j in cap[i]:
if cap[i][j] > 0 and i < 16:
x.append(i*coef_soc)
y.append(j)
vol.append(cap[i][j]*20)
print cap
plt.clf()
plt.scatter(y,x,s=vol, c='darkorchid', edgecolors='none',alpha=0.4)
plt.tight_layout()
plt.ylabel('social capital: popularity' )
#plt.yscale('log')
plt.xlim(-1,1)
plt.xlabel('sentiment score ')
plt.savefig(name_soc + 'setiment3.png')
#plt.show()
def plot_sentiment_capital_seaborn(x, y, name_soc='deg'):
xlabel = 'social capital: popularity'
ylabel = 'sentiment'
labels = [r'$ 10^0 $', r'$ 10^0 $', r'$ 10^1 $', r'$ 10^2 $', r'$ 10^3 $', r'$ 10^4 $', r'$ 10^5 $', r'$ 10^6 $', r'$ 10^7 $', r'$ 10^8 $']
labelsy = ['-1','-0.5','0','0.5','1']
with sns.axes_style("white"):
g = sns.jointplot(x=np.log(x+1), y=y, kind="hex", color="darkorchid").set_axis_labels(xlabel, ylabel)
#g.set(xticklabels=labels)
g.ax_joint.set_xticklabels(labels)
#g.ax_joint.set_yticklabels(labelsy)
#plt.tight_layout()
plt.savefig(name_soc + 'sent.eps', bbox_inches='tight', dpi=550)
"""
plt.ylabel('social capital: popularity' )
#plt.yscale('log')
plt.xlim(-1,1)
plt.xlabel('sentiment score ')
"""
def social_capital_vs_IN_OUT_sentiment_plot(coef_socIN=50, coef_socOUT=50, name_soc='weighted degINOUT'):
soc_capIN = read_soc_capital('weighted indegree')
soc_capOUT = read_soc_capital('weighted outdegree')
sem_cap = read_sem_capital('sentiment')
capIN = defaultdict(int)
for n in soc_capIN:
if n in sem_cap:
v1 = soc_capIN[n] / coef_socIN
v2 = math.ceil(sem_cap[n] * 10) / 10
if v1 in capIN:
capIN[v1][v2] += 1
else:
capIN[v1] = defaultdict(float)
capIN[v1][v2] += 1
capOUT = defaultdict(int)
for n in soc_capOUT:
if n in sem_cap:
v1 = soc_capOUT[n] / coef_socOUT
v2 = math.ceil(sem_cap[n] * 10) / 10
if v1 in capOUT:
capOUT[v1][v2] += 1
else:
capOUT[v1] = defaultdict(float)
capOUT[v1][v2] += 1
xIN = []
yIN = []
volIN = []
for i in capIN:
for j in capIN[i]:
if capIN[i][j] > 0 and i < 16:
xIN.append(i*coef_socIN)
yIN.append(j)
volIN.append(capIN[i][j]*10)
xOUT = []
yOUT = []
volOUT = []
for i in capOUT:
for j in capOUT[i]:
if capOUT[i][j] > 0 and i < 16:
xOUT.append(i*coef_socOUT)
yOUT.append(j)
volOUT.append(capOUT[i][j]*10)
plt.clf()
plt.scatter(yOUT,xOUT,s=volOUT, c='red', edgecolors='none',alpha=0.7)
plt.scatter(yIN,xIN,s=volIN, edgecolors='none',alpha=0.1)
plt.ylabel('Social capital ')
plt.xlabel('Semantiment score')
plt.savefig(name_soc + '2setiment.png')
#plt.show()
def plot_capitals(cap, coef_soc, coef_sem, name_soc='degree', name_sem='CVs'):
x = []
y = []
vol = []
for i in range(len(cap)):
for j in range(len(cap[i])):
if cap[i][j] > 0 and i < 16:
x.append(i*coef_soc)
y.append(j*coef_sem)
vol.append(cap[i][j])
print cap
plt.clf()
plt.scatter(y,x,s=vol,c='darkorchid', edgecolors='none',alpha=0.7)
plt.tight_layout()
plt.ylabel('social capital: ' + name_soc)
#plt.yscale('log')
plt.xlabel('semantic capital: ' + name_sem )
#plt.xlim(-3,33)
plt.savefig(name_sem + name_soc + '3.png')
#plt.show()
def plot_capitals_seaborn(x, y, name_soc='degree', name_sem='CVs'):
xlabel = 'social capital: activity'
ylabel = 'semantic capital: ' + name_sem
labels = [r'$ 10^0 $', r'$ 10^0 $', r'$ 10^1 $', r'$ 10^2 $', r'$ 10^3 $', r'$ 10^4 $', r'$ 10^5 $', r'$ 10^6 $', r'$ 10^7 $', r'$ 10^8 $']
#labels = ['s','d','g','s','d','g','s','d','g']
sns.set_style("white")
g = sns.jointplot(x=np.log(x+1), y=y, kind="hex", annot_kws=dict(stat="r"), color="darkred").set_axis_labels(xlabel, ylabel)
#g.set(xticklabels=labels)
g.ax_joint.set_xticklabels(labels)
#plt.tight_layout()
plt.savefig(name_sem + name_soc + '77.eps', bbox_inches='tight', dpi=550)
#soc='weighted outdegree'
#social_capital_vs_sem(soc=soc,sem='entities')
#social_capital_vs_sem(soc=soc,sem='entities')
#social_capital_vs_sem(soc=soc,sem='concepts')
#soc='weighted indegree'
#social_capital_vs_sentiment(soc)
#soc='weighted outdegree'
#social_capital_vs_sentiment(soc)
#social_capital_vs_IN_OUT_sentiment_plot()
#soc='outdegree'
#social_capital_vs_status_inconsistency(soc=soc)
def plot_BIcapitals_seaborn(x, y, name_soc, name_sem):
xlabel = 'social capital: Burt\'s index'
ylabel = 'semantic capital: ' + name_sem
sns.set_style("white")
g = sns.jointplot(x=np.log(x+1), y=y, kind="hex", annot_kws=dict(stat="r"), color="darkgreen").set_axis_labels(xlabel, ylabel)
#g.set(xticklabels=labels)
#g.ax_joint.set_xticklabels(labels)
#plt.tight_layout()
plt.savefig(name_sem + name_soc + '77.eps', bbox_inches='tight', dpi=550)
def plot_sentiment_BIcapital_seaborn(x, y, name_soc='deg'):
xlabel = 'social capital: Burt\'s index'
ylabel = 'semantic capital: sentiment'
labelsy = ['-1','-0.5','0','0.5','1']
with sns.axes_style("white"):
g = sns.jointplot(x=x, y=y, kind="hex", annot_kws=dict(stat="r"), color="darkgreen").set_axis_labels(xlabel, ylabel)
#g.set(xticklabels=labels)
#g.ax_joint.set_xticklabels(labels)
#g.ax_joint.set_yticklabels(labelsy)
#plt.tight_layout()
plt.savefig(name_soc + 'sent.eps', bbox_inches='tight', dpi=550)
def BI_capital_vs_sentiment(soc, sem):
soc_cap = read_soc_capital(soc)
soc_cap = soc_cap.set_index('id')['bi'].to_dict()
for el in soc_cap:
if soc_cap[el] > 1:
soc_cap[el] = 1
sem_cap = read_sem_capital('sentiment')
print max(soc_cap.values())
cap = defaultdict(int)
soca = []
sema = []
for n in soc_cap:
if n in sem_cap:
v1 = math.ceil(soc_cap[n] * 10) / 10
v2 = math.ceil(sem_cap[n] * 10) / 10
soca.append(soc_cap[n])
sema.append(sem_cap[n])
if v1 in cap:
cap[v1][v2] += 1
else:
cap[v1] = defaultdict(float)
cap[v1][v2] += 1
soca = np.array(soca)
sema = np.array(sema)
print pearsonr(soca, sema)
plot_sentiment_BIcapital_seaborn(soca, sema, name_soc=soc)
def BI_capital_vs_sem(soc,sem):
soc_cap = read_soc_capital(soc)
soc_cap = soc_cap.set_index('id')['bi'].to_dict()
for el in soc_cap:
if soc_cap[el] > 1:
soc_cap[el] = 1
sem_cap = read_sem_capital(sem)
max_soc_cap = max(soc_cap.values())
max_sem_cap = max(sem_cap.values())
print max_sem_cap, max_soc_cap
coef_sem = 100
soca = []
sema = []
cap = defaultdict(int)
for n in soc_cap:
if n in sem_cap:
v1 = math.ceil(soc_cap[n] * 100) / 100
v2 = sem_cap[n] / coef_sem
soca.append(soc_cap[n])
sema.append(sem_cap[n])
if v1 in cap:
cap[v1][v2] += 1
else:
cap[v1] = defaultdict(float)
cap[v1][v2] += 1
print soc, sem
soca = np.array(soca)
sema = np.array(sema)
print pearsonr(soca, sema)
plot_BIcapitals_seaborn(soca, sema, name_soc=soc, name_sem=sem)
soc='Burt\'s index'
BI_capital_vs_sentiment(soc=soc,sem='sentiment')
#BI_capital_vs_sem(soc=soc,sem='entities')
#BI_capital_vs_sem(soc=soc,sem='concepts')
| mit |
sanjayankur31/nest-simulator | pynest/examples/synapsecollection.py | 8 | 5755 | # -*- coding: utf-8 -*-
#
# synapsecollection.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Synapse Collection usage example
--------------------------------
Example script to show some of the possibilities of the SynapseCollection class. We
connect neurons, and get the SynapseCollection with a GetConnections call. To get
a better understanding of the connections, we plot the weights between the
sources and targets.
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
def makeMatrix(sources, targets, weights):
"""
Returns a matrix with the weights between the source and target node_ids.
"""
aa = np.zeros((max(sources) + 1, max(targets) + 1))
for src, trg, wght in zip(sources, targets, weights):
aa[src, trg] += wght
return aa
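# For example (hypothetical values): makeMatrix([1, 2], [2, 1], [0.5, 1.5])
# returns a 3x3 array with aa[1, 2] == 0.5 and aa[2, 1] == 1.5.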
def plotMatrix(srcs, tgts, weights, title, pos):
"""
Plots weight matrix.
"""
plt.subplot(pos)
plt.matshow(makeMatrix(srcs, tgts, weights), fignum=False)
plt.xlim([min(tgts) - 0.5, max(tgts) + 0.5])
plt.xlabel('target')
plt.ylim([max(srcs) + 0.5, min(srcs) - 0.5])
plt.ylabel('source')
plt.title(title)
plt.colorbar(fraction=0.046, pad=0.04)
"""
Start with a simple, one_to_one example.
We create the neurons, connect them, and get the connections. From this we can
get the connected sources, targets, and weights. The corresponding matrix will
be the identity matrix, as we have a one_to_one connection.
"""
nest.ResetKernel()
nrns = nest.Create('iaf_psc_alpha', 10)
nest.Connect(nrns, nrns, 'one_to_one')
conns = nest.GetConnections(nrns, nrns) # This returns a SynapseCollection
# We can get the desired information from the SynapseCollection with a simple get() call.
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
# Plot the matrix consisting of the weights between the sources and targets
plt.figure(figsize=(12, 10))
plotMatrix(srcs, tgts, weights, 'Uniform weight', 121)
"""
Add some weights to the connections, and plot the updated weight matrix.
"""
# We can set data on the connections with a simple set() call.
w = [{'weight': x * 1.0} for x in range(1, 11)]
conns.set(w)
weights = conns.weight
plotMatrix(srcs, tgts, weights, 'Set weight', 122)
"""
We can also plot an all_to_all connection, with uniformly distributed weights,
and a different number of sources and targets.
"""
nest.ResetKernel()
pre = nest.Create('iaf_psc_alpha', 10)
post = nest.Create('iaf_psc_delta', 5)
nest.Connect(pre, post,
syn_spec={'weight':
{'distribution': 'uniform', 'low': 0.5, 'high': 4.5}})
# Get a SynapseCollection with all connections
conns = nest.GetConnections()
srcs = conns.source
tgts = conns.target
weights = conns.weight
plt.figure(figsize=(12, 10))
plotMatrix(srcs, tgts, weights, 'All to all connection', 111)
"""
Lastly, we'll do an example that is a bit more complex. We connect different
neurons with different rules, synapse models and weight distributions, and get
different SynapseCollections by calling GetConnections with different inputs.
"""
nest.ResetKernel()
nrns = nest.Create('iaf_psc_alpha', 15)
nest.Connect(nrns[:5], nrns[:5],
'one_to_one',
{'synapse_model': 'stdp_synapse',
'weight': {'distribution': 'normal', 'mu': 5.0, 'sigma': 2.0}})
nest.Connect(nrns[:10], nrns[5:12],
{'rule': 'pairwise_bernoulli', 'p': 0.4},
{'weight': 4.0})
nest.Connect(nrns[5:10], nrns[:5],
{'rule': 'fixed_total_number', 'N': 5},
{'weight': 3.0})
nest.Connect(nrns[10:], nrns[:12],
'all_to_all',
{'synapse_model': 'stdp_synapse',
'weight': {'distribution': 'uniform', 'low': 1., 'high': 5.}})
nest.Connect(nrns, nrns[12:],
{'rule': 'fixed_indegree', 'indegree': 3})
# First get a SynapseCollection consisting of all the connections
conns = nest.GetConnections()
srcs = conns.source
tgts = conns.target
weights = conns.weight
plt.figure(figsize=(14, 12))
plotMatrix(list(srcs), list(tgts), weights, 'All connections', 221)
# Get SynapseCollection consisting of a subset of connections
conns = nest.GetConnections(nrns[:10], nrns[:10])
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'Connections of the first ten neurons', 222)
# Get SynapseCollection consisting of just the stdp_synapses
conns = nest.GetConnections(synapse_model='stdp_synapse')
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'Connections with stdp_synapse', 223)
# Get SynapseCollection consisting of the fixed_total_number connections, but set
# weight before plotting
conns = nest.GetConnections(nrns[5:10], nrns[:5])
w = [{'weight': x * 1.0} for x in range(1, 6)]
conns.set(w)
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'fixed_total_number, set weight', 224)
plt.show()
| gpl-2.0 |
teonlamont/mne-python | tutorials/plot_sensors_decoding.py | 4 | 5754 | """
=================================
Decoding sensor space data (MVPA)
=================================
Decoding, a.k.a. MVPA or supervised machine learning, is applied to MEG
data in sensor space. Here the classifier is applied to every time
point.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator,
cross_val_multiscore, LinearModel, get_coef)
data_path = sample.data_path()
plt.close('all')
# sphinx_gallery_thumbnail_number = 4
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = dict(audio_left=1, visual_left=3)
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here we low-pass at 20 Hz so we can decimate more heavily and allow
# the example to run faster.
raw.filter(None, 20., fir_design='firwin')
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0.), preload=True,
reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads')
###############################################################################
# Temporal decoding
# -----------------
#
# We'll use logistic regression as the machine learning model for binary
# classification. We will train the classifier on all left-auditory vs.
# left-visual trials of MEG data.
X = epochs.get_data() # MEG signals: n_epochs, n_channels, n_times
y = epochs.events[:, 2]  # target: auditory left vs. visual left
clf = make_pipeline(StandardScaler(), LogisticRegression())
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
plt.show()
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression()))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
time_decod.fit(X, y)
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
evoked.plot_joint(times=np.arange(0., .500, .100), title='patterns',
**joint_kwargs)
###############################################################################
# Temporal Generalization
# -----------------------
#
# This runs the analysis used in [1]_ and further detailed in [2]_
#
# The idea is to fit the models on each time instant and see how it
# generalizes to any other time point.
# define the Temporal Generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
plt.show()
# Plot the full matrix
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal Generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
plt.show()
###############################################################################
# Exercise
# --------
# - Can you improve the performance using full epochs and a common spatial
# pattern (CSP) used by most BCI systems?
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# Have a look at the example
# :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_space.py`
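#
# A minimal sketch of the first exercise above (not part of the original
# tutorial): CSP spatial filters are fit on the full epochs and their
# log-power features are fed to a linear classifier. The number of
# components and the default (accuracy) scoring are assumptions here, not
# tuned or recommended settings.
from mne.decoding import CSP
csp_clf = make_pipeline(CSP(n_components=4), LogisticRegression())
csp_scores = cross_val_multiscore(csp_clf, X, y, cv=5, n_jobs=1)
print('CSP + LogisticRegression mean accuracy: %0.3f' % np.mean(csp_scores))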
#
# References
# ==========
#
# .. [1] Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
# and Stanislas Dehaene, "Two distinct dynamic modes subtend the
# detection of unexpected sounds", PLOS ONE, 2013,
# http://www.ncbi.nlm.nih.gov/pubmed/24475052
#
# .. [2] King & Dehaene (2014) 'Characterizing the dynamics of mental
# representations: the temporal generalization method', Trends In
# Cognitive Sciences, 18(4), 203-210.
# http://www.ncbi.nlm.nih.gov/pubmed/24593982
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/categorical.py | 15 | 5290 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING
import pandas as pd
from pandas.api.types import CategoricalDtype
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class CategoricalAccessor(object):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.dtype, CategoricalDtype):
raise ValueError("Cannot call CategoricalAccessor on type {}".format(series.dtype))
self._data = series
@property
def categories(self) -> pd.Index:
"""
The categories of this categorical.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
"""
return self._data.dtype.categories
@categories.setter
def categories(self, categories: pd.Index) -> None:
raise NotImplementedError()
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.ordered
False
"""
return self._data.dtype.ordered
@property
def codes(self) -> "ps.Series":
"""
Return Series of codes as well as the index.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
return self._data._with_new_scol(self._data.spark.column).rename()
def add_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_ordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_unordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_categories(self, removals: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_unused_categories(self) -> "ps.Series":
raise NotImplementedError()
def rename_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def reorder_categories(
self, new_categories: pd.Index, ordered: bool = None, inplace: bool = False
) -> "ps.Series":
raise NotImplementedError()
def set_categories(
self,
new_categories: pd.Index,
ordered: bool = None,
rename: bool = False,
inplace: bool = False,
) -> "ps.Series":
raise NotImplementedError()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.categorical
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.categorical.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.categorical tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.categorical,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/tseries/offsets/test_yqm_offsets.py | 9 | 44135 | # -*- coding: utf-8 -*-
"""
Tests for Year, Quarter, and Month-based DateOffset subclasses
"""
from datetime import datetime
import pytest
import pandas as pd
from pandas import Timestamp
from pandas import compat
from pandas.tseries.offsets import (BMonthBegin, BMonthEnd,
MonthBegin, MonthEnd,
YearEnd, YearBegin, BYearEnd, BYearBegin,
QuarterEnd, QuarterBegin,
BQuarterEnd, BQuarterBegin)
from .test_offsets import Base
from .common import assert_offset_equal, assert_onOffset
# --------------------------------------------------------------------
# Misc
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert (result.time() == date.time())
@pytest.mark.parametrize('n', [-2, 1])
@pytest.mark.parametrize('cls', [MonthBegin, MonthEnd,
BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd,
BQuarterBegin, BQuarterEnd,
YearBegin, YearEnd,
BYearBegin, BYearEnd])
def test_apply_index(cls, n):
offset = cls(n=n)
rng = pd.date_range(start='1/1/2000', periods=100000, freq='T')
ser = pd.Series(rng)
res = rng + offset
res_v2 = offset.apply_index(rng)
assert (res == res_v2).all()
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
# apply_index is only for indexes, not series, so no res2_v2
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
@pytest.mark.parametrize('offset', [QuarterBegin(), QuarterEnd(),
BQuarterBegin(), BQuarterEnd()])
def test_on_offset(offset):
dates = [datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31] if not (m == 11 and d == 31)]
for date in dates:
res = offset.onOffset(date)
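# reference check: a date is on-offset exactly when rolling it forward by
# the offset and then back again returns the same date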
slow_version = date == (date + offset) - offset
assert res == slow_version
# --------------------------------------------------------------------
# Months
class TestMonthBegin(Base):
_offset = MonthBegin
offset_cases = []
# NOTE: I'm not entirely happy with the logic here for Begin -ss
# see thread 'offset conventions' on the ML
offset_cases.append((MonthBegin(), {
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 2, 1): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
offset_cases.append((MonthBegin(0), {
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 12, 3): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
offset_cases.append((MonthBegin(2), {
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 12, 28): datetime(2008, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
offset_cases.append((MonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 5, 31): datetime(2008, 5, 1),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 1, 2): datetime(2006, 1, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestMonthEnd(Base):
_offset = MonthEnd
def test_day_of_month(self):
dt = datetime(2007, 1, 1)
offset = MonthEnd()
result = dt + offset
assert result == Timestamp(2007, 1, 31)
result = result + offset
assert result == Timestamp(2007, 2, 28)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
assert result == expected
offset_cases = []
offset_cases.append((MonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
offset_cases.append((MonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
offset_cases.append((MonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
offset_cases.append((MonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestBMonthBegin(Base):
_offset = BMonthBegin
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
assert not offset1 != offset2
offset_cases = []
offset_cases.append((BMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1)}))
offset_cases.append((BMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2)}))
offset_cases.append((BMonthBegin(2), {
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
offset_cases.append((BMonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestBMonthEnd(Base):
_offset = BMonthEnd
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
assert result == expected
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
assert not offset1 != offset2
offset_cases = []
offset_cases.append((BMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
offset_cases.append((BMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
offset_cases.append((BMonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
offset_cases.append((BMonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
# --------------------------------------------------------------------
# Quarters
class TestQuarterBegin(Base):
def test_repr(self):
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin()) == expected
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin(startingMonth=3)) == expected
expected = "<QuarterBegin: startingMonth=1>"
assert repr(QuarterBegin(startingMonth=1)) == expected
def test_isAnchored(self):
assert QuarterBegin(startingMonth=1).isAnchored()
assert QuarterBegin().isAnchored()
assert not QuarterBegin(2, startingMonth=1).isAnchored()
def test_offset_corner_case(self):
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
offset_cases = []
offset_cases.append((QuarterBegin(startingMonth=1), {
datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1)}))
offset_cases.append((QuarterBegin(startingMonth=2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1)}))
offset_cases.append((QuarterBegin(startingMonth=1, n=0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1)}))
offset_cases.append((QuarterBegin(startingMonth=1, n=-1), {
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1)}))
offset_cases.append((QuarterBegin(startingMonth=1, n=2), {
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd()) == expected
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd(startingMonth=3)) == expected
expected = "<QuarterEnd: startingMonth=1>"
assert repr(QuarterEnd(startingMonth=1)) == expected
def test_isAnchored(self):
assert QuarterEnd(startingMonth=1).isAnchored()
assert QuarterEnd().isAnchored()
assert not QuarterEnd(2, startingMonth=1).isAnchored()
def test_offset_corner_case(self):
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
offset_cases = []
offset_cases.append((QuarterEnd(startingMonth=1), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31)}))
offset_cases.append((QuarterEnd(startingMonth=2), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31)}))
offset_cases.append((QuarterEnd(startingMonth=1, n=0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30)}))
offset_cases.append((QuarterEnd(startingMonth=1, n=-1), {
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30)}))
offset_cases.append((QuarterEnd(startingMonth=1, n=2), {
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestBQuarterBegin(Base):
_offset = BQuarterBegin
def test_repr(self):
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin()) == expected
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin(startingMonth=3)) == expected
expected = "<BusinessQuarterBegin: startingMonth=1>"
assert repr(BQuarterBegin(startingMonth=1)) == expected
def test_isAnchored(self):
assert BQuarterBegin(startingMonth=1).isAnchored()
assert BQuarterBegin().isAnchored()
assert not BQuarterBegin(2, startingMonth=1).isAnchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
offset_cases = []
offset_cases.append((BQuarterBegin(startingMonth=1), {
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1)}))
offset_cases.append((BQuarterBegin(startingMonth=2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1)}))
offset_cases.append((BQuarterBegin(startingMonth=1, n=0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2)}))
offset_cases.append((BQuarterBegin(startingMonth=1, n=-1), {
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1)}))
offset_cases.append((BQuarterBegin(startingMonth=1, n=2), {
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestBQuarterEnd(Base):
_offset = BQuarterEnd
def test_repr(self):
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd()) == expected
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd(startingMonth=3)) == expected
expected = "<BusinessQuarterEnd: startingMonth=1>"
assert repr(BQuarterEnd(startingMonth=1)) == expected
def test_isAnchored(self):
assert BQuarterEnd(startingMonth=1).isAnchored()
assert BQuarterEnd().isAnchored()
assert not BQuarterEnd(2, startingMonth=1).isAnchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
offset_cases = []
offset_cases.append((BQuarterEnd(startingMonth=1), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31)}))
offset_cases.append((BQuarterEnd(startingMonth=2), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30)}))
offset_cases.append((BQuarterEnd(startingMonth=1, n=0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30)}))
offset_cases.append((BQuarterEnd(startingMonth=1, n=-1), {
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31)}))
offset_cases.append((BQuarterEnd(startingMonth=1, n=2), {
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
# --------------------------------------------------------------------
# Years
class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
pytest.raises(ValueError, YearBegin, month=13)
offset_cases = []
offset_cases.append((YearBegin(), {
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1)}))
offset_cases.append((YearBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1)}))
offset_cases.append((YearBegin(3), {
datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1)}))
offset_cases.append((YearBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1)}))
offset_cases.append((YearBegin(-2), {
datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1)}))
offset_cases.append((YearBegin(month=4), {
datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1)}))
offset_cases.append((YearBegin(0, month=4), {
datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1)}))
offset_cases.append((YearBegin(4, month=4), {
datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1)}))
offset_cases.append((YearBegin(-1, month=4), {
datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1)}))
offset_cases.append((YearBegin(-3, month=4), {
datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
pytest.raises(ValueError, YearEnd, month=13)
offset_cases = []
offset_cases.append((YearEnd(), {
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31)}))
offset_cases.append((YearEnd(0), {
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31)}))
offset_cases.append((YearEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((YearEnd(-2), {
datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestYearEndDiffMonth(Base):
offset_cases = []
offset_cases.append((YearEnd(month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 15): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2009, 3, 31),
datetime(2008, 3, 30): datetime(2008, 3, 31),
datetime(2005, 3, 31): datetime(2006, 3, 31),
datetime(2006, 7, 30): datetime(2007, 3, 31)}))
offset_cases.append((YearEnd(0, month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 28): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2008, 3, 31),
datetime(2005, 3, 30): datetime(2005, 3, 31)}))
offset_cases.append((YearEnd(-1, month=3),
{datetime(2007, 1, 1): datetime(2006, 3, 31),
datetime(2008, 2, 28): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2007, 3, 31),
datetime(2006, 3, 29): datetime(2005, 3, 31),
datetime(2006, 3, 30): datetime(2005, 3, 31),
datetime(2007, 3, 1): datetime(2006, 3, 31)}))
offset_cases.append((YearEnd(-2, month=3),
{datetime(2007, 1, 1): datetime(2005, 3, 31),
datetime(2008, 6, 30): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2006, 3, 31)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True),
(YearEnd(month=3), datetime(2008, 1, 1), False),
(YearEnd(month=3), datetime(2006, 3, 31), True),
(YearEnd(month=3), datetime(2006, 3, 29), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
pytest.raises(ValueError, BYearBegin, month=13)
pytest.raises(ValueError, BYearEnd, month=13)
offset_cases = []
offset_cases.append((BYearBegin(), {
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2)}))
offset_cases.append((BYearBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2)}))
offset_cases.append((BYearBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3)}))
offset_cases.append((BYearBegin(-2), {
datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestBYearEnd(Base):
_offset = BYearEnd
offset_cases = []
offset_cases.append((BYearEnd(), {
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29)}))
offset_cases.append((BYearEnd(0), {
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29)}))
offset_cases.append((BYearEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
        datetime(2006, 12, 30): datetime(2006, 12, 29)}))
offset_cases.append((BYearEnd(-2), {
datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
class TestBYearEndLagged(Base):
_offset = BYearEnd
def test_bad_month_fail(self):
pytest.raises(Exception, BYearEnd, month=13)
pytest.raises(Exception, BYearEnd, month=0)
offset_cases = []
offset_cases.append((BYearEnd(month=6), {
datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)}))
offset_cases.append((BYearEnd(n=-1, month=6), {
datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)}))
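    # Note (informal): 30 June 2007 falls on a Saturday, so the business year end
    # anchored to June is Friday 29 June 2007, which is why both cases above
    # expect 2007-06-29.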
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
assert offset.rollforward(date) == datetime(2010, 6, 30)
assert offset.rollback(date) == datetime(2009, 6, 30)
on_offset_cases = [(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
| bsd-3-clause |
jonwright/ImageD11 | scripts/ImageD11_gui.py | 1 | 8151 | #!/usr/bin/env python
from __future__ import print_function
# ImageD11_v1.0 Software for beamline ID11
# Copyright (C) 2005-2007 Jon Wright
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Graphical user interface for ImageD11
Uses Tkinter (comes with python, normally)
Also depends eventually on matplotlib (publication quality 2d plotting)
and on OpenGL for plotting things in 3D (reciprocal space).
Hopefully the gui and the gruntwork can be completely separated, eventually.
"""
try:
from Tkinter import *
import tkFileDialog as filedialog
from tkMessageBox import showinfo
from ScrolledText import ScrolledText
except:
from tkinter import *
import tkinter.filedialog as filedialog
from tkinter.messagebox import showinfo
from tkinter.scrolledtext import ScrolledText
import logging
import sys
import os
# GuiMaker is for building up the windows etc
from ImageD11.tkGui.guimaker import GuiMaker
from ImageD11.tkGui import twodplot, guipeaksearch, guitransformer, guiindexer, guisolver
from ImageD11 import __version__, guicommand
from ImageD11.license import license
# This does not do anything unless you call it as a program:
if __name__ == "__main__":
# get the output!
# Set up the logging stuff
console = logging.StreamHandler(sys.stdout)
# set a format which is simpler for console use
formatter = logging.Formatter('%(levelname)-8s : %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
console.setLevel(logging.DEBUG)
root = logging.getLogger('')
root.addHandler(console)
    root.setLevel(logging.DEBUG)
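    # Optional sketch (not part of the original script): a file handler could be
    # added next to the console handler to keep a persistent log of a GUI session, e.g.
    #    logfile = logging.FileHandler("ImageD11_gui.log")  # hypothetical filename
    #    logfile.setFormatter(formatter)
    #    root.addHandler(logfile)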
# Help message - TODO - proper online help
def help():
hlp = """Try pointing your web browser at:
http://fable.sourceforge.net/index.php/ImageD11
See also the html documentation for programmers, somewhere like:
file:///c:/python24/Lib/site-packages/ImageD11/doc/ImageD11.html
"""
showinfo("Help", "Please try harder\n"+hlp)
# Credits message
ImageD11credits = """Thanks to:
All of the Fable team, which includes at least:
Andy Gotz, Gavin Vaughan, Henning O. Sorensen,
Soren Schmidt, Henning Poulsen, Larry Margulies
Erik Knudsen,
...and others who should remind me to mention them
Tine Knudsen who bravely helped commission the
introduction of a second rotation axis (wedge).
Benoit Mallard for his assistance with some extreme
programming to debug the transformation module.
Younes ElHachi for adding the eps_sig calculations.
John Hunter for the matplotlib plotting.
All of the pyopengl, Numeric, numpy and python teams
Anyone who tests me and gives useful feedback
Jon Wright, for writing me!
"""
def credits():
showinfo("Credits", ImageD11credits)
# GPL is stored in ImageD11/license.py as a string to be
# displayed in the GUI if the user asks to see it
def showlicense():
win = ScrolledText(Toplevel(), width=100)
win.insert(END, license)
win.pack(expand=1, fill=BOTH)
win.focus_set()
# Inherits from the GuiMaker and uses functions defined above
class TestGuiMaker(GuiMaker):
guicommand.RETURN_NUMERICS = True
guicommander = guicommand.guicommand()
def start(self):
"""
Override the GuiMaker start
These are things to do when the gui starts
eg: show a message about the license and list of things to do
Then build the actual gui
"""
startmessage = """
ImageD11 version %s, Copyright (C) 2005-2017 Jon Wright
ImageD11 comes with ABSOLUTELY NO WARRANTY; for details select help,
license. This is free software, and you are welcome to redistribute it
under certain conditions
Please send useful feedback to [email protected]
""" % (__version__)
startmessage += """
You are using version %s
There have been lots of changes recently!
I would also be happily surprised if it is currently working.
""" % (__version__)
showinfo("Welcome to ImageD11 " + __version__,
startmessage)
# For the peaksearch menu
self.peaksearcher = guipeaksearch.guipeaksearcher(self)
self.transformer = guitransformer.guitransformer(self)
# For the indexing - supposed to generate orientations from the
# unitcell and g-vectors
self.indexer = guiindexer.guiindexer(self)
self.solver = guisolver.guisolver(self)
# Configure the menubar (lists of Tuples of (name,
# underline_char, command_or_submenu) )
self.menuBar = [("File", 0,
[("Print", 0, self.printplot),
("Exit", 1, sys.exit)]),
self.peaksearcher.menuitems,
self.transformer.menuitems,
self.indexer.menuitems,
self.solver.menuitems,
("Plotting", 0,
[("Autoscale", 0, self.autoscaleplot),
("Clear plot", 0, self.clearplot),
]),
("Help", 0,
[("Help Me!", 0, help),
("History", 1, self.history),
("Credits", 0, credits),
("License", 0, showlicense)
])]
            # The twodplot object should be taking care of its own menu
# Stop doing it here - TODO
def history(self):
win = ScrolledText(Toplevel(), width=100)
history = self.guicommander.gethistory()
win.insert(END, history)
win.pack(expand=1, fill=BOTH)
win.focus_set()
def printplot(self):
"""
Print the 2D plot (probably to a file?)
"""
self.twodplotter.printplot()
def autoscaleplot(self):
"""
Autoscale the plot
"""
self.twodplotter.autoscale()
def clearplot(self):
"""
Clear out the twodplot
"""
self.twodplotter.clear()
def makeWidgets(self):
"""
Draw the gui and initialise some hidden things
"""
# TODO Get size of TopLevels window and position it in a
# sensible way
#
# Opening and saving file widgets, normally hidden, they
# remember where you were for working directories
self.opener = filedialog.Open()
self.saver = filedialog.SaveAs()
#
# Draw the twodplot
self.twodplotter = twodplot.twodplot(self)
self.twodplotter.pack(side=RIGHT, expand=1, fill=BOTH)
# Start up Tkinter
root = Tk()
root.wm_title("ImageD11")
# Instantiate an object of the class TestGuiMaker
TestGuiMaker()
# Thats it!
root.mainloop()
| gpl-2.0 |
Markus-Goetz/CDS-Invenio-Authorlist | modules/webstat/lib/webstat_engine.py | 3 | 87275 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random
try:
import xlwt
xlwt_imported = True
except ImportError:
xlwt_imported = False
from invenio.config import CFG_TMPDIR, CFG_SITE_URL, CFG_SITE_NAME, CFG_BINDIR
from invenio.urlutils import redirect_to_url
from invenio.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values
from invenio.search_engine_utils import get_fieldvalues
from invenio.dbquery import run_sql, \
wash_table_column_name
from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
from invenio.bibcirculation_utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.bibcirculation_dblayer import get_id_bibrec, \
get_borrower_data
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args.get('collection','All') == 'All':
sql_query_g = ("SELECT creation_date FROM bibrec WHERE " + \
"creation_date > '%s' AND creation_date < '%s' " + \
"ORDER BY creation_date DESC") % \
(lower, upper)
sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
"WHERE creation_date < '%s'" % (lower)
else:
ids = perform_request_search(cc=args['collection'])
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql_query_g = ("SELECT creation_date FROM bibrec WHERE id IN %s AND " + \
"creation_date > '%s' AND creation_date < '%s' " + \
"ORDER BY creation_date DESC") % \
(ids_str, lower, upper)
sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
"WHERE id IN %s AND creation_date < '%s'" % (ids_str, lower)
action_dates = [x[0] for x in run_sql(sql_query_g)]
initial_quantity = run_sql(sql_query_i)[0][0]
return _get_trend_from_actions(action_dates, initial_quantity,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
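# Informal usage sketch for the trend functions in this section (values below are
# illustrative only; the real 'args' dict is assembled by the webstat layer):
#
#   args = {'collection': 'All',
#           't_start': '2011-01-01', 't_end': '2011-02-01',
#           'granularity': 'day', 't_format': '%Y-%m-%d'}
#   trend = get_keyevent_trend_collection_population(args)
#   # -> list of (time bucket, count) pairs built by _get_trend_from_actions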
def get_keyevent_trend_search_frequency(args):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE date > '%s' AND date < '%s' ORDER BY date DESC" % \
(lower, upper)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_comments_frequency(args):
"""
    Returns the number of comments (of any kind) made
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args.get('collection','All') == 'All':
sql = "SELECT date_creation FROM cmtRECORDCOMMENT " + \
"WHERE date_creation > '%s' AND date_creation < '%s'" \
% (lower, upper) + " ORDER BY date_creation DESC"
else:
ids = get_collection_reclist(args['collection']).tolist()
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql = "SELECT date_creation FROM cmtRECORDCOMMENT \
WHERE date_creation > '%s' AND date_creation < '%s' \
AND id_bibrec IN %s ORDER BY date_creation DESC" \
% (lower, upper, ids_str)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_type_distribution(args):
"""
    Returns the number of searches carried out during the given
    timestamp range, partitioned by type: Simple and Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# SQL to determine all simple searches:
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE urlargs LIKE '%p=%' " + \
"AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
simple = [x[0] for x in run_sql(sql)]
# SQL to determine all advanced searches:
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE urlargs LIKE '%as=1%' " + \
"AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
advanced = [x[0] for x in run_sql(sql)]
# Compute the trend for both types
s_trend = _get_trend_from_actions(simple, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(advanced, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
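# The "assemble" step above zips the two trends elementwise, pairing each time
# bucket with a (simple, advanced) tuple so both series share the same time axis.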
def get_keyevent_trend_download_frequency(args):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Collect list of timestamps of insertion in the specific collection
if args.get('collection','All') == 'All':
sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
AND download_time < '%s' ORDER BY download_time DESC" % (lower, upper)
else:
ids = get_collection_reclist(args['collection']).tolist()
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
AND download_time < '%s' AND id_bibrec IN %s \
ORDER BY download_time DESC" % (lower, upper, ids_str)
actions = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(actions, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql = "SELECT loaned_on FROM crcLOAN " + \
"WHERE loaned_on > '%s' AND loaned_on < '%s' ORDER BY loaned_on DESC"\
% (lower, upper)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_web_submissions(args):
"""
Returns the quantity of websubmissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args['doctype'] == 'all':
sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
"WHERE action='SBI' AND cd > '%s' AND cd < '%s'" % (lower, upper) + \
" AND status='finished' ORDER BY cd DESC"
else:
sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
"WHERE doctype='%s' AND action='SBI' " % args['doctype'] + \
"AND cd > '%s' AND cd < '%s' " % (lower, upper) + \
"AND status='finished' ORDER BY cd DESC"
action_dates = [x[0] for x in run_sql(sql_query)]
return _get_trend_from_actions(action_dates, 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
def get_keyevent_loan_statistics(args):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by user address (=Department)
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > '%s' AND loaned_on < '%s' " % (lower, upper)
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += """AND l.id_crcBORROWER = bor.id AND
bor.address LIKE '%%%s%%' """ % args['user_address']
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE '%%%s%%')" % args['udc']
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = '%s' " % args['item_status']
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b \
WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE '%%%s%%') " % args['publication_date']
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += """AND br.id=l.id_bibrec AND br.creation_date
LIKE '%%%s%%' """ % args['creation_date']
# Number of loans:
loans = run_sql("SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where)[0][0]
# Number of items loaned on the total number of items:
items_loaned = run_sql("SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where)[0][0]
total_items = run_sql("SELECT COUNT(*) FROM crcITEM")[0][0]
loaned_on_total = float(items_loaned) / float(total_items)
# Number of items never loaned on the total number of items
never_loaned_on_total = float(total_items - items_loaned) / float(total_items)
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT DATEDIFF(MIN(loaned_on), MIN(br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
avg_sql += "GROUP BY l.id_bibrec, br.id"
res_avg = run_sql(avg_sql)
if len(res_avg) > 0:
avg = res_avg[0][0]
else:
avg = 0
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
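# Informal usage sketch: the *_statistics helpers return a tuple of one-element
# tuples, one per figure, e.g. (argument values below are hypothetical):
#
#   (loans,), (loaned_ratio,), (never_loaned_ratio,), (avg_days,) = \
#       get_keyevent_loan_statistics({'t_start': '2011-01-01', 't_end': '2012-01-01',
#                                     't_format': '%Y-%m-%d'})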
def get_keyevent_loan_lists(args):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
    @param args['min_loans']: minimum number of loans
    @type args['min_loans']: int
    @param args['max_loans']: maximum number of loans
    @type args['max_loans']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE type = 'normal' AND loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
if 'loan_period' in args and args['loan_period'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.loan_period = %s "
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b \
WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s) "
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
# Documents (= records) never loaned:
for rec, copies in run_sql("""SELECT id_bibrec, COUNT(*) FROM crcITEM WHERE
id_bibrec NOT IN (SELECT l.id_bibrec """ + sql_from + sql_where +
") GROUP BY id_bibrec", param):
loans = run_sql("SELECT COUNT(*) %s %s AND l.id_bibrec=%s" %
(sql_from, sql_where, rec), param)[0][0]
try:
creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
except:
            creation = datetime.datetime(1970, 1, 1)
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append(('Documents never loaned', book_title_from_MARC(rec), author,
edition, loans, copies, creation))
# Most loaned documents
most_loaned = []
check_num_loans = ""
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
if check_num_loans != "":
check_num_loans = " HAVING " + check_num_loans
mldocs = run_sql("SELECT l.id_bibrec, COUNT(*) " + sql_from + sql_where +
" GROUP BY l.id_bibrec " + check_num_loans, param)
for rec, loans in mldocs:
copies = run_sql("SELECT COUNT(*) FROM crcITEM WHERE id_bibrec=%s", (rec, ))[0][0]
most_loaned.append((rec, loans, copies, loans / copies))
if most_loaned == []:
return (res)
    most_loaned.sort(key=lambda x: x[3])
    if len(most_loaned) > 50:
        most_loaned = most_loaned[:50]
most_loaned.reverse()
for rec, loans, copies, _ in most_loaned:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
try:
creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
except:
            creation = datetime.datetime(1970, 1, 1)
res.append(('Most loaned documents', book_title_from_MARC(rec), author,
edition, loans, copies, creation))
return (res)
def get_keyevent_renewals_lists(args):
"""
Lists:
- List of most renewed items stored by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql("SELECT i.id_bibrec, SUM(number_of_renewals) "
+ sql_from + sql_where +
" GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC LIMIT 50", param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args):
"""
Data:
- Number of overdue returns in a year
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
returns = run_sql("SELECT COUNT(*) FROM crcLOAN l \
WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' \
OR returned_on > due_date)", (lower, upper))[0][0]
return ((returns, ), )
def get_keyevent_trend_returns_percentage(args):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# SQL to determine overdue returns:
sql = "SELECT due_date FROM crcLOAN " + \
"WHERE loaned_on > %s AND loaned_on < %s AND " + \
"due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' " + \
"OR returned_on > due_date) ORDER BY due_date DESC"
overdue = [x[0] for x in run_sql(sql, (lower, upper))]
# SQL to determine all returns:
sql = "SELECT due_date FROM crcLOAN " + \
"WHERE loaned_on > %s AND loaned_on < %s AND " + \
"due_date < NOW() ORDER BY due_date DESC"
total = [x[0] for x in run_sql(sql, (lower, upper))]
# Compute the trend for both types
s_trend = _get_trend_from_actions(overdue, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(total, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_ill_requests_statistics(args):
"""
Data:
- Number of ILL requests
- Number of satisfied ILL requests 3 months after the date of request
creation on a period of one year
- Percentage of satisfied ILL requests 3 months after the date of
request creation on a period of one year
- Average time between the date and the hour of the ill request
date and the date and the hour of the delivery item to the user
on a period of one year (with flexibility in the choice of the dates)
- Average time between the date and the hour the ILL request
was sent to the supplier and the date and hour of the
delivery item on a period of one year (with flexibility in
the choice of the dates)
Filter by
- in a specified time span
- by type of document (book or article)
- by user address
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
# Number of requests:
requests = run_sql("SELECT COUNT(*) " + sql_from + sql_where, param)[0][0]
# Number of satisfied ILL requests 3 months after the date of request creation:
satrequests = run_sql("SELECT COUNT(*) " + sql_from + sql_where +
"AND arrival_date != '0000-00-00 00:00:00' AND \
DATEDIFF(arrival_date, period_of_interest_from) < 90 ", param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, period_of_interest_from, request_date)) "
+ sql_from + sql_where, param)[0][0]
    if avgdel is not None:
        avgdel = int(avgdel)
    else:
        avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, arrival_date, request_date)) "
+ sql_from + sql_where, param)[0][0]
    if avgsup is not None:
        avgsup = int(avgsup)
    else:
        avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
def get_keyevent_ill_requests_lists(args):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# Results:
res = [("Title", "Author", "Edition")]
for item_info in run_sql("SELECT item_info " + sql_from + sql_where + " LIMIT 100", param):
item_info = eval(item_info[0])
try:
res.append((item_info['title'], item_info['authors'], item_info['edition']))
        except KeyError:
            pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args):
"""
Returns the number of satisfied ILL requests 3 months after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
sql = "SELECT request_date " + sql_from + sql_where + \
"AND ADDDATE(request_date, 90) < NOW() AND (arrival_date != '0000-00-00 00:00:00' " + \
"OR arrival_date < ADDDATE(request_date, 90)) ORDER BY request_date DESC"
satisfied = [x[0] for x in run_sql(sql, param)]
# SQL to determine all ILL requests:
sql = "SELECT request_date " + sql_from + sql_where + \
" AND ADDDATE(request_date, 90) < NOW() ORDER BY request_date DESC"
total = [x[0] for x in run_sql(sql, param)]
# Compute the trend for both types
s_trend = _get_trend_from_actions(satisfied, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(total, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
# Number of new items:
param += [lower, upper]
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
return ((items, ), (new_items, ))
def get_keyevent_items_lists(args):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
@type args[library'']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if len(param) == 0:
sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
sql_from + sql_where + " LIMIT 100")
else:
sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
sql_from + sql_where + " LIMIT 100", tuple(param))
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = run_sql("""SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND
DATEDIFF(ws.creation_time, lr.request_date) >= 7""" %
(sql_from, custom_table, sql_where), param)[0][0]
    # Number of successful hold request transactions
    successful_holds = run_sql("SELECT COUNT(*) %s %s AND lr.status='done'" %
                               (sql_from, sql_where), param)[0][0]
# Average time between the hold request date and the date of delivery document in a year
avg = run_sql("""SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date))
%s, %s ws %s AND ws.request_id=lr.id""" %
(sql_from, custom_table, sql_where), param)[0][0]
    if avg is not None:
        avg = int(avg)
    else:
        avg = 0
    return ((holds, ), (successful_holds, ), (avg, ))
def get_keyevent_loan_request_lists(args):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND lr.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql("SELECT lr.barcode " + sql_from + sql_where +
" GROUP BY barcode ORDER BY COUNT(*) DESC", param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by user address
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
sql_address = ""
param = [lower, upper, lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_address += ", crcBORROWER bor WHERE id = user AND \
address LIKE %s "
param.append('%%%s%%' % args['user_address'])
# Total number of active users:
users = run_sql("""SELECT COUNT(DISTINCT user)
FROM ((SELECT id_crcBORROWER user %s %s) UNION
(SELECT id_crcBORROWER user %s %s)) res %s""" %
(sql_from_ill, sql_where_ill, sql_from_loan,
sql_where_loan, sql_address), param)[0][0]
return ((users, ), )
def get_keyevent_user_lists(args):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by user address
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
sql_address = ""
param = [lower, upper, lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_address += ", crcBORROWER bor WHERE id = user AND \
address LIKE %s "
param.append('%%%s%%' % args['user_address'])
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql("SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER)) res %s \
GROUP BY user ORDER BY SUM(trans) DESC"
% (sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan, sql_address), param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
    return _run_cmd('uptime').strip().replace('  ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
    @return: The number of Apache processes (root + children), as reported by 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
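# Usage sketch: the snapshot helpers take no arguments, e.g.
#   guests, logged_ins = get_keyevent_snapshot_sessions()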
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals, returns = run_sql("""SELECT COUNT(*),
        SUM(number_of_renewals), SUM(returned_on<>'0000-00-00')
FROM crcLOAN WHERE loaned_on > %s""", (datefrom, ))[0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
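# Usage sketch, following the docstring above:
#   loans, renewals, returns, ill_requests, hold_requests = \
#       get_keyevent_bibcirculation_report(freq='monthly')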
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content they must match to be included
                         in the query; if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
sql_query = ["SELECT creation_time FROM %s WHERE creation_time > '%s'"
% (tbl_name, lower)]
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
dates = [x[0] for x in run_sql(sql, tuple(sql_param))]
return _get_trend_from_actions(dates, 0, args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
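# Illustrative sketch (not part of the original module): the shape of the 'args'
# dictionary consumed by get_customevent_trend(). The event id and dates below
# are placeholder values.
def _example_customevent_trend_args():
    """Return a sample 'args' dict for get_customevent_trend()."""
    return {'event_id': 'baskets',
            't_start': '2011-01-01 00:00:00',
            't_end': '2011-02-01 00:00:00',
            'granularity': 'day',
            't_format': '%Y-%m-%d %H:%M:%S',
            # each entry is [boolean operator, column name, column content];
            # empty content means the column is not used for filtering
            'cols': [['', '', '']]}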
def get_customevent_dump(args):
"""
    Similar to a get_event_trend implementation, but no refining (frequency
    handling) is carried out whatsoever. This is just a raw dump.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content they must match to be included
                         in the query; if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%s'" % (tbl_name,
lower)] # Note: SELECT * technique is okay here
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
    Helper function that, for a given event id, retrieves the corresponding
    event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
    Helper function that, for a given event id, retrieves the corresponding
    event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param year: Year of publication on the journal
@type year: int
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = "909C4p"
# First get records of the year
recids = perform_request_search(p=query, of="id")
# Then return list by tag
pub = list(get_most_popular_field_values(recids, tag))
sel = 0
for elem in pub:
sel += elem[1]
if len(pub) == 0:
return []
if len(recids) - sel != 0:
pub.append(('Others', len(recids) - sel))
pub.append(('TOTAL', len(recids)))
return pub
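# Illustrative example only (journal names and counts are invented): the list
# returned by get_custom_summary_data() is made of (tag value, record count)
# pairs, padded with an 'Others' bucket and a 'TOTAL' line.
_EXAMPLE_CUSTOM_SUMMARY_DATA = [('Phys. Rev. D', 120), ('JHEP', 95),
                                ('Others', 12), ('TOTAL', 227)]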
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
return
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = float(len(labels))
width = 6 + numb_elem / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
    for i in range(int(numb_elem)):
col = 0.5 + float(i) / (numb_elem * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
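# Illustrative sketch (assumed values): a minimal 'settings' dict accepted by
# create_graph_trend(). Only 'format' is read by the dispatcher above; the
# remaining keys are consumed by the ascii-art/gnuplot/flot backends below.
def _example_trend_graph_settings():
    """Return a sample 'settings' dict for create_graph_trend()."""
    return {'format': 'flot',          # one of 'asciiart', 'gnuplot', 'flot'
            'multiple': None,          # or a list of data-set labels
            'title': 'Searches per day',
            'xlabel': 'Date',
            'ylabel': 'Searches',
            'xtic_format': '%Y-%m-%d',
            'size': '600,400'}         # only used by the gnuplot backend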
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
        # Use the biggest of the histogram sums as the 100% reference (index)
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
        print(out)
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data linespoints')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/js/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder" style="width:500px;height:400px"></div></div>'+
'<div id="miniature" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview" style="width:250px;height:200px"></div>' +
'<p id="overviewLegend" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime()
- (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData() {""" % \
{'site': CFG_SITE_URL}
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate("%s"),%d]' % \
(_to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += "];\n"
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate("%s"),%d]' % \
(_to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """];
return [d1];
}
"""
# Set options
tics = ""
if settings["xtic_format"] != '':
tics = 'xaxis: { mode:"time",min:parseDate("%s"),max:parseDate("%s")},'\
% (_to_datetime(minx, '%Y-%m-%d %H:%M:%S'),
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S'))
tics += """
yaxis: {
tickDecimals : 0
},
"""
out += """var options ={
series: {
lines: { show: true },
points: { show: false }
},
legend: { show : false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % tics
# Write the plot method in javascript
out += """var startData = getData();
var plot = $.plot($("#placeholder"), startData, options);
var overview = $.plot($("#overview"), startData, {
legend: { show: true, container: $("#overviewLegend") },
series: {
lines: { show: true, lineWidth: 1 },
shadowSize: 0
},
%s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % tics
# Tooltip and zoom
out += """ function showTooltip(x, y, contents) {
$('<div id="tooltip">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint = null;
$("#placeholder").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint != item.datapoint) {
previousPoint = item.datapoint;
$("#tooltip").remove();
var y = item.datapoint[1];
showTooltip(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip").remove();
previousPoint = null;
}
});
$("#placeholder").bind("plotclick", function (event, pos, item) {
if (item) {
plot.highlight(item.series, item.datapoint);
}
});
$("#placeholder").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot = $.plot($("#placeholder"), startData,
$.extend(true, {}, options, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview.setSelection(ranges, true);
});
$("#overview").bind("plotselected", function (event, ranges) {
plot.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>"""
open(path, 'w').write(out)
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
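# Illustrative sketch (invented values): how 'data' and settings['rows'] line up
# in create_graph_table(). When 'rows' is non-empty, each label is paired with an
# iterable of cell values; when it is [], 'data' is written out row by row as-is.
def _example_graph_table_input():
    """Return a sample (data, settings) pair for create_graph_table()."""
    settings = {'rows': ['Guests', 'Logged in']}
    data = [(12, 34), (5, 6)]
    return data, settings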
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
        print(out)
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
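# Illustrative example only (made-up rows): the CSV emitted by export_to_csv().
# Tuples in the second position are flattened into extra columns, e.g.
#   export_to_csv([('2011-01-01', (10, 2)), ('2011-01-02', 7)], req)
# writes:
#   "2011-01-01",10,2
#   "2011-01-02",7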
def export_to_excel(data, req):
"""
Exports the data to excel.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
if not xlwt_imported:
raise Exception("Module xlwt not installed")
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
# INTERNAL
def _export(mime, content, req):
"""
    Helper function to pass on the export call. Creates a
    temporary file in which the content is stored, then
    redirects to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format):
"""
    Given a list of dates reflecting some sort of action/event, and some additional parameters,
    an internal data format is returned. With 'initial_value' set to zero the frequency is
    counted per interval; a non-zero value makes the counts accumulate across intervals.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in format %Y-%m-%d %H:%M:%S
@type t_start: str
    @param t_end: End time for the time domain in format %Y-%m-%d %H:%M:%S
    @type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' and 't_stop' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Prepend the maximum date as a sentinel; the list is consumed from the end via pop()
action_dates.insert(0, datetime.datetime.max)
# Create an iterator running from the first day of activity
dt_iter = _get_datetime_iter(t_start, granularity, dt_format)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
# If our t_start is more recent than the initial action_dates, we need to
# drop those.
t_start_dt = _to_datetime(t_start, dt_format)
while action_dates[-1] < t_start_dt:
action_dates = action_dates[:-1]
vector = [(None, initial_value)]
# pylint: disable=E1101
old = dt_iter.next()
# pylint: enable=E1101
upcoming_action = action_dates.pop()
for current in dt_iter:
# Counter of action_dates in the current span, set the initial value to
        # zero to avoid accumulation.
if initial_value != 0:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
while old <= upcoming_action < current:
actions_here += 1
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = datetime.datetime.max
vector.append((old.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
old = current
# Make sure to stop the iteration at the end time
if current > stop_at:
break
# Remove the first bogus tuple, and return
return vector[1:]
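# Worked example (hypothetical dates, comments only): three actions on two days,
# non-accumulative counting with daily granularity. Note the input list is in
# descending order, as produced by the 'ORDER BY ... DESC' queries above.
#   _get_trend_from_actions(
#       [datetime.datetime(2011, 1, 2, 10, 0, 0),
#        datetime.datetime(2011, 1, 1, 15, 0, 0),
#        datetime.datetime(2011, 1, 1, 9, 0, 0)],
#       0, '2011-01-01 00:00:00', '2011-01-03 00:00:00',
#       'day', '%Y-%m-%d %H:%M:%S')
#   -> [('2011-01-01 00:00:00', 2), ('2011-01-02 00:00:00', 1)]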
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
    @param granularity: The span between iterable elements, default is 'day'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
    @param dt_format: Format of the 't_start' parameter
    @type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
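# Illustrative sketch: taking the first few ticks from the iterator with
# itertools.islice (the start date and granularity below are arbitrary).
def _example_datetime_iter_usage():
    """Return the first three monthly ticks starting from an arbitrary date."""
    import itertools
    return list(itertools.islice(
        _get_datetime_iter('2011-01-15 00:00:00', granularity='month'), 3))
    # -> [datetime(2011, 1, 15, 0, 0), datetime(2011, 2, 15, 0, 0),
    #     datetime(2011, 3, 15, 0, 0)]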
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
not found a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [("available", "Available"), ("requested", "Requested"),
("on loan", "On loan"), ("missing", "Missing")]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE '%%%s%%'" % (tag))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE '%%%s%%'" % (tag[:-1]))
if res:
return res[0][0]
return ''
| gpl-2.0 |
ctoher/pymatgen | pymatgen/analysis/transition_state.py | 1 | 9167 | # coding: utf-8
from __future__ import division, unicode_literals
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which are originally in Perl. Additional features beyond those offered by
Henkelman's utilities will be added.
This allows the usage and customization in Python.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '6/1/15'
import os
import glob
import numpy as np
from scipy.interpolate import PiecewisePolynomial
from pymatgen.util.plotting_utils import get_publication_quality_plot
from pymatgen.io.vaspio import Poscar, Outcar
class NEBAnalysis(object):
"""
An NEBAnalysis class.
"""
def __init__(self, outcars, structures, interpolation_order=3):
"""
Initializes an NEBAnalysis from Outcar and Structure objects. Use
the static constructors, e.g., :class:`from_dir` instead if you
prefer to have these automatically generated from a directory of NEB
calculations.
Args:
outcars ([Outcar]): List of Outcar objects. Note that these have
to be ordered from start to end along reaction coordinates.
structures ([Structure]): List of Structures along reaction
                coordinate. Must be same length as outcars.
interpolation_order (int): Order of polynomial to use to
interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
"""
if len(outcars) != len(structures):
raise ValueError("# of Outcars must be same as # of Structures")
# Calculate cumulative root mean square distance between structures,
# which serves as the reaction coordinate. Note that these are
# calculated from the final relaxed structures as the coordinates may
# have changed from the initial interpolation.
r = [0]
prev = structures[0]
for st in structures[1:]:
dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
r.append(np.sqrt(np.sum(dists ** 2)))
prev = st
r = np.cumsum(r)
energies = []
forces = []
for i, o in enumerate(outcars):
o.read_neb()
energies.append(o.data["energy"])
if i in [0, len(outcars) - 1]:
forces.append(0)
else:
forces.append(o.data["tangent_force"])
energies = np.array(energies)
energies -= energies[0]
forces = np.array(forces)
self.r = np.array(r)
self.energies = energies
self.forces = forces
# We do a piecewise interpolation between the points. Each spline (
# cubic by default) is constrained by the boundary conditions of the
# energies and the tangent force, i.e., the derivative of
# the energy at each pair of points.
self.spline = PiecewisePolynomial(
self.r, np.array([self.energies, -self.forces]).T,
orders=interpolation_order)
def get_extrema(self, normalize_rxn_coordinate=True):
"""
Returns the positions of the extrema along the MEP. Both local
minimums and maximums are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...].
"""
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i-1] and y[i] < y[i+1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i-1] and y[i] > y[i+1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
"""
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
"""
plt = get_publication_quality_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
plt.plot(x * scale, y, 'k-', linewidth=2)
plt.plot(self.r * scale, self.energies * 1000, 'ro', markersize=10)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate('%.0f meV' % barrier[1],
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment='center')
plt.tight_layout()
return plt
@classmethod
def from_dir(cls, root_dir, relaxation_dirs=None):
"""
Initializes a NEBAnalysis object from a directory of a NEB run.
Note that OUTCARs must be present in all image directories. For the
terminal OUTCARs from relaxation calculations, you can specify the
locations using relaxation_dir. If these are not specified, the code
will attempt to look for the OUTCARs in 00 and 0n directories,
followed by subdirs "start", "end" or "initial", "final" in the
root_dir. These are just some typical conventions used
preferentially in Shyue Ping's MAVRL research group. For the
non-terminal points, the CONTCAR is read to obtain structures. For
terminal points, the POSCAR is used. The image directories are
assumed to be the only directories that can be resolved to integers.
E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
sub-directory structure that can be parsed is of the following form (
a 5-image example is shown):
00:
- POSCAR
- OUTCAR
01, 02, 03, 04, 05:
- CONTCAR
- OUTCAR
06:
- POSCAR
- OUTCAR
Args:
root_dir (str): Path to the root directory of the NEB calculation.
relaxation_dirs (tuple): This specifies the starting and ending
relaxation directories from which the OUTCARs are read for the
terminal points for the energies.
Returns:
NEBAnalysis object.
"""
neb_dirs = []
for d in os.listdir(root_dir):
pth = os.path.join(root_dir, d)
if os.path.isdir(pth) and d.isdigit():
i = int(d)
neb_dirs.append((i, pth))
neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
outcars = []
structures = []
# Setup the search sequence for the OUTCARs for the terminal
# directories.
terminal_dirs = []
if relaxation_dirs is not None:
terminal_dirs.append(relaxation_dirs)
terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["start", "end"]])
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["initial", "final"]])
for i, d in neb_dirs:
outcar = glob.glob(os.path.join(d, "OUTCAR*"))
contcar = glob.glob(os.path.join(d, "CONTCAR*"))
poscar = glob.glob(os.path.join(d, "POSCAR*"))
terminal = i == 0 or i == neb_dirs[-1][0]
if terminal:
found = False
for ds in terminal_dirs:
od = ds[0] if i == 0 else ds[1]
outcar = glob.glob(os.path.join(od, "OUTCAR*"))
if outcar:
outcar = sorted(outcar)
outcars.append(Outcar(outcar[-1]))
found = True
break
if not found:
raise ValueError("OUTCAR cannot be found for terminal "
"point %s" % d)
structures.append(Poscar.from_file(poscar[0]).structure)
else:
outcars.append(Outcar(outcar[0]))
structures.append(Poscar.from_file(contcar[0]).structure)
return NEBAnalysis(outcars, structures)
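# Illustrative usage sketch (the directory path below is a placeholder): parse a
# finished NEB run, report its extrema and save the energy profile plot.
def _example_neb_analysis(root_dir="path/to/neb_run"):
    """Minimal end-to-end use of NEBAnalysis; assumes OUTCARs/POSCARs are present."""
    neb = NEBAnalysis.from_dir(root_dir)
    minima, maxima = neb.get_extrema()
    plt = neb.get_plot()
    plt.savefig("neb_profile.png")
    return minima, maxima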
| mit |
UWSEDS-aut17/uwseds-group-city-fynders | cityfynders/tests/test_UI_setup.py | 1 | 1628 | import unittest
import pandas as pd
from cityfynders.UI_setup import layout_setup
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
class UI_setup_get(unittest.TestCase):
"""
    This is to test UI_setup.py and the function layout_setup.
    That function returns a 'Div' component, so we test that it returns a
    non-null object.
"""
def test_uisetup(self):
rank = pd.read_csv('../../data/rank_file.csv')
available = list(rank.columns.values)
for i in ['Unnamed: 0', 'City', 'State', 'Population',
'Natural_total_rank', 'Human_related_rank',
'Economy_rank', 'Tertiary_Rank', 'Latitude', 'Longitude']:
available.remove(i)
# Create a list of labels for dropdown
labels = ['Air Quality', 'Water Quality', 'Fewer Toxics',
'Fewer Hazardous Particles', 'Green Coverage',
'Fewer Crimes', 'More Hospitals', 'Early Education Options',
'University Options', 'Employment Rate', 'Sales Revenue',
'Income', 'Tuition Affordability', 'Bars', 'Restaurants',
'Museums', 'Libraries', 'Parks', 'Top Restaurants']
# Put available and labels in a two-dimensional list
pairs = [available, labels]
app = dash.Dash()
output = 0
output = layout_setup(pairs)
self.assertTrue(output)
if __name__ == '__main__':
unittest.main()
| mit |
alee156/cldev | Tony/clviz_web_tony_edits/claritybase.py | 1 | 24909 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
#import matplotlib as mpl
#mpl.use('Agg')
from skimage import data, img_as_float
from skimage import exposure
import plotly
from plotly.graph_objs import *
import cv2
import math, os, gc, random
import numpy as np
import nibabel as nib
import os.path
## Tony's get_brain_figure stuff
#from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
#from plotly import tools
#plotly.offline.init_notebook_mode()
#
import networkx as nx
import pandas as pd
import re
"""
clarity.py
"""
class claritybase(object):
"""This class applies local equalizaiton to the img's historgram, generates the points and graphml, and plots using plotly."""
def __init__(self, token, source_directory = None):
"""Constructor that takes in the token name, loads the img, and makes a directory."""
self._token = token # Token
self._img = None # Image Data
self._shape = None # (x, y, z)
self._max = None # Max Value
self._points = None
self._source_directory = source_directory
self._brightest = None
# self._brain_figure = None
self._infile = None
self._nodefile = None
self._edgefile = None
self._filename = None
#self.loadImg(self._token + ".img")
#self.loadEqImg()
# make a directory if none exists
if not os.path.exists('output/' + token):
os.makedirs('output/' + token)
def getShape(self):
"""Function that returns the shape."""
return self._shape
def getMax(self):
"""Function that returns the max."""
return self._max
def discardImg(self):
"""Function used to get rid of the img in memory."""
del self._img
gc.collect()
return self
def brightPoints(self, path=None, points=20000):
pathname = ""
if path == None:
pathname = self._token + '/' + self._token + 'localeq.csv'
else:
pathname = path + '/' + self._token + 'localeq.csv'
total = points
bright = 255
data = self._points
allpoints = []
brightpoints = []
savePoints = []
outfile = open(pathname, 'w')
for line in data:
if line[3] == bright:
brightpoints.append([line[0], line[1], line[2], line[3]])
else:
allpoints.append([line[0], line[1], line[2], line[3]])
total = total - len(brightpoints)
print(total)
bright = bright - 1
print(bright)
if total < 0:
index = random.sample(xrange(0, len(brightpoints)), total + len(brightpoints))
for ind in index:
outfile.write(str(brightpoints[ind][0]) + "," + str(brightpoints[ind][1]) + "," + str(brightpoints[ind][2]) + "," + str(brightpoints[ind][3]) + "\n")
savePoints.append(brightpoints[ind])
else:
for item in brightpoints:
outfile.write(str(item[0]) + "," + str(item[1]) + "," + str(item[2]) + "," + str(item[3]) + "\n")
savePoints.append(item)
while(total > 0):
print("in while loop")
brightpoints = []
newallpoints = []
for item in allpoints:
if item[3] == bright:
brightpoints.append(item)
else:
newallpoints.append(item)
total = total - len(brightpoints)
print(total)
bright = bright - 1
print(bright)
if total < 0:
index = random.sample(xrange(0, len(brightpoints)), total + len(brightpoints))
for ind in index:
outfile.write(str(brightpoints[ind][0]) + "," + str(brightpoints[ind][1]) + "," + str(brightpoints[ind][2]) + "," + str(brightpoints[ind][3]) + "\n")
savePoints.append(brightpoints[ind])
else:
for item in brightpoints:
outfile.write(str(item[0]) + "," + str(item[1]) + "," + str(item[2]) + "," + str(item[3]) + "\n")
savePoints.append(item)
allpoints = newallpoints
outfile.close()
self._points = savePoints
def generate_plotly_html(self):
"""Generates the plotly from the csv file."""
# Type in the path to your csv file here
thedata = None
thedata = np.genfromtxt('output/' + self._token + '/' + self._token + 'localeq.csv',
delimiter=',', dtype='int', usecols = (0,1,2), names=['a','b','c'])
trace1 = Scatter3d(
x = thedata['a'],
y = thedata['b'],
z = thedata['c'],
mode='markers',
marker=dict(
size=1.2,
color='cyan', # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=0.15
)
)
data = [trace1]
layout = Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
),
paper_bgcolor='rgb(0,0,0)',
plot_bgcolor='rgb(0,0,0)'
)
fig = Figure(data=data, layout=layout)
print(self._token + "plotly")
plotly.offline.plot(fig, filename= 'output/' + self._token + "/" + self._token + "_brain_pointcloud.html")
def applyLocalEq(self):
"""Applies local equilization to the img's histogram and outputs a .nii file"""
print('Generating Histogram...')
path = ""
if self._source_directory == None:
if os.path.isfile(self._token + '.img'):
path = self._token + '.img'
else:
path = self._token + '.nii'
else:
if os.path.isfile(self._source_directory + "/" + self._token + ".img"):
path = self._source_directory + "/" + self._token + ".img"
else:
path = self._source_directory + "/" + self._token + ".nii"
im = nib.load(path)
im = im.get_data()
img = im[:,:,:]
shape = im.shape
#affine = im.get_affine()
x_value = shape[0]
y_value = shape[1]
z_value = shape[2]
#####################################################
imgflat = img.reshape(-1)
#img_grey = np.array(imgflat * 255, dtype = np.uint8)
#img_eq = exposure.equalize_hist(img_grey)
#new_img = img_eq.reshape(x_value, y_value, z_value)
#globaleq = nib.Nifti1Image(new_img, np.eye(4))
#nb.save(globaleq, '/home/albert/Thumbo/AutAglobaleq.nii')
######################################################
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
img_grey = np.array(imgflat * 255, dtype = np.uint8)
#threshed = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0)
cl1 = clahe.apply(img_grey)
#cv2.imwrite('clahe_2.jpg',cl1)
#cv2.startWindowThread()
#cv2.namedWindow("adaptive")
#cv2.imshow("adaptive", cl1)
#cv2.imshow("adaptive", threshed)
#plt.imshow(threshed)
localimgflat = cl1 #cl1.reshape(-1)
newer_img = localimgflat.reshape(x_value, y_value, z_value)
localeq = nib.Nifti1Image(newer_img, np.eye(4))
nib.save(localeq, 'output/' + self._token + '/' + self._token + 'localeq.nii')
# def loadImg(self, path=None, info=False):
# """Method for loading the .img file"""
## if path is None:
## path = rs.RAW_DATA_PATH
## pathname = path + self._token+".img"
# if self._source_directory == None:
# path = self._token + '.img'
# else:
# path = self._source_directory + "/" + self._token + ".img"
#
# #path = self._token + '.hdr'
#
# img = nib.load(path)
# if info:
# print(img)
# self._img = img.get_data()[:,:,:,0]
# self._shape = self._img.shape
# self._max = np.max(self._img)
# print("Image Loaded: %s"%(path))
# return self
def loadEqImg(self, path=None, info=False):
"""Function for loading the img."""
print('Inside loadEqImg')
path = ""
if self._source_directory == None:
if os.path.isfile(self._token + '.img'):
path = self._token + '.img'
else:
path = self._token + '.nii'
else:
if os.path.isfile(self._token + '.img'):
path = self._source_directory + "/" + self._token + ".img"
else:
path = self._source_directory + "/" + self._token + ".nii"
print("Loading: %s"%(path))
#pathname = path+self._token+".nii"
img = nib.load(path)
if info:
print(img)
self._img = img.get_data()
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(path))
return self
def loadGeneratedNii(self, path=None, info=False):
"""Loads a preexisting nii file. This function is mainly used for testing"""
if path == None:
path = 'output/' + self._token + '/' + self._token + 'localeq.nii'
print("Loading: %s"%(path))
#pathname = path+self._token+".nii"
img = nib.load(path)
if info:
print(img)
#self._img = img.get_data()[:,:,:,0]
self._img = img.get_data()
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(path))
return self
def loadInitCsv(self, path=None):
"""Method for loading the initial csv file"""
points = []
with open(path, 'r') as infile:
for line in infile:
line = line.strip().split(',')
entry = [int(line[0]), int(line[1]), int(line[2]), int(line[3])]
points.append(entry)
#points.append(str(line[0]) + "," + str(line[1]) + "," + str(line[2]))
#self._points = open(path, 'r')
self._points = points
self._infile = open(path, 'r')
self._filename = self._infile.name[:-4] if self._infile.name.endswith('.csv') else self._infile.name
print("File Loaded: %s"%(self._infile.name))
return self
def loadNodeCsv(self, path=None):
"""Method for loading the nodes csv file"""
self._nodefile = open(path, 'r')
print("File Loaded: %s"%(self._nodefile.name))
return self
def loadEdgeCsv(self, path=None):
"""Method for loading the edges csv file"""
self._edgefile = open(path, 'r')
print("File Loaded: %s"%(self._edgefile.name))
return self
def calculatePointsByNumber(self, num_points = 10000, optimize = True):
"""Method to extract points data from the img file."""
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
threshold = .9
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nnum_points=%d" \
%(self._token,total,self._max,threshold,num_points))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
# a is just a container to hold another value for ValueError: too many values to unpack
#x, y, z, a = np.where(filt)
t = np.where(filt)
x = t[0]
y = t[1]
z = t[2]
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255 * (np.float32(v) / np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
total_points = l[0]
print('total points:')
print(total_points)
if not 0 <= num_points <= total_points:
raise ValueError("Number of points given should be at most equal to total points: %d" % total_points)
fraction = num_points / float(total_points)
if fraction < 1.0:
# np.random.random returns random floats in the half-open interval [0.0, 1.0)
filt = np.random.random(size=l) < fraction
print('v.shape:')
print(l)
print('x.size before filter: %d' % x.size)
print('y.size before filter: %d' % y.size)
print('z.size before filter: %d' % z.size)
print('v.size before filter: %d' % v.size)
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
print('x.size after filter: %d' % x.size)
print('y.size after filter: %d' % y.size)
print('z.size after filter: %d' % z.size)
print('v.size after filter: %d' % v.size)
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self
def calculatePoints(self, threshold=0.1, sample=0.5, optimize=True):
"""Method to extract points data from the img file."""
if not 0 <= threshold < 1:
raise ValueError("Threshold should be within [0,1).")
if not 0 < sample <= 1:
raise ValueError("Sample rate should be within (0,1].")
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
%(self._token,total,self._max,threshold,sample))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
# a is just a container to hold another value for ValueError: too many values to unpack
#x, y, z, a = np.where(filt)
t = np.where(filt)
x = t[0]
y = t[1]
z = t[2]
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255 * (np.float32(v) / np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
if sample < 1.0:
# np.random.random returns random floats in the half-open interval [0.0, 1.0)
filt = np.random.random(size=l) < sample
print('v.shape:')
print(l)
print('x.size before filter: %d' % x.size)
print('y.size before filter: %d' % y.size)
print('z.size before filter: %d' % z.size)
print('v.size before filter: %d' % v.size)
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
print('x.size after filter: %d' % x.size)
print('y.size after filter: %d' % y.size)
print('z.size after filter: %d' % z.size)
print('v.size after filter: %d' % v.size)
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self
def savePoints(self,path=None,points=None):
"""Saves the points to a file"""
if points != None:
self._points = points
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
        print('Saving points for token: %s' % self._token)
pathname = 'output/' + self._token + "/" + self._token+"localeq.csv"
np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
return self
def plot3d(self, infile = None,radius=5):
"""Method for plotting the Nodes and Edges"""
filename = ""
points_file = None
if infile == None:
points_file = self._points
filename = self._token
else:
print('about to load specified file')
self.loadInitCsv(infile)
infile = self._infile
filename = self._filename
# points is an array of arrays
points = self._points
outpath = 'output/' + self._token + '/'
nodename = outpath + filename + '.nodes.csv'
edgename = outpath + filename + '.edges.csv'
# for line in points_file:
# line = line.strip().split(',')
# points.append(str(line[0]) + "," + str(line[1]) + "," + str(line[2]))
radius = radius
with open(nodename, 'w') as nodefile:
with open(edgename, 'w') as edgefile:
for ind in range(len(points)):
#temp = points[ind].strip().split(',')
temp = points[ind]
x = temp[0]
y = temp[1]
z = temp[2]
v = temp[3]
# radius = 18
radius = 25
nodefile.write("s" + str(ind + 1) + "," + str(x) + "," + str(y) + "," + str(z) + "\n")
for index in range(ind + 1, len(points)):
tmp = points[index]
distance = math.sqrt(math.pow(int(x) - int(tmp[0]), 2) + math.pow(int(y) - int(tmp[1]), 2) + math.pow(int(z) - int(tmp[2]), 2))
if distance < radius:
edgefile.write("s" + str(ind + 1) + "," + "s" + str(index + 1) + "\n")
self._nodefile = nodefile
self._edgefile = edgefile
def graphmlconvert(self, nodefilename = None, edgefilename = None):
"""Method for extracting the data to a graphml file, based on the node and edge files"""
nodefile = None
edgefile = None
path = 'output/' + self._token
# If no nodefilename was entered, used the Clarity object's nodefile
if nodefilename == None:
#nodefile = self._nodefile
#nodefile = open(self._nodefile, 'r')
self.loadNodeCsv(path + "/" + self._token + ".nodes.csv")
nodefile = self._nodefile
else:
self.loadNodeCsv(nodefilename)
nodefile = self._nodefile
# If no edgefilename was entered, used the Clarity object's edgefile
if edgefilename == None:
#edgefile = self._edgefile
#edgefile = open(self._edgefile, 'r')
self.loadEdgeCsv(path + "/" + self._token + ".edges.csv")
edgefile = self._edgefile
else:
self.loadEdgeCsv(edgefilename)
edgefile = self._edgefile
# Start writing to the output graphml file
path = path + "/" + self._token + ".graphml"
with open(path, 'w') as outfile:
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
outfile.write("<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\"\n")
outfile.write(" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n")
outfile.write(" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns\n")
outfile.write(" http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd\">\n")
outfile.write(" <key id=\"d0\" for=\"node\" attr.name=\"attr\" attr.type=\"string\"/>\n")
outfile.write(" <key id=\"e_weight\" for=\"edge\" attr.name=\"weight\" attr.type=\"double\"/>\n")
outfile.write(" <graph id=\"G\" edgedefault=\"undirected\">\n")
for line in nodefile:
if len(line) == 0:
continue
line = line.strip().split(',')
outfile.write(" <node id=\"" + line[0] + "\">\n")
outfile.write(" <data key=\"d0\">[" + line[1] + ", " + line[2] + ", " + line[3] +"]</data>\n")
outfile.write(" </node>\n")
for line in edgefile:
if len(line) == 0:
continue
line = line.strip().split(',')
outfile.write(" <edge source=\"" + line[0] + "\" target=\"" + line[1] + "\">\n")
outfile.write(" <data key=\"e_weight\">1</data>\n")
outfile.write(" </edge>\n")
outfile.write(" </graph>\n</graphml>")
def get_brain_figure(self, resolution, path = None, plot_title=''):
"""
        Builds the plotly figure for visualizing a 3d brain network, read from
        the generated graphml file, and writes it to an HTML file.
"""
print('generating plotly with edges...')
# Set tupleResolution to resolution input parameter
tupleResolution = resolution;
# EG: for Aut1367, the spacing is (0.01872, 0.01872, 0.005).
xResolution = tupleResolution[0]
yResolution = tupleResolution[1]
zResolution = tupleResolution[2]
# Now, to get the mm image size, we can multiply all x, y, z
# to get the proper mm size when plotting.
if path == None:
# If bath is not specified use the default path to the generated folder.
path = 'output/' + self._token + '/' + self._token + '.graphml'
g = nx.read_graphml(path)
# grab the node positions from the graphML file
V = nx.number_of_nodes(g)
attributes = nx.get_node_attributes(g,'attr')
node_positions_3d = pd.DataFrame(columns=['x', 'y', 'z'], index=range(V))
for n in g.nodes_iter():
node_positions_3d.loc[n] = [int((re.findall('\d+', str(attributes[n])))[0]), int((re.findall('\d+', str(attributes[n])))[1]), int((re.findall('\d+', str(attributes[n])))[2])]
# grab edge endpoints
edge_x = []
edge_y = []
edge_z = []
for e in g.edges_iter():
#strippedSource = int(e[0].replace('s', ''))
#strippedTarget = int(e[1].replace('s', ''))
source_pos = node_positions_3d.loc[e[0]]
target_pos = node_positions_3d.loc[e[1]]
            edge_x += [source_pos['x'] * xResolution, target_pos['x'] * xResolution, None]
            edge_y += [source_pos['y'] * yResolution, target_pos['y'] * yResolution, None]
            edge_z += [source_pos['z'] * zResolution, target_pos['z'] * zResolution, None]
# node style
node_trace = Scatter3d(x=[x * xResolution for x in node_positions_3d['x']],
y=[x * yResolution for x in node_positions_3d['y']],
z=[x * zResolution for x in node_positions_3d['z']],
mode='markers',
# name='regions',
marker=Marker(symbol='dot',
size=6,
opacity=0.5,
color='purple'),
# text=[str(r) for r in range(V)],
# text=atlas_data['nodes'],
hoverinfo='text')
# edge style
edge_trace = Scatter3d(x=edge_x,
y=edge_y,
z=edge_z,
mode='lines',
line=Line(color='cyan', width=1),
hoverinfo='none')
# axis style
axis = dict(showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False)
# overall layout
layout = Layout(title=plot_title,
width=800,
height=900,
showlegend=False,
scene=Scene(xaxis=XAxis(axis),
yaxis=YAxis(axis),
zaxis=ZAxis(axis)),
margin=Margin(t=50),
hovermode='closest',
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgb(255,255,255)')
data = Data([node_trace, edge_trace])
fig = Figure(data=data, layout=layout)
plotly.offline.plot(fig, filename= 'output/' + self._token + "/" + self._token + "_edge_count_pointcloud.html")
#return fig
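# Illustrative pipeline sketch (token name and parameter values are placeholders;
# assumes <token>.nii or <token>.img exists): equalize the volume, sample points,
# build the node/edge graph and write the plotly HTML outputs of this class.
def _example_claritybase_pipeline(token='mybrain', source_directory=None):
    """Run the full claritybase workflow for a token."""
    cb = claritybase(token, source_directory)
    cb.applyLocalEq()                   # writes output/<token>/<token>localeq.nii
    cb.loadGeneratedNii()
    cb.calculatePoints(threshold=0.9, sample=0.1)   # assumed threshold/sample
    cb.savePoints()
    cb.generate_plotly_html()           # point-cloud HTML
    cb.plot3d()                         # nodes/edges CSV files
    cb.graphmlconvert()                 # graphml built from the CSV files
    # resolution tuple taken from the docstring example above (Aut1367 spacing)
    cb.get_brain_figure((0.01872, 0.01872, 0.005))
    return cb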
| apache-2.0 |
dingocuster/scikit-learn | sklearn/metrics/tests/test_common.py | 83 | 41144 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" option: with value
#   True the metric returns the mean of the per-sample scores and with value
#   False it returns their sum.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to systematically test some invariance
# properties, e.g. invariance to several input layouts.
#
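#
# As a purely illustrative sketch (the metric name below is hypothetical and
# not part of scikit-learn), a new metric is picked up by the generic tests
# simply by registering it in the relevant dictionary and property lists::
#
#     CLASSIFICATION_METRICS["my_metric"] = my_metric
#     SYMMETRIC_METRICS.append("my_metric")
#     METRICS_WITH_NORMALIZE_OPTION.append("my_metric")
#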
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
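# Illustration (not a test): every metric can be fetched by name from
# ALL_METRICS and called on a pair of label vectors, which is exactly what
# the generic tests below do, e.g.::
#
#     metric = ALL_METRICS["accuracy_score"]
#     metric([0, 1, 1], [0, 1, 0])  # -> 2 correct out of 3 = 0.666...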
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same result for string
    # labels as for numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
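# For intuition (toy numbers, not computed anywhere in this test): with
# per-label scores [1.0, 0.5, 0.0] and label supports [2, 1, 1], "macro"
# averaging gives mean([1.0, 0.5, 0.0]) = 0.5, while "weighted" averaging
# gives (2 * 1.0 + 1 * 0.5 + 1 * 0.0) / 4 = 0.625.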
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights give the same score as no weights
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
    # check that integer sample weights are equivalent to repeating the samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.shape[1] > 0:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
tafaRU/odoo | addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
#working time should take up 2/3 of the padded day, i.e. the padded day
#lasts 3/2 of the longest working day (e.g. 8h of work -> a 12h chart day)
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.py | 5 | 93671 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy_wrap
import scipy.spatial.distance as distance
from scipy.lib.six import string_types
from scipy.lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
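# Typical end-to-end sketch (illustrative only; ``y`` is assumed to be a
# condensed distance matrix as produced by scipy.spatial.distance.pdist):
#
#     Z = linkage(y, method='average')
#     labels = fcluster(Z, t=4, criterion='maxclust')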
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known by the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
      algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
      * method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
+ \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
        implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
        # The C code does not do proper striding, so copy the arrays if needed.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
_hierarchy_wrap.linkage_wrap(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
m = s[1]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_wrap(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n,
int(_cpy_euclid_methods[method]))
return Z
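# Minimal usage sketch (illustrative only), following the docstring above:
#
#     from scipy.spatial.distance import pdist
#     X = [[0., 0.], [0., 1.], [5., 5.], [5., 6.]]
#     Z = linkage(pdist(X), method='single')
#     # each row of Z is [cluster index 1, cluster index 2, distance, count]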
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
L : list
The pre-order traversal.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
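# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): converting a linkage
# matrix into a ClusterNode tree and reading off its leaves.  The data
# matrix below is purely hypothetical.
def _example_to_tree_usage():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
    Z = linkage(distance.pdist(X), method='single')
    root = to_tree(Z)
    # Leaf ids in left-to-right order of the dendrogram.
    return root.pre_order(lambda nd: nd.id)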
def _convert_to_bool(X):
if X.dtype != np.bool:
X = np.bool_(X)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = np.double(X)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
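# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): computing the
# cophenetic correlation coefficient of a linkage against the condensed
# distance matrix it was built from.  The data are hypothetical.
def _example_cophenet_usage():
    Y = distance.pdist(np.random.RandomState(0).rand(6, 3))
    Z = linkage(Y, method='average')
    c, coph_dists = cophenet(Z, Y)
    # c is a scalar; coph_dists is condensed, the same length as Y.
    return c, coph_dists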
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d))
return R
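# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): the inconsistency
# matrix R has one row per non-singleton cluster, and R[i, 3] equals
# (Z[i, 2] - R[i, 0]) / R[i, 1] computed over links up to ``d`` levels
# below that cluster.  The data are hypothetical.
def _example_inconsistent_usage():
    Z = linkage(distance.pdist(np.random.RandomState(1).rand(8, 2)),
                method='single')
    R = inconsistent(Z, d=2)    # shape (n - 1, 4)
    return R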
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
     * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy_wrap.calculate_cluster_sizes_wrap(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
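# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): round-tripping a
# linkage through the MATLAB(TM)-style representation.  to_mlab_linkage
# drops the count column and shifts indices to 1..N; from_mlab_linkage
# restores 0-based indices and recomputes the counts.
def _example_mlab_round_trip():
    Z = linkage(distance.pdist(np.random.RandomState(2).rand(5, 2)),
                method='complete')
    ZM = to_mlab_linkage(Z)
    Z2 = from_mlab_linkage(ZM)
    return np.allclose(Z, Z2)   # True for any valid linkage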
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Each successive merge distance must be no smaller than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True iff the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or
(Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
                                      'before it is formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy_wrap.cluster_in_wrap(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy_wrap.cluster_dist_wrap(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy_wrap.cluster_maxclust_dist_wrap(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_monocrit_wrap(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_maxclust_monocrit_wrap(Z, monocrit, T,
int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
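# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): forming flat clusters
# with the 'distance' and 'maxclust' criteria.  The data and thresholds
# below are arbitrary.
def _example_fcluster_usage():
    rs = np.random.RandomState(3)
    X = np.vstack([rs.rand(5, 2), rs.rand(5, 2) + 10.0])
    Z = linkage(distance.pdist(X), method='complete')
    T_dist = fcluster(Z, t=5.0, criterion='distance')  # cut at height 5.0
    T_k = fcluster(Z, t=2, criterion='maxclust')       # at most 2 clusters
    return T_dist, T_k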
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
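# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): fclusterdata is a
# convenience wrapper, roughly equivalent to calling distance.pdist,
# linkage and fcluster in sequence with the same parameters.  The data
# and threshold are arbitrary.
def _example_fclusterdata_usage():
    X = np.random.RandomState(4).rand(12, 3)
    T = fclusterdata(X, t=1.15, criterion='inconsistent',
                     metric='euclidean', method='single')
    return T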
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.prelist_wrap(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(p))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(p))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the non-blue link groupings, i.e. those groupings below the
# color threshold.
for color in colors_used:
if color != 'b':
ax.add_collection(colors_to_collections[color])
# If there is a blue grouping (i.e., links above the color threshold),
# it should go last.
if 'b' in colors_to_collections:
ax.add_collection(colors_to_collections['b'])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
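# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): replacing the palette
# cycled through for links below ``color_threshold``.  Note that this
# mutates module-level state for all subsequent dendrogram() calls.
def _example_set_link_color_palette():
    set_link_color_palette(['m', 'c', 'y', 'k'])
    # Links under the threshold now cycle magenta, cyan, yellow, black.
    return list(_link_line_colors)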
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
        observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
    leaf_rotation : double, optional
        Specifies the angle (in degrees) to rotate the leaf
        labels. When unspecified, the rotation is based on the number of
        nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        the cluster index :math:`k < 2n-1` of each leaf. The function
        is expected to return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
... if id < n:
... return str(id)
... else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
        ``'icoord'``
A list of lists ``[I1, I2, ..., Ip]`` where ``Ik`` is a list of 4
independent variable coordinates corresponding to the line that
represents the k'th link painted.
        ``'dcoord'``
          A list of lists ``[D1, D2, ..., Dp]`` where ``Dk`` is a list of 4
          dependent variable (height) coordinates corresponding to the line
          that represents the k'th link painted.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax)
return R
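# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): computing the
# dendrogram data structures without rendering anything, which also works
# when matplotlib is unavailable.  The data are hypothetical.
def _example_dendrogram_no_plot():
    Z = linkage(distance.pdist(np.random.RandomState(5).rand(10, 2)),
                method='average')
    R = dendrogram(Z, no_plot=True)
    # R['ivl'] holds leaf labels, R['icoord']/R['dcoord'] the link
    # coordinates, and R['color_list'] one color code per link.
    return R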
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.x
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = 'b'
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
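# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): two flat cluster
# assignments are equivalent when they induce the same partition, even if
# the cluster ids themselves differ.
def _example_is_isomorphic():
    T1 = [1, 1, 2, 2, 3]
    T2 = [7, 7, 5, 5, 9]    # same partition, relabelled
    T3 = [1, 2, 2, 2, 3]    # different partition
    return is_isomorphic(T1, T2), is_isomorphic(T1, T3)   # (True, False)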
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.get_max_dist_for_each_cluster_wrap(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
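# ----------------------------------------------------------------------
# Illustrative sketch (not part of the public API): finding the linkage
# nodes that lead each flat cluster produced by fcluster.  The data are
# hypothetical.
def _example_leaders_usage():
    X = np.vstack([np.zeros((3, 2)), 10.0 * np.ones((3, 2))])
    Z = linkage(distance.pdist(X), method='single')
    T = fcluster(Z, t=2, criterion='maxclust')
    L, M = leaders(Z, T)
    # L[j] is the id of the linkage node leading the flat cluster M[j].
    return L, M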
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| agpl-3.0 |
bnaul/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 3 | 15265 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
import pytest
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from io import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
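# For reference, the dense form of X built above is a 9 x 9 block-diagonal
# matrix whose three 3 x 3 blocks are filled with the value 3, e.g.:
#
#     [[3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [0 0 0 3 3 3 0 0 0]
#      ...
#      [0 0 0 0 0 0 3 3 3]]
#
# so each group of three "documents" only ever uses its own three "words".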
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in range(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert (X_trans > 0.0).any()
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_fit_transform(method):
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.partial_fit(X_1)
with pytest.raises(ValueError, match=r"^The provided data has"):
lda.partial_fit(X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
with pytest.raises(ValueError, match=regex):
model.fit(X)
def test_lda_negative_input():
    # test that passing a dense matrix with negative values raises an error.
X = np.full((5, 10), -1.)
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
with pytest.raises(ValueError, match=regex):
lda.fit(X)
def test_lda_no_component_error():
# test `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = ("This LatentDirichletAllocation instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this "
"estimator.")
with pytest.raises(NotFittedError, match=regex):
lda.perplexity(X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_components = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
lda.partial_fit(X)
with pytest.raises(ValueError, match=r"^The provided data has"):
lda.partial_fit(X_2)
@if_safe_multiprocessing_with_blas
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_multi_jobs(method):
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
with pytest.raises(ValueError, match=r'Number of samples'):
lda._perplexity_precomp_distr(X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
with pytest.raises(ValueError, match=r'Number of topics'):
lda._perplexity_precomp_distr(X, invalid_n_components)
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_perplexity(method):
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_score(method):
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert score_2 >= score_1
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
@pytest.mark.parametrize(
'verbose,evaluate_every,expected_lines,expected_perplexities',
[(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1)])
def test_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities)
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/manifold/t_sne.py | 106 | 20057 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
    distances : array, shape (n_samples, n_samples)
        Square matrix of pairwise (squared) distances between the samples,
        as passed in by ``fit``; only the returned joint probabilities are
        stored in condensed form.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
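# --- Hedged sketch (illustrative only, not executed by the library) ----------
# The analytic gradient above can be sanity-checked against a central finite
# difference on a tiny random problem. The sizes, the random P (scaled to sum
# to 0.5, i.e. a symmetric joint distribution in condensed form) and the
# tolerance are arbitrary assumptions made for this check.
#
#     rng = np.random.RandomState(0)
#     n_s, n_c = 5, 2
#     P_chk = rng.rand(n_s * (n_s - 1) // 2)
#     P_chk = 0.5 * P_chk / P_chk.sum()
#     params_chk = rng.randn(n_s * n_c)
#     cost, grad_an = _kl_divergence(params_chk, P_chk, 1.0, n_s, n_c)
#     eps = 1e-6
#     grad_fd = np.array(
#         [(_kl_divergence(params_chk + eps * e, P_chk, 1.0, n_s, n_c)[0]
#           - _kl_divergence(params_chk - eps * e, P_chk, 1.0, n_s, n_c)[0])
#          / (2 * eps) for e in np.eye(n_s * n_c)])
#     np.allclose(grad_an, grad_fd, atol=1e-5)   # expected to hold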
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
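# --- Hedged usage sketch (illustrative only, not executed by the library) ----
# ``_gradient_descent`` only needs an objective returning ``(cost, grad)``.
# A toy quadratic makes the calling convention clear; the starting point and
# the learning rate below are arbitrary assumptions for this toy problem, not
# recommended t-SNE settings.
#
#     def _toy_objective(p):
#         return np.sum(p ** 2), 2.0 * p
#
#     p_opt, cost, last_it = _gradient_descent(_toy_objective,
#                                               np.array([3.0, -2.0]), it=0,
#                                               n_iter=100, learning_rate=0.1)
#     # p_opt ends up close to [0, 0] and cost close to 0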
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
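# --- Hedged usage sketch (illustrative only) ----------------------------------
# Feeding the data back in as its own "embedding" preserves every neighbourhood
# exactly, so the score is 1.0; the random data below is an arbitrary assumption.
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(20, 5)
#     trustworthiness(X_demo, X_demo, n_neighbors=3)   # -> 1.0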
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28...,  -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
richardtran415/pymatgen | pymatgen/io/lammps/tests/test_inputs.py | 5 | 4354 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import filecmp
import os
import re
import shutil
import unittest
import pandas as pd
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.inputs import LammpsRun, write_lammps_inputs
from pymatgen.util.testing import PymatgenTest
class LammpsRunTest(unittest.TestCase):
maxDiff = None
def test_md(self):
s = Structure.from_spacegroup(225, Lattice.cubic(3.62126), ["Cu"], [[0, 0, 0]])
ld = LammpsData.from_structure(s, atom_style="atomic")
ff = "\n".join(["pair_style eam", "pair_coeff * * Cu_u3.eam"])
md = LammpsRun.md(data=ld, force_field=ff, temperature=1600.0, nsteps=10000)
md.write_inputs(output_dir="md")
with open(os.path.join("md", "in.md")) as f:
md_script = f.read()
script_string = """# Sample input script template for MD
# Initialization
units metal
atom_style atomic
# Atom definition
read_data md.data
#read_restart md.restart
# Force field settings (consult official document for detailed formats)
pair_style eam
pair_coeff * * Cu_u3.eam
# Create velocities
velocity all create 1600.0 142857 mom yes rot yes dist gaussian
# Ensemble constraints
#fix 1 all nve
fix 1 all nvt temp 1600.0 1600.0 0.1
#fix 1 all npt temp 1600.0 1600.0 0.1 iso $pressure $pressure 1.0
# Various operations within timestepping
#fix ...
#compute ...
# Output settings
#thermo_style custom ... # control the thermo data type to output
thermo 100 # output thermo data every N steps
#dump 1 all atom 100 traj.*.gz # dump a snapshot every 100 steps
# Actions
run 10000
"""
self.assertEqual(md_script, script_string)
self.assertTrue(os.path.exists(os.path.join("md", "md.data")))
@classmethod
def tearDownClass(cls):
temp_dirs = ["md"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
class FuncTest(unittest.TestCase):
def test_write_lammps_inputs(self):
# script template
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "kappa.txt")) as f:
kappa_template = f.read()
kappa_settings = {"method": "heat"}
write_lammps_inputs(output_dir="heat", script_template=kappa_template, settings=kappa_settings)
with open(os.path.join("heat", "in.lammps")) as f:
kappa_script = f.read()
fix_hot = re.search(r"fix\s+hot\s+all\s+([^\s]+)\s+", kappa_script)
# placeholders supposed to be filled
self.assertEqual(fix_hot.group(1), "heat")
fix_cold = re.search(r"fix\s+cold\s+all\s+([^\s]+)\s+", kappa_script)
self.assertEqual(fix_cold.group(1), "heat")
lattice = re.search(r"lattice\s+fcc\s+(.*)\n", kappa_script)
# parentheses not supposed to be filled
self.assertEqual(lattice.group(1), "${rho}")
pair_style = re.search(r"pair_style\slj/cut\s+(.*)\n", kappa_script)
self.assertEqual(pair_style.group(1), "${rc}")
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "in.peptide")) as f:
peptide_script = f.read()
# copy data file
src = os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "data.quartz")
write_lammps_inputs(output_dir="path", script_template=peptide_script, data=src)
dst = os.path.join("path", "data.peptide")
self.assertTrue(filecmp.cmp(src, dst, shallow=False))
# write data file from obj
obj = LammpsData.from_file(src, atom_style="atomic")
write_lammps_inputs(output_dir="obj", script_template=peptide_script, data=obj)
obj_read = LammpsData.from_file(os.path.join("obj", "data.peptide"), atom_style="atomic")
pd.testing.assert_frame_equal(obj_read.masses, obj.masses)
pd.testing.assert_frame_equal(obj_read.atoms, obj.atoms)
@classmethod
def tearDownClass(cls):
temp_dirs = ["heat", "path", "obj"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
if __name__ == "__main__":
unittest.main()
| mit |
cchaoss/paparazzi | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
utm.east = i * dist_points+ utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
raghavrv/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 56 | 11274 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater, assert_equal, assert_true
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Verifying that steps < 1 don't blow up.
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfecv_verbose_output():
# Check verbose=1 is producing an output.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
sys.stdout = StringIO()
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, verbose=1)
rfecv.fit(X, y)
verbose_output = sys.stdout
verbose_output.seek(0)
assert_greater(len(verbose_output.readline()), 0)
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
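    # Worked example (illustrative only): n_features=11, n_features_to_select=3,
    # step=2 gives formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 5 and
    # formula2 = 1 + ceil((11 - 3) / 2) = 5, i.e. the two expressions agree.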
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| bsd-3-clause |
alexmoratalla/yambopy | scripts/analyse_gw.py | 2 | 4597 | from __future__ import print_function
from builtins import str
from builtins import range
import matplotlib
#matplotlib.use('Agg') # prevents crashes if no X server present (clusters)
from yambopy import *
import matplotlib.pyplot as plt
import sys
import argparse
import numpy as np
import operator
"""
Study the convergence of GW calculations by looking at the change in band-gap value.
The script reads from <folder> all results from <variable> calculations and display them.
Use the band and k-point options (or change default values) according to the size of your k-grid and
the location of the band extrema.
"""
parser = argparse.ArgumentParser(description='Study GW convergence with regard to the band-gap value.')
parser.add_argument('folder' , help='Folder containing SAVE and convergence runs.')
parser.add_argument('variable' , help='Variable tested (e.g. FFTGvecs)' )
parser.add_argument('-bc','--bandc' , help='Lowest conduction band number' , default=53, type=int)
parser.add_argument('-kc','--kpointc' , help='K-point index for conduction band', default=19, type=int)
parser.add_argument('-bv','--bandv' , help='Highest valence band number' , default=52, type=int)
parser.add_argument('-kv','--kpointv' , help='K-point index for valence band' , default=1, type=int)
parser.add_argument('-np','--nopack' , help='Skips packing o- files into .json files', action='store_false')
parser.add_argument('-t' ,'--text' , help='Also print a text file for reference' , action='store_true')
args = parser.parse_args()
folder = args.folder
var = args.variable
bandc = args.bandc
kpointc= args.kpointc
bandv = args.bandv
kpointv= args.kpointv
nopack = args.nopack
text = args.text
print('Valence band: ',bandv,'conduction band: ',bandc)
print('K-point VB: ',kpointv, ' k-point CB: ',kpointc)
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if nopack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant variable
outvars = data.get_data(var)
invars = data.get_inputfiles_tag(var)
tags = data.get_tags(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
print('Preparing output...')
### Output
# Unit of the variable :
unit = invars[keys[0]]['variables'][var][1]
# The following variables are used to make the script compatible with both short and extended output
#kpindex = tags[keys[0]].tolist().index('K-point')
kpindex = tags[keys[0]].tolist().index('Kpoint_index') # Alejandro
bdindex = tags[keys[0]].tolist().index('Band')
e0index = tags[keys[0]].tolist().index('Eo')
gwindex = tags[keys[0]].tolist().index('E-Eo')
array = np.zeros((len(keys),2))
for i,key in enumerate(keys):
# input value
# GbndRnge and BndsRnX_ are special cases
if var.startswith('GbndRng') or var.startswith('BndsRnX'):
# format : [1, nband, ...]
array[i][0] = invars[key]['variables'][var][0][1]
else:
array[i][0] = invars[key]['variables'][var][0]
# Output value (gap energy)
# First the relevant lines are identified
valence=[]
conduction=[]
    for j in range(len(outvars[key])):
if outvars[key][j][kpindex]==kpointc and outvars[key][j][bdindex]==bandc:
conduction=outvars[key][j]
elif outvars[key][j][kpindex]==kpointv and outvars[key][j][bdindex]==bandv:
valence = outvars[key][j]
# Then the gap can be calculated
array[i][1] = conduction[e0index]+conduction[gwindex]-(valence[e0index]+valence[gwindex])
if text:
filename = folder+'_'+var+'.dat'
    header = var+' ('+str(unit)+'), gap'
np.savetxt(filename,array,delimiter='\t',header=header)
print(filename)
plt.plot(array[:,0],array[:,1],'o-')
plt.xlabel(var+' ('+unit+')')
plt.ylabel('E_gw = E_lda + \Delta E')
plt.show()
#plt.savefig(folder+'_'+var+'.png')
# Plot all of the different GW bandstructures in the same plot
#ya = YamboAnalyser(folder)
#ya.plot_gw('qp',cols=(lambda x: x[2],lambda x: x[3]+x[4]))
| bsd-3-clause |
JPFrancoia/scikit-learn | doc/conf.py | 12 | 9568 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2016, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
amuramatsu/dwf | examples/AnalogIn_Record.py | 1 | 2201 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
DWF Python Example
Author: Digilent, Inc.
Revision: 10/17/2013
Requires:
Python 2.7, 3.3 or later
numpy, matplotlib
"""
import dwf
import time
import matplotlib.pyplot as plt
#print DWF version
print("DWF Version: " + dwf.FDwfGetVersion())
#constants
HZ_ACQ = 10e3
N_SAMPLES = 100000
#open device
print("Opening first device")
dwf_ao = dwf.DwfAnalogOut()
print("Preparing to read sample...")
print("Generating sine wave...")
dwf_ao.nodeEnableSet(0, dwf_ao.NODE.CARRIER, True)
dwf_ao.nodeFunctionSet(0, dwf_ao.NODE.CARRIER, dwf_ao.FUNC.SINE)
dwf_ao.nodeFrequencySet(0, dwf_ao.NODE.CARRIER, 1.0)
dwf_ao.nodeAmplitudeSet(0, dwf_ao.NODE.CARRIER, 2.0)
dwf_ao.configure(0, True)
#set up acquisition
dwf_ai = dwf.DwfAnalogIn(dwf_ao)
dwf_ai.channelEnableSet(0, True)
dwf_ai.channelRangeSet(0, 5.0)
dwf_ai.acquisitionModeSet(dwf_ai.ACQMODE.RECORD)
dwf_ai.frequencySet(HZ_ACQ)
dwf_ai.recordLengthSet(N_SAMPLES / HZ_ACQ)
#wait at least 2 seconds for the offset to stabilize
time.sleep(2)
#begin acquisition
dwf_ai.configure(False, True)
print(" waiting to finish")
rgdSamples = []
cSamples = 0
fLost = False
fCorrupted = False
while cSamples < N_SAMPLES:
sts = dwf_ai.status(True)
if cSamples == 0 and sts in (dwf_ai.STATE.CONFIG,
dwf_ai.STATE.PREFILL,
dwf_ai.STATE.ARMED):
# Acquisition not yet started.
continue
cAvailable, cLost, cCorrupted = dwf_ai.statusRecord()
cSamples += cLost
if cLost > 0:
fLost = True
if cCorrupted > 0:
fCorrupted = True
if cAvailable == 0:
continue
if cSamples + cAvailable > N_SAMPLES:
cAvailable = N_SAMPLES - cSamples
# get samples
rgdSamples.extend(dwf_ai.statusData(0, cAvailable))
cSamples += cAvailable
print("Recording finished")
if fLost:
print("Samples were lost! Reduce frequency")
if fCorrupted:
print("Samples could be corrupted! Reduce frequency")
with open("record.csv", "w") as f:
for v in rgdSamples:
f.write("%s\n" % v)
plt.plot(rgdSamples)
plt.show()
| mit |
homeslike/OpticalTweezer | scripts/p0.9_at0.05/vCOMhistogram.py | 28 | 2448 | import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from subprocess import call
from scipy.stats import norm
# proc = call("ls *.dat",shell=True)
# datetime = "170123_2033_"
datetime = sys.argv[1]+"_"
gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100)
gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100)
vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100)
vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100)
vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100)
N = 32
vSqd = []
for i in range(0,len(vCOMData_x)):
    # 0.5*|v_COM|^2 per sample, using all three Cartesian components
    vSqd.append((vCOMData_x[i]*vCOMData_x[i]+vCOMData_y[i]*vCOMData_y[i]+vCOMData_z[i]*vCOMData_z[i])*0.5)
vSqdMean = np.mean(vSqd)
histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True)
histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True)
histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True)
inTemp = np.mean(gasTempDataIn)
outTemp = np.mean(gasTempDataOut)
statistics = open(datetime+"statistics.dat","w")
statistics.write("GasIn: " + str(inTemp)+"\n")
statistics.write("GasOut: " + str(outTemp)+"\n")
statistics.write("T_COM: " + str(2./3. * vSqdMean)+"\n")
statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n")
statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n")
statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n")
statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n")
statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n")
statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n")
histogram_x_file = open(datetime+"histogram_vx.dat","w")
histogram_y_file = open(datetime+"histogram_vy.dat","w")
histogram_z_file = open(datetime+"histogram_vz.dat","w")
for i in range(0,len(histogram_x)):
histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n")
histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n")
histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n")
# plt.figure(1)
# plt.hist(vCOMData_x,bins=100)
# plt.figure(2)
# plt.hist(vCOMData_y,bins=100)
# plt.figure(3)
# plt.hist(vCOMData_z,bins=100)
# plt.show()
# plt.figure(1)
# plt.plot(vSqd)
# plt.plot((0,700),(vSqdMean,vSqdMean))
# plt.figure(2)
# plt.hist(vCOMData_x,bins=100,normed=True)
# plt.plot(x,gasInPDF)
# plt.show()
| mit |
socialsensor/public-figure-image-ranking | python/staticCommEventTask.py | 1 | 35270 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name:
# Purpose: This .py file is the class file that does all the work
# It ranks images in specific events
#
# Required libs: python-dateutil, numpy, matplotlib, pyparsing
# Author: konkonst
#
# Created: 30/03/2014
# Copyright: (c) ITI (CERTH) 2014
# Licence: <apache licence 2.0>
#-------------------------------------------------------------------------------
import json, codecs, os, glob, time, dateutil.parser, collections, datetime, pickle, re, math
import urllib.request,webbrowser
import networkx as nx
class communitystatic:
@classmethod
def from_txt(cls,filename,dataset_path_results,dataset_path_tmp,timeLimit = 0):
#Nodes that will be removed
try:
stopNodes = open('./data/txt/stopNodes.txt').readlines()
stopNodes = [x.strip().lower() for x in stopNodes]
stopNodes = [x.replace(' - ','_').replace(' ','_') for x in stopNodes if x]
except:
stopNodes = []
'''Parse the txt files into authors/mentions/alltime lists'''
authors, mentions = {}, {}
totPics,totPeople = 0,0
photodict={}
userdict = {}
captiondict = {}
emptyvalues = 0
print(filename)
with codecs.open(filename, "r", 'utf-8',errors='ignore') as f:
for line in f:
read_line = line.strip().encode('utf-8')
read_line = read_line.decode('utf-8')
try:
splitLine = read_line.split("\t")
dt = dateutil.parser.parse(splitLine[0])
mytime = int(time.mktime(dt.timetuple()))
numFaces = int(splitLine[5])
if mytime > timeLimit:
eventIds = splitLine[1].replace(' ','').split(',')
if not eventIds:
emptyvalues+=1
else:
for eventId in eventIds:
if eventId:
peopleLine = splitLine[3].strip(', \n').strip('\n').replace('.','').replace('?','-').replace(', ',',').replace(', ,','').lower().split(",")
peopleLine = [x.replace(' - ','_').replace(' ','_') for x in peopleLine if x]
peopleLine = [x for x in peopleLine if x not in stopNodes]
peopleLine.sort()
numofpeople = len(peopleLine)
if numofpeople > 1:#and numFaces>=numofpeople
if eventId not in photodict:
photodict[eventId] = {}
if eventId not in userdict:
userdict[eventId] = {}
if eventId not in captiondict:
captiondict[eventId] = {}
if eventId not in authors:
authors[eventId] = []
if eventId not in mentions:
mentions[eventId] = []
photodict[eventId][totPics] = {}
photodict[eventId][totPics]['nodes'] = peopleLine
photodict[eventId][totPics]['url'] = splitLine[2]
photodict[eventId][totPics]['date'] = mytime
captiondict[eventId][totPics] = splitLine[-2]
totPeople+=numofpeople
for idx,tmpAuth in enumerate(peopleLine[:-1]):
if tmpAuth in userdict[eventId]:
userdict[eventId][tmpAuth].append(totPics)
else:
userdict[eventId][tmpAuth]=[totPics]
theRest=peopleLine[idx+1:]
for tmpMent in theRest:
authors[eventId].append(tmpAuth)
mentions[eventId].append(tmpMent)
if peopleLine[-1] in userdict[eventId]:
userdict[eventId][peopleLine[-1]].append(totPics)
else:
userdict[eventId][peopleLine[-1]]=[totPics]
totPics+=1
except:
pass
f.close()
statsfile = open(dataset_path_results+"basicstats.txt",'w')
statement = ('Total # of Event Pics= ' + str(totPics)+
"\nTotal # of People in Event Pics: "+str(totPeople)+
"\nTotal empty eventIds: "+str(emptyvalues))
print(statement)
statsfile.write(statement)
statsfile.close()
return cls(authors, mentions, photodict, userdict, captiondict, dataset_path_results, dataset_path_tmp)
def __init__(self, authors, mentions, photodict, userdict, captiondict, dataset_path_results, dataset_path_tmp):
self.authors = authors
self.mentions = mentions
self.photodict = photodict
self.userdict = userdict
self.captiondict = captiondict
self.uniqueUsers = {}
self.dataset_path_results = dataset_path_results
self.dataset_path_tmp = dataset_path_tmp
def extraction(self,commDetectMethod, eventIdInput):
self.eventIdInput = eventIdInput
'''Extract adjacency lists,mats,user and community centrality and communities bags'''
import community,link_clustering_din
import Demon as D
# '''Write all of the captions of the event to a file'''
# allcaptions = []
# for pic,(caption) in self.captiondict[eventIdInput].items():
# allcaptions.append(caption)
# allcaptions = set(allcaptions)
# captionfile = codecs.open(self.dataset_path_results+str(self.eventIdInput)+"/allcaptions.txt", 'w', "utf-8")
# for caption in allcaptions:
# captionfile.write(caption + "\n")
# captionfile.close()
self.commPgRnkBag = {}
'''Extract unique users globally and construct dictionary'''
usrs = self.authors[eventIdInput].copy()
usrs.extend(self.mentions[eventIdInput])
usrs = list(set(usrs))
usrs.sort()
uniqueUsers, counter1 = {}, 0
for tmpusrs in usrs:
uniqueUsers[tmpusrs] = counter1
counter1 += 1
self.uniqueUsers = uniqueUsers
'''Make pairs of users with weights'''
usersPair = list(zip(self.authors[eventIdInput], self.mentions[eventIdInput]))
#Create weighted adjacency list
weighted = collections.Counter(usersPair)
weighted = list(weighted.items())
adjusrs, weights = zip(*weighted)
adjauthors, adjments = zip(*adjusrs)
adjList = list(zip(adjauthors, adjments, weights))
'''Make pairs of users(their uniqueUsers Ids) with weights for Copra'''
authorsNum,mentionsNum = [], []
for idx,auth in enumerate(adjauthors):
authorsNum.append(uniqueUsers[auth])
mentionsNum.append(uniqueUsers[adjments[idx]])
adjListNum = list(zip(authorsNum, mentionsNum, weights))
del(adjusrs,adjauthors,adjments,weights,weighted,usersPair, authorsNum, mentionsNum, self.authors,self.mentions, self.captiondict)
# '''Write pairs of users to txt file for Gephi'''
# if not os.path.exists(self.dataset_path_results+"forGephi/"):
# os.makedirs(self.dataset_path_results+"forGephi/")
# my_txt = codecs.open(self.dataset_path_results+"forGephi/event_"+str(self.eventIdInput)+".txt", "w", "utf-8")
# my_txt.write("Source,Target,Weight,Type"+"\n")
# for line in adjList:
# line = list(line)
# line.append('Undirected')
# my_txt.write(",".join(str(x) for x in line) + "\n")
# my_txt.close()
'''Write pairs of users to txt file for COPRA and invert the uniqueUsers dictionary'''
if commDetectMethod[0] == 'Copra':
my_txt = open(self.dataset_path_tmp+"forCopra.txt", "w")
for line in adjListNum:
my_txt.write(" ".join(str(x) for x in line) + "\n")
my_txt.close()
uniqueUsersInv = {v:k for k, v in uniqueUsers.items()}
'''Construct networkX graph'''
tempGraph = nx.Graph()
tempGraph.add_weighted_edges_from(adjList)
tempGraph.remove_edges_from(tempGraph.selfloop_edges())
'''Extract the centrality of each user using the PageRank algorithm'''
tempUserPgRnk = nx.pagerank(tempGraph, alpha=0.85, max_iter=100, tol=0.001)
minPGR=min((pgr for k,(pgr) in tempUserPgRnk.items()))
for k in tempUserPgRnk.items():
tempUserPgRnk[k[0]]/=minPGR
self.userPgRnkBag = tempUserPgRnk
'''Detect Communities using the louvain algorithm'''
partitionLouv = community.best_partition(tempGraph)
# inv_partitionLouv = {}
# for k, v in partitionLouv.items():
# inv_partitionLouv[v] = inv_partitionLouv.get(v, [])
# inv_partitionLouv[v].append(k)
# inv_partitionLouv[v].sort()
# strCommsLouv = [inv_partitionLouv[x] for x in inv_partitionLouv]
# strCommsLouv.sort(key=len, reverse=True)
if commDetectMethod[0] == 'Ahn':
'''Detect Communities using the Ahn algorithm'''
inv_partition = link_clustering_din.ahnsmethod(adjList, threshold=commDetectMethod[1])
strComms = [inv_partition[x] for x in inv_partition]
strComms.sort(key=len, reverse=True)
elif commDetectMethod[0] == 'Demon':
DemObj = D.Demon()
strComms = DemObj.execute(tempGraph, weighted=True, min_community_size=1)
strComms.sort(key=len, reverse=True)
inv_partition=0
elif commDetectMethod[0] == 'Copra':
'''Detect Communities using the Copra algorithm'''
os.system("java -cp ./copra.jar COPRA " + self.dataset_path_tmp + "forCopra.txt -w -extrasimplify -v 10 -mo -repeat 3")
if os.path.exists(self.dataset_path_tmp+'best-clusters-forCopra.txt'):
os.remove(self.dataset_path_tmp+'best-clusters-forCopra.txt')
os.rename('./best-clusters-forCopra.txt',self.dataset_path_tmp + 'best-clusters-forCopra.txt')
flag = False
strComms = []
while flag == False:
try:
with open(self.dataset_path_tmp+'best-clusters-forCopra.txt', "r") as f:
for line in f:
read_line = line.strip()
numComm = [uniqueUsersInv[int(x)] for x in read_line.split(' ')]
strComms.append(numComm)
strComms.sort()
flag = True
except:
time.sleep(6)
pass
inv_partition=0
if os.path.exists(self.dataset_path_tmp+'best-clusters-forCopra.txt'):
os.remove(self.dataset_path_tmp+'best-clusters-forCopra.txt')
os.remove(self.dataset_path_tmp+'forCopra.txt')
for filename in glob.glob("./clusters*.txt"):#delete txt files
os.remove(filename)
else:
print('No such method as:'+commDetectMethod[0])
del(adjList,inv_partition)
'''Construct Community Dictionary'''
self.commStrBag = strComms
self.partitionLouv = partitionLouv
self.commDetectMethod = commDetectMethod[0]
self.photodict = self.photodict[self.eventIdInput]
self.userdict = self.userdict[self.eventIdInput]
# statement = 'Total # of communities is '+str(len(strComms))+'\n'
# statsfile = open(self.dataset_path_results+ str(self.eventIdInput) +"/basicstats.txt",'w')
# print(statement)
# statsfile.write(statement)
# statsfile.close()
dataCommPck = open(self.dataset_path_tmp+'comm_'+self.commDetectMethod+'Ev_'+str(self.eventIdInput)+'.pck','wb')
pickle.dump(self, dataCommPck , protocol = 2)
dataCommPck.close()
return self
def photoRetrieval(self, topImages, captiondict,decisionforAll):
# if not os.path.exists(self.dataset_path_results+str(self.eventIdInput)):
# os.makedirs(self.dataset_path_results+str(self.eventIdInput))
decisionforAll = str(decisionforAll).lower()
self.decisionforAll = decisionforAll
'''Rank users in accordance to their centrality'''
usrCentrSorted = sorted(self.userPgRnkBag, key = self.userPgRnkBag.get, reverse = True)
maxPGR=max((pgr for k,(pgr) in self.userPgRnkBag.items()))
for k in self.userPgRnkBag.items():
self.userPgRnkBag[k[0]]/=maxPGR
'''Retrieve images in respect to their overlapping of communities'''
#Check for overlapping nodes in between communities
overlNodes = {}
for node in self.uniqueUsers.keys():
nodeComms = []
for idx, comms in enumerate(self.commStrBag):
if node in comms:
nodeComms.append(idx)
if len(nodeComms) > 1:
overlNodes[node]=nodeComms
#Retrieve images if any overlapping occurs
if len(overlNodes) > 0:
rankedOverlNodes = sorted(overlNodes, key=lambda k: len(overlNodes[k])+self.userPgRnkBag[k], reverse=True)
'''Retrieve photos with most common overlappers'''
photoPool, maxPlain = [], 0
for node in rankedOverlNodes:
photoPool.extend(self.userdict[node])
maxPlain = max(maxPlain,len(self.userdict[node]))
if decisionforAll == str('y') or decisionforAll == str(3):
doIt = input('\nRetrieve photos with most common overlappers???(y or n)')
else:
doIt = 'y'
if str(doIt).lower() == 'y' or str(doIt).lower() == str(3):
commonPhotos = collections.Counter(photoPool)
commonKeys = list(commonPhotos.keys())
commonVals = list(commonPhotos.values())
for keyidx,k in enumerate(commonKeys):
if commonVals[keyidx] < 2:
del(commonPhotos[k])
poolIntersex2 = {}
if len(commonPhotos) > 0:
poolIntersex = commonPhotos.most_common()
mostcomm, combinationDict = [], {}#combinationDict is used to retrieve images if the presented one is incorrect
for vals in poolIntersex:
commonNodes = list(set(self.photodict[vals[0]]['nodes']) & set(rankedOverlNodes))
poolIntersex2[vals[0]] = sum([len(overlNodes[k]) for k in commonNodes])
entropy = [self.partitionLouv[x] for x in self.photodict[vals[0]]['nodes']]
entropy = myentropy(entropy)
tmp = [rankedOverlNodes.index(x) for x in self.photodict[vals[0]]['nodes'] if x in rankedOverlNodes]
mostcomm.append([vals[0], poolIntersex2[vals[0]], entropy, recRankCustom(tmp)])
if ','.join(self.photodict[vals[0]]['nodes']) in combinationDict:
combinationDict[','.join(self.photodict[vals[0]]['nodes'])].append(vals[0])
combinationDict[','.join(self.photodict[vals[0]]['nodes'])] = list(set(combinationDict[','.join(self.photodict[vals[0]]['nodes'])]))
combinationDict[','.join(self.photodict[vals[0]]['nodes'])].sort()
else:
combinationDict[','.join(self.photodict[vals[0]]['nodes'])] = [vals[0]]
mostcomm.sort(key=lambda k: (k[1]*(1+k[2]),k[3]), reverse = True)
else:
mostcomm = []
combinationDict = {}
rankedPhotos, measurementContainer = [], {}
for comb in mostcomm:
rankedPhotos.append(comb[0])
measurementContainer[comb[0]] = comb[1]*(1+comb[2])
#Save results
methodName = 'Coappearing_Overlappers'
print('\n'+methodName)
if decisionforAll == str('y') or decisionforAll == str(3):
resultsSaver(methodName,topImages,str(self.eventIdInput),rankedPhotos,measurementContainer,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
resultsSaverAll(methodName,topImages,str(self.eventIdInput),rankedPhotos,measurementContainer,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
combinationDict = {}
'''Retrieve photos according to the overlappers' ranking'''
if decisionforAll == str('y') or decisionforAll == str(3):
doIt = input('\nRetrieve photos according to the overlappers\' ranking???(y or n)')
else:
doIt = 'y'
if str(doIt).lower() == 'y' or str(doIt).lower() == 3:
rankedPhotos = []
cnt= 0
measurementContainer = {}
while cnt <= maxPlain:
for node in rankedOverlNodes:
photokeys = self.userdict[node]
if cnt <= len(photokeys):
photoRank = {}
for photoId in photokeys:
tmp = [usrCentrSorted.index(x) for x in self.photodict[photoId]['nodes']]
photoRank[photoId] = recRankCustom(tmp)
measurementContainer[photoId] = photoRank[photoId]
if ','.join(self.photodict[photoId]['nodes']) in combinationDict:
combinationDict[','.join(self.photodict[photoId]['nodes'])].append(photoId)
combinationDict[','.join(self.photodict[photoId]['nodes'])] = list(set(combinationDict[','.join(self.photodict[photoId]['nodes'])]))
else:
combinationDict[','.join(self.photodict[photoId]['nodes'])] = [photoId]
plainRanking = sorted(photoRank, key = photoRank.get, reverse = True)
cnt2=0
try:
while plainRanking[cnt+cnt2] in rankedPhotos:
cnt2+=1
rankedPhotos.append(plainRanking[cnt+cnt2])
except:
pass
cnt+=1
#Save results
methodName = 'Ranked_Overlappers'
print('\n'+methodName)
if decisionforAll == str('y') or decisionforAll == str(3):
resultsSaver(methodName,topImages,self.eventIdInput,rankedPhotos,measurementContainer,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
resultsSaverAll(methodName,topImages,self.eventIdInput,rankedPhotos,measurementContainer,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
print('No overlapping occurred')
combinationDict = {}
'''Rank images by creating measuring value for each photo using reciprocal of ranks of the contained nodes'''
if decisionforAll == str('y') or decisionforAll == str(3):
doIt = input('\nRank images by reciprocal of ranks of the contained nodes???(y or n)')
else:
doIt = 'y'
if str(doIt).lower() == 'y' or str(doIt).lower() == str(3):
# photoTxt = open(self.dataset_path_results+str(self.eventIdInput)+'/allphotos.txt','w')
photokeys = self.photodict.keys()
photoRank = {}
for photoId in photokeys:
tmp = [usrCentrSorted.index(x) for x in self.photodict[photoId]['nodes']]
photoRank[photoId] = recRankCustom(tmp)
if ','.join(self.photodict[photoId]['nodes']) in combinationDict:
combinationDict[','.join(self.photodict[photoId]['nodes'])].append(photoId)
combinationDict[','.join(self.photodict[photoId]['nodes'])] = list(set(combinationDict[','.join(self.photodict[photoId]['nodes'])]))
else:
combinationDict[','.join(self.photodict[photoId]['nodes'])] = [photoId]
#Save all images and all urls for this event
# photoTxt.write(self.photodict[photoId]['url']+'\n')
# photoTxt.close()
plainRanking = sorted(photoRank, key = photoRank.get, reverse = True)#sort photos by RRF value
#Save results
methodName = 'ReciprocalRanks'
print('\n'+methodName)
if decisionforAll == str('y') or decisionforAll == str(3):
resultsSaver(methodName,topImages,self.eventIdInput,plainRanking,photoRank,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
resultsSaverAll(methodName,topImages,self.eventIdInput,plainRanking,photoRank,self.photodict,self.dataset_path_results,captiondict,combinationDict)
'''Create baseline solutions'''
def popularity_coapperence(self, topImages, captiondict):
decisionforAll = self.decisionforAll
'''Aggregate node frequency (popularity)'''
peoplePopularity = {}#compute node popularity in this specific event
for node in self.userdict.keys():
peoplePopularity[node] = len(self.userdict[node])
sortedPeople = sorted(peoplePopularity, key = peoplePopularity.get, reverse = True)
photoRank, photoRankSum = {}, {} #use node popularity in event to compute photo overall popularity
nodePool, photoPool, combinationDict = [], [], {}#combinationDict is used to retrieve images if the presented one is incorrect
for photo in self.photodict:
photoPool.append(photo)
tmpPop = [sortedPeople.index(x) for x in self.photodict[photo]['nodes']]
photoRank[photo] = recRankCustom(tmpPop)
tmpPopSum = [peoplePopularity[x] for x in self.photodict[photo]['nodes']]
photoRankSum[photo] = sum(tmpPopSum)
nodePool.append(self.photodict[photo]['nodes'])#create photopool for coappearence frequency below
if ','.join(self.photodict[photo]['nodes']) in combinationDict:
combinationDict[','.join(self.photodict[photo]['nodes'])].append(photo)
combinationDict[','.join(self.photodict[photo]['nodes'])] = list(set(combinationDict[','.join(self.photodict[photo]['nodes'])]))
else:
combinationDict[','.join(self.photodict[photo]['nodes'])] = [photo]
if decisionforAll == str('y') or decisionforAll == str(3):
doIt = input('\nRank images by node popularity (frequency)???(y or n)')
else:
doIt = 'y'
if str(doIt).lower() == 'y' or str(doIt).lower() == str(3):
plainRanking = sorted(photoRank, key = photoRank.get, reverse = True)
plainRankingSum = sorted(photoRankSum, key = photoRankSum.get, reverse = True)
#Save results
methodName = 'basePopularityRRF'
print('\n'+methodName)
if decisionforAll == str('y') or decisionforAll == str(3):
resultsSaver(methodName,topImages,self.eventIdInput,plainRanking,photoRank,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
resultsSaverAll(methodName,topImages,self.eventIdInput,plainRanking,photoRank,self.photodict,self.dataset_path_results,captiondict,combinationDict)
#Save results
methodName = 'basePopularitySum'
print('\n'+methodName)
if decisionforAll == str('y') or decisionforAll == str(3):
resultsSaver(methodName,topImages,self.eventIdInput,plainRankingSum,photoRankSum,self.photodict,self.dataset_path_results,captiondict,combinationDict)
else:
resultsSaverAll(methodName,topImages,self.eventIdInput,plainRankingSum,photoRankSum,self.photodict,self.dataset_path_results,captiondict,combinationDict)
'''Coappearence frequency'''
if decisionforAll == str('y') or decisionforAll == str(3):
doIt = input('\nRank images by Coappearence frequency???(y or n)')
else:
doIt = 'y'
if str(doIt).lower() == 'y' or str(doIt).lower() == str(3):
photoCoappearence = {} #use node coappearence in event to compute photo overall popularity
coappPop = {}
for idx,nodes in enumerate(nodePool):
for idx2,poolnodes in enumerate(nodePool[idx:]):
commonNodes = list(set(nodes) & set(poolnodes))
commonNodes.sort()
if len(commonNodes) > 1:
nodestr = ','.join(commonNodes)
if nodestr not in photoCoappearence:
coappPop[nodestr] = sum([peoplePopularity[x] for x in commonNodes])
photoCoappearence[nodestr] = [photoPool[idx]]
if photoPool[idx2+idx] not in photoCoappearence[nodestr]:
photoCoappearence[nodestr].append(photoPool[idx2+idx])
else:
if photoPool[idx2+idx] not in photoCoappearence[nodestr]:
photoCoappearence[nodestr].append(photoPool[idx2+idx])
if photoPool[idx] not in photoCoappearence[nodestr]:
photoCoappearence[nodestr].append(photoPool[idx])
photoCoappearence = {k: v for k, v in photoCoappearence.items() if len(v)>1}
plainRanking = sorted(photoCoappearence, key=lambda k: (len(photoCoappearence[k])*len(k.split(',')),coappPop[k]), reverse = True)
photoRank = []
maxPlain2 = 0
for plain in plainRanking:
photoCoappearence[plain].sort()
photoRank.append(photoCoappearence[plain])
maxPlain2 = max(maxPlain2,len(photoCoappearence[plain]))
methodName = 'baseCoappearence'
if decisionforAll == str('y') or decisionforAll == str(3):
#save results
finPhotoPool, measurement, Coapps = {}, {}, {}
cnt,nodeChckPool = 0, []
while len(finPhotoPool) < topImages or cnt <= maxPlain2:
for k,coappPhotos in enumerate(photoRank):
try:
if len(finPhotoPool)>=topImages:
break
cnt2 = cnt
while self.photodict[coappPhotos[cnt2]]['nodes'] in nodeChckPool or coappPhotos[cnt2] in finPhotoPool:
cnt2+=1
webbrowser.open(self.photodict[coappPhotos[cnt2]]['url'])
print('\n'+'\n'.join(plainRanking[k].split(',')))
print(len(photoCoappearence[plainRanking[k]])*len(plainRanking[k].split(',')))
decision = input('\nIs the url ok? Press Enter for Yes or 3|n for No?: ')
if decision == 'move':
continue
cnt3=cnt2
while decision == 'N' or decision == 'n' or decision == str(3):
cnt3+=1
if self.photodict[coappPhotos[cnt3]]['nodes'] not in nodeChckPool and coappPhotos[cnt3] not in finPhotoPool:
webbrowser.open(self.photodict[coappPhotos[cnt3]]['url'])
decision = input('\nIs this duplicate ok?: ')
cnt2 = cnt3
else:
finPhotoPool[coappPhotos[cnt2]] = self.photodict[coappPhotos[cnt2]]['date']
measurement[coappPhotos[cnt2]] = len(photoCoappearence[plainRanking[k]])*len(plainRanking[k].split(','))
Coapps[coappPhotos[cnt2]] = plainRanking[k]
except:
pass
continue
nodeChckPool.append(self.photodict[coappPhotos[cnt2]]['nodes'])
cnt+=1
finPhotos = sorted(finPhotoPool, key = finPhotoPool.get)
finfile = codecs.open(self.dataset_path_results+'html/'+str(self.eventIdInput)+"/"+methodName+".txt",'w','utf-8')
for rP in finPhotos:
finfile.write(str(self.eventIdInput) + '\t' + self.photodict[rP]['url'] + '\t' + datetime.datetime.fromtimestamp(self.photodict[rP]['date']).strftime('%d/%m/%y')
+ '\t' + str(rP) + '\t' + captiondict[rP] + '\t'+ ' '.join(Coapps[rP].split(',')) + '\t' + str(measurement[rP]) + '\n')
finfile.close()
else:
#save results
print('\n'+methodName)
cnt = 0
finfile = codecs.open(self.dataset_path_results+'analysis/'+methodName+".txt",'a','utf-8')
while cnt <= maxPlain2:
for k,coappPhotos in enumerate(photoRank):
try:
finfile.write(str(self.eventIdInput) + '\t' + self.photodict[coappPhotos[cnt]]['url'] + '\t' + datetime.datetime.fromtimestamp(self.photodict[coappPhotos[cnt]]['date']).strftime('%d/%m/%y')
+ '\t' + str(coappPhotos[cnt]) + '\t' + captiondict[coappPhotos[cnt]] + '\t'+ ' '.join(plainRanking[k].split(',')) + '\t'+str(len(photoCoappearence[plainRanking[k]])*len(plainRanking[k].split(','))) + '\n')
except:
pass
continue
cnt+=1
finfile.close()
def product(list):
p = 1
for i in list:
p *= i
return p
def recRankFus(mylist):#Perform the Reciprocal Rank Fusion for a list of rank values
finscore = []
mylist=[x+1 for x in mylist]
for rank in mylist:
finscore.append(1/(rank))
return sum(finscore)#/len(mylist)
def recRankCustom(mylist):
finscore = []
mylist=[x+1 for x in mylist]
mylist.sort()
for idx, rank in enumerate(mylist):
if idx > 1:
proj = 2*mylist[idx-1] - mylist[idx-2]
if (proj - rank) >= 0:
finscore.append(1/(rank))
else:
value = (1/rank) - (1/proj)
finscore.append(value)
else:
finscore.append(1/(rank))
return sum(finscore)
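# Editor's illustrative sketch (commented out, not part of the original script):
# both reciprocal-rank helpers expect 0-based rank positions; smaller ranks
# (people nearer the top of the centrality ranking) contribute more.
#   recRankFus([0, 1, 4])     # 1/1 + 1/2 + 1/5 = 1.7
#   recRankCustom([0, 1, 4])  # <= 1.7, since widely spread ranks are damped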
def myentropy(data):
if not data:
return 0
entropy = 0
for x in set(data):
p_x = float(data.count(x))/len(data)
if p_x > 0:
entropy += - p_x*math.log(p_x, 2)
return entropy
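# Editor's illustrative sketch (commented out): myentropy takes a plain list of
# labels, e.g. the Louvain community ids of the people appearing in one photo.
#   myentropy([1, 1, 2, 2])  # -> 1.0 (two equally likely communities)
#   myentropy([1, 1, 1])     # -> 0.0 (a single community)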
def resultsSaver(methodName,topImages,event,mainResults,measurementContainer,photodict,dataset_path_results,captiondict,combinationDict):
#Save results
if not os.path.exists(dataset_path_results+'html/'+str(event)+'/'):
os.makedirs(dataset_path_results+'html/'+str(event)+'/')
nodeChckPool, finPhotoPool, measurement = [], {}, {}
for rP in mainResults:
if photodict[rP]['nodes'] in nodeChckPool:
continue
else:
webbrowser.open(photodict[rP]['url'])
print('\n'+'\n'.join(photodict[rP]['nodes']))
print(str(measurementContainer[rP]))
decision = input('\nIs the url ok? Press Enter for Yes or 3|n for No?: ')
if decision == 'move':
nodeChckPool.append(photodict[rP]['nodes'])
continue
if decision == 'N' or decision == 'n' or decision == str(3):
for im in combinationDict[','.join(photodict[rP]['nodes'])]:
if im!= rP:
webbrowser.open(photodict[im]['url'])
decision = input('\nIs this duplicate ok?: ')
if decision == 'N' or decision == 'n' or decision == str(3):
continue
else:
finPhotoPool[im] = photodict[im]['date']
nodeChckPool.append(photodict[im]['nodes'])
measurement[im] = measurementContainer[rP]
break
else:
finPhotoPool[rP] = photodict[rP]['date']
measurement[rP] = measurementContainer[rP]
if len(finPhotoPool)==topImages:
break
nodeChckPool.append(photodict[rP]['nodes'])
finPhotos = sorted(finPhotoPool, key = finPhotoPool.get)
finfile = codecs.open(dataset_path_results+'html/'+str(event)+"/"+methodName+".txt",'w','utf-8')
for rP in finPhotos:
finfile.write(str(event) + '\t' + photodict[rP]['url'] + '\t' + datetime.datetime.fromtimestamp(photodict[rP]['date']).strftime('%d/%m/%y')
+ '\t' + str(rP) + '\t' + captiondict[rP] + '\t'+ ' '.join(photodict[rP]['nodes']) + '\t'+str(measurement[rP]) + '\n')
finfile.close()
def resultsSaverAll(methodName,topImages,event,mainResults,measurementContainer,photodict,dataset_path_results,captiondict,combinationDict):
#Save results
if not os.path.exists(dataset_path_results+'analysis/'):
os.makedirs(dataset_path_results+'analysis/')
measurement = {}
finfile = codecs.open(dataset_path_results+'analysis/'+methodName+".txt",'a','utf-8')
for rP in mainResults:
finfile.write(str(event) + '\t' + photodict[rP]['url'] + '\t' + datetime.datetime.fromtimestamp(photodict[rP]['date']).strftime('%d/%m/%y')
+ '\t' + str(rP) + '\t' + captiondict[rP] + '\t'+ ' '.join(photodict[rP]['nodes']) + '\t'+str(measurementContainer[rP]) + '\n')
finfile.close() | apache-2.0 |
astocko/statsmodels | statsmodels/distributions/mixture_rvs.py | 27 | 9592 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
---------
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
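# Editor's note (illustrative sketch, not part of statsmodels): each row of the
# returned boolean array has exactly one True column, chosen according to prob.
#   idx = _make_index([.75, .25], 1000)
#   idx.shape        # (1000, 2)
#   idx.sum(axis=1)  # all ones: every draw belongs to exactly one component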
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
for simple case for now (unbound support)
does not yet inherit from scipy.stats.distributions
adding pdf to mixture_rvs, some restrictions on broadcasting
Currently it does not hold any state, all arguments included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which the mixture pdf is evaluated.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the pdf, evaluated at points x, of a mixture of two normals,
        norm(-1,.5) with weight .75 and norm(1,.5) with weight .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which the mixture cdf is evaluated.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the cdf, evaluated at points x, of a mixture of two normals,
        norm(-1,.5) with weight .75 and norm(1,.5) with weight .25.
        >>> from scipy import stats
        >>> prob = [.75,.25]
        >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
                (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
    nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
        plt.title('histogram of sample and cdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
mpld3/mpld3 | examples/mpld3_logo.py | 2 | 3748 | """
mpld3 Logo Idea
===============
This example shows how mpld3 can be used to generate relatively intricate
vector graphics in the browser. This is an adaptation of a logo proposal by
github user debjan, in turn based on both the matplotlib and D3js logos.
"""
# Author: Jake VanderPlas
import matplotlib.pyplot as plt
from matplotlib import image, patches, colors
from matplotlib.colors import colorConverter
import numpy as np
import mpld3
imsize = np.array([319, 217])
center = [108.5, 108.5]
max_radius = 108.5
radii = np.linspace(16, max_radius, 5)
angles = np.arange(0, 360, 45)
fig = plt.figure(figsize=imsize / 50.)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])
# Create a clip path for the elements
clip_path = patches.Rectangle((0, 0), imsize[0], imsize[1],
transform=ax.transData)
# Create the background gradient
x = np.array([0, 104, 196, 300])
y = np.linspace(150, 450, 86)[:, None]
c = np.cos(-np.pi / 4)
s = np.sin(-np.pi / 4)
X, Y = (c * x - s * y) - 116, (s * x + c * y)
C = np.arange(255).reshape((3, 85)).T
C = C[::-1, :]
cmap = colors.LinearSegmentedColormap.from_list("mpld3",
[[0.97, 0.6, 0.29],
[0.97, 0.59, 0.27],
[0.97, 0.58, 0.25],
[0.95, 0.44, 0.34],
[0.92, 0.51, 0.29],
[0.68, 0.21, 0.20]])
mesh = ax.pcolormesh(X, Y, C, cmap=cmap, shading='auto', zorder=0)
mesh.set_clip_path(clip_path)
# cut-off the background to form the "D" and "3" using white patches
# (this could also be done with a clip path)
kwargs = dict(fc='white', ec='none', zorder=1)
ax.add_patch(patches.Rectangle([0, 0], center[0], imsize[1], **kwargs))
ax.add_patch(patches.Circle(center, radii[2], **kwargs))
ax.add_patch(patches.Wedge(center, 127, -90, 90, width=18.5, **kwargs))
ax.add_patch(patches.Circle((252, 66), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 48], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 66), 101, -90, 40.1, width=35, **kwargs))
ax.add_patch(patches.Circle((252, 151), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 133], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 151), 101, -40.1, 90, width=35, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 200, 617, **kwargs))
ax.add_patch(patches.Rectangle([-200, imsize[1]], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([imsize[0], -200], 200, 617, **kwargs))
# plot circles and lines
for radius in radii:
ax.add_patch(patches.Circle(center, radius, lw=0.5,
ec='gray', fc='none', zorder=2))
for angle in angles:
dx, dy = np.sin(np.radians(angle)), np.cos(np.radians(angle))
ax.plot([max_radius * (1 - dx), max_radius * (1 + dx)],
[max_radius * (1 - dy), max_radius * (1 + dy)],
'-', color='gray', lw=0.5, zorder=2)
# plot wedges within the graph
wedges = [(98, 231, 258, '#FF6600'),
(85, 170, 205, '#FFC500'),
(60, 80, 103, '#7DFF78'),
(96, 45, 58, '#FD7C1A'),
(73, 291, 308, '#CCFF28'),
(47, 146, 155, '#28FFCC'),
(25, 340, 360, '#004AFF')]
for (radius, theta1, theta2, color) in wedges:
ax.add_patch(patches.Wedge(center, radius, theta1, theta2,
fc=color, ec='black', alpha=0.6, zorder=3))
for patch in ax.patches:
patch.set_clip_path(clip_path)
ax.set_xlim(0, imsize[0])
ax.set_ylim(imsize[1], 0)
#plt.savefig('mpld3.png')
mpld3.show()
| bsd-3-clause |
habanero-rice/hclib | test/performance-regression/full-apps/qmcpack/nexus/library/extended_numpy.py | 2 | 17872 | import sys
from numpy import *
from numpy.linalg import *
from developer import unavailable
try:
from scipy.special import betainc
from scipy.optimize import fmin
from scipy.spatial import KDTree,Delaunay
scipy_unavailable = False
except ImportError:
betainc = unavailable('scipy.special' ,'betainc')
fmin = unavailable('scipy.optimize','fmin')
KDTree = unavailable('scipy.special' ,'KDTree')
Delaunay = unavailable('scipy.special' ,'Delaunay')
scipy_unavailable = True
#end try
########################################################################
############ ndgrid
########################################################################
# retrieved from
# http://www.mailinglistarchive.com/html/[email protected]/2010-05/msg00055.html
#"""
#n-dimensional gridding like Matlab's NDGRID
#
#Typical usage:
#>>> x, y, z = [0, 1], [2, 3, 4], [5, 6, 7, 8]
#>>> X, Y, Z = ndgrid(x, y, z)
#
#See ?ndgrid for details.
#"""
def ndgrid(*args, **kwargs):
"""
n-dimensional gridding like Matlab's NDGRID
The input *args are an arbitrary number of numerical sequences,
e.g. lists, arrays, or tuples.
The i-th dimension of the i-th output argument
has copies of the i-th input argument.
Optional keyword argument:
same_dtype : If False (default), the result is an ndarray.
If True, the result is a lists of ndarrays, possibly with
different dtype. This can save space if some *args
have a smaller dtype than others.
Typical usage:
>>> x, y, z = [0, 1], [2, 3, 4], [5, 6, 7, 8]
>>> X, Y, Z = ndgrid(x, y, z) # unpacking the returned ndarray into X, Y, Z
Each of X, Y, Z has shape [len(v) for v in x, y, z].
>>> X.shape == Y.shape == Z.shape == (2, 3, 4)
True
>>> X
array([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]])
>>> Y
array([[[2, 2, 2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4]],
[[2, 2, 2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4]]])
>>> Z
array([[[5, 6, 7, 8],
[5, 6, 7, 8],
[5, 6, 7, 8]],
[[5, 6, 7, 8],
[5, 6, 7, 8],
[5, 6, 7, 8]]])
With an unpacked argument list:
>>> V = [[0, 1], [2, 3, 4]]
>>> ndgrid(*V) # an array of two arrays with shape (2, 3)
array([[[0, 0, 0],
[1, 1, 1]],
[[2, 3, 4],
[2, 3, 4]]])
For input vectors of different data types, same_dtype=False makes ndgrid()
return a list of arrays with the respective dtype.
>>> ndgrid([0, 1], [1.0, 1.1, 1.2], same_dtype=False)
[array([[0, 0, 0], [1, 1, 1]]),
array([[ 1. , 1.1, 1.2], [ 1. , 1.1, 1.2]])]
Default is to return a single array.
>>> ndgrid([0, 1], [1.0, 1.1, 1.2])
array([[[ 0. , 0. , 0. ], [ 1. , 1. , 1. ]],
[[ 1. , 1.1, 1.2], [ 1. , 1.1, 1.2]]])
"""
same_dtype = kwargs.get("same_dtype", True)
V = [array(v) for v in args] # ensure all input vectors are arrays
shape = [len(v) for v in args] # common shape of the outputs
result = []
for i, v in enumerate(V):
# reshape v so it can broadcast to the common shape
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
zero = zeros(shape, dtype=v.dtype)
thisshape = ones_like(shape)
thisshape[i] = shape[i]
result.append(zero + v.reshape(thisshape))
if same_dtype:
return array(result) # converts to a common dtype
else:
return result # keeps separate dtype for each output
#if __name__ == "__main__":
# import doctest
# doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
########################################################################
############ End ndgrid
########################################################################
def simstats(x,dim=None):
shape = x.shape
ndim = len(shape)
if dim==None:
dim=ndim-1
#end if
permute = dim!=ndim-1
reshape = ndim>2
nblocks = shape[dim]
if permute:
r = range(ndim)
r.pop(dim)
r.append(dim)
permutation = tuple(r)
r = range(ndim)
r.pop(ndim-1)
r.insert(dim,ndim-1)
invperm = tuple(r)
x=x.transpose(permutation)
shape = tuple(array(shape)[array(permutation)])
dim = ndim-1
#end if
if reshape:
nvars = prod(shape[0:dim])
x=x.reshape(nvars,nblocks)
rdim=dim
dim=1
else:
nvars = shape[0]
#end if
mean = x.mean(dim)
var = x.var(dim)
N=nblocks
if ndim==1:
i=0
tempC=0.5
kappa=0.0
mtmp=mean
if abs(var)<1e-15:
kappa = 1.0
else:
ovar=1.0/var
while (tempC>0 and i<(N-1)):
kappa=kappa+2.0*tempC
i=i+1
#tempC=corr(i,x,mean,var)
tempC = ovar/(N-i)*sum((x[0:N-i]-mtmp)*(x[i:N]-mtmp))
#end while
if kappa == 0.0:
kappa = 1.0
#end if
#end if
Neff=(N+0.0)/(kappa+0.0)
if (Neff == 0.0):
Neff = 1.0
#end if
error=sqrt(var/Neff)
else:
error = zeros(mean.shape)
kappa = zeros(mean.shape)
for v in xrange(nvars):
i=0
tempC=0.5
kap=0.0
vtmp = var[v]
mtmp = mean[v]
if abs(vtmp)<1e-15:
kap = 1.0
else:
ovar = 1.0/vtmp
while (tempC>0 and i<(N-1)):
i += 1
kap += 2.0*tempC
tempC = ovar/(N-i)*sum((x[v,0:N-i]-mtmp)*(x[v,i:N]-mtmp))
#end while
if kap == 0.0:
kap = 1.0
#end if
#end if
Neff=(N+0.0)/(kap+0.0)
if (Neff == 0.0):
Neff = 1.0
#end if
kappa[v]=kap
error[v]=sqrt(vtmp/Neff)
#end for
#end if
if reshape:
x = x.reshape(shape)
mean = mean.reshape(shape[0:rdim])
var = var.reshape(shape[0:rdim])
error = error.reshape(shape[0:rdim])
kappa = kappa.reshape(shape[0:rdim])
#end if
if permute:
x=x.transpose(invperm)
#end if
return (mean,var,error,kappa)
#end def simstats
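#Editor's illustrative sketch (commented out, not part of the original module):
#simstats treats the last axis as the sample/block axis by default; kappa is the
#estimated autocorrelation time used to inflate the naive error bar.
# x = random.randn(3,1000)           # e.g. 3 observables, 1000 samples each
# mean,var,error,kappa = simstats(x) # error ~ sqrt(var/(N/kappa)) per observable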
def simplestats(x,dim=None):
if dim==None:
dim=len(x.shape)-1
#end if
osqrtN = 1.0/sqrt(1.0*x.shape[dim])
mean = x.mean(dim)
    error = sqrt(x.var(dim))*osqrtN  # standard error of the mean, sqrt(var/N)
return (mean,error)
#end def simplestats
def equilibration_length(x,tail=.5,plot=False,xlim=None,bounces=2):
bounces = max(1,bounces)
eqlen = 0
nx = len(x)
xt = x[int((1.-tail)*nx+.5):]
nxt = len(xt)
if nxt<10:
return eqlen
#end if
#mean = xh.mean()
#sigma = sqrt(xh.var())
xs = array(xt)
xs.sort()
mean = xs[int(.5*(nxt-1)+.5)]
sigma = (abs(xs[int((.5-.341)*nxt+.5)]-mean)+abs(xs[int((.5+.341)*nxt+.5)]-mean))/2
crossings = bounces*[0,0]
if abs(x[0]-mean)>sigma:
s = -sign(x[0]-mean)
ncrossings = 0
for i in range(nx):
dist = s*(x[i]-mean)
if dist>sigma and dist<5*sigma:
crossings[ncrossings]=i
s*=-1
ncrossings+=1
if ncrossings==2*bounces:
break
#end if
#end if
#end for
bounce = crossings[-2:]
bounce[1] = max(bounce[1],bounce[0])
#print len(x),crossings,crossings[1]-crossings[0]+1
eqlen = bounce[0]+random.randint(bounce[1]-bounce[0]+1)
#end if
if plot:
xlims = xlim
del plot,xlim
from matplotlib.pyplot import plot,figure,show,xlim
figure()
ix = arange(nx)
plot(ix,x,'b.-')
plot([0,nx],[mean,mean],'k-')
plot([0,nx],[mean+sigma,mean+sigma],'r-')
plot([0,nx],[mean-sigma,mean-sigma],'r-')
plot(ix[crossings],x[crossings],'r.')
plot(ix[bounce],x[bounce],'ro')
plot([ix[eqlen],ix[eqlen]],[x.min(),x.max()],'g-')
plot(ix[eqlen],x[eqlen],'go')
if xlims!=None:
xlim(xlims)
#end if
show()
#end if
return eqlen
#end def equilibration_length
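#Editor's illustrative sketch (commented out; 'trace' is a placeholder name):
#typical use is to drop the equilibration segment of a Monte Carlo trace before
#computing statistics.
# neq = equilibration_length(trace)            # trace: 1D array, e.g. energy per block
# mean,var,error,kappa = simstats(trace[neq:])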
def ttest(m1,e1,n1,m2,e2,n2):
m1 = float(m1)
e1 = float(e1)
m2 = float(m2)
e2 = float(e2)
v1 = e1**2
v2 = e2**2
t = (m1-m2)/sqrt(v1+v2)
nu = (v1+v2)**2/(v1**2/(n1-1)+v2**2/(n2-1))
x = nu/(nu+t**2)
p = 1.-betainc(nu/2,.5,x)
return p
#end def ttest
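#Editor's illustrative sketch (commented out): ttest returns the two-sided
#p-value of a Welch-style t test given two means, their error bars, and the
#number of samples behind each.
# p = ttest(m1=-10.45,e1=0.01,n1=100, m2=-10.42,e2=0.01,n2=100)
# # a small p (e.g. < 0.05) suggests the two means differ significantly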
def surface_normals(x,y,z):
nu,nv = x.shape
normals = empty((nu,nv,3))
mi=nu-1
mj=nv-1
v1 = empty((3,))
v2 = empty((3,))
v3 = empty((3,))
dr = empty((3,))
dr[0] = x[0,0]-x[1,0]
dr[1] = y[0,0]-y[1,0]
dr[2] = z[0,0]-z[1,0]
drtol = 1e-4
for i in xrange(nu):
for j in xrange(nv):
iedge = i==0 or i==mi
jedge = j==0 or j==mj
if iedge:
dr[0] = x[0,j]-x[mi,j]
dr[1] = y[0,j]-y[mi,j]
dr[2] = z[0,j]-z[mi,j]
if norm(dr)<drtol:
im = mi-1
ip = 1
elif i==0:
im=i
ip=i+1
elif i==mi:
im=i-1
ip=i
#end if
else:
im=i-1
ip=i+1
#end if
if jedge:
dr[0] = x[i,0]-x[i,mj]
dr[1] = y[i,0]-y[i,mj]
dr[2] = z[i,0]-z[i,mj]
if norm(dr)<drtol:
jm = mj-1
jp = 1
elif j==0:
jm=j
jp=j+1
elif j==mj:
jm=j-1
jp=j
#end if
else:
jm=j-1
jp=j+1
#end if
v1[0] = x[ip,j]-x[im,j]
v1[1] = y[ip,j]-y[im,j]
v1[2] = z[ip,j]-z[im,j]
v2[0] = x[i,jp]-x[i,jm]
v2[1] = y[i,jp]-y[i,jm]
v2[2] = z[i,jp]-z[i,jm]
v3 = cross(v1,v2)
onorm = 1./norm(v3)
normals[i,j,:]=v3[:]*onorm
#end for
#end for
return normals
#end def surface_normals
simple_surface_coords = [set(['x','y','z']),set(['r','phi','z']),set(['r','phi','theta'])]
simple_surface_min = {'x':-1.00000000001,'y':-1.00000000001,'z':-1.00000000001,'r':-0.00000000001,'phi':-0.00000000001,'theta':-0.00000000001}
def simple_surface(origin,axes,grid):
matched=False
gk = set(grid.keys())
for c in range(3):
if gk==simple_surface_coords[c]:
matched=True
coord=c
#end if
#end for
if not matched:
print 'Error in simple_surface: invalid coordinate system provided'
print ' provided coordinates:',gk
print ' permitted coordinates:'
for c in range(3):
print ' ',simple_surface_coords[c]
#end for
        sys.exit()
#end if
for k,v in grid.iteritems():
if min(v)<simple_surface_min[k]:
print 'Error in simple surface: '+k+' cannot be less than '+str(simple_surface_min[k])
print ' actual minimum: '+str(min(v))
sys.exit()
#end if
if max(v)>1.00000000001:
print 'Error in simple surface: '+k+' cannot be more than 1'
print ' actual maximum: '+str(max(v))
sys.exit()
#end if
    #end for
u=empty((3,))
r=empty((3,))
if coord==0:
xl = grid['x']
yl = grid['y']
zl = grid['z']
dim = (len(xl),len(yl),len(zl))
npoints = prod(dim)
points = empty((npoints,3))
n=0
for i in xrange(dim[0]):
for j in xrange(dim[1]):
for k in xrange(dim[2]):
r[0] = xl[i]
r[1] = yl[j]
r[2] = zl[k]
points[n,:] = dot(axes,r) + origin
n+=1
#end for
#end for
#end for
elif coord==1:
rl = grid['r']
phil = 2.*pi*grid['phi']
zl = grid['z']
dim = (len(rl),len(phil),len(zl))
npoints = prod(dim)
points = empty((npoints,3))
n=0
for i in xrange(dim[0]):
for j in xrange(dim[1]):
for k in xrange(dim[2]):
u[0] = rl[i]
u[1] = phil[j]
u[2] = zl[k]
r[0] = u[0]*cos(u[1])
r[1] = u[0]*sin(u[1])
r[2] = u[2]
points[n,:] = dot(axes,r) + origin
n+=1
#end for
#end for
#end for
elif coord==2:
rl = grid['r']
phil = 2.*pi*grid['phi']
thetal = pi*grid['theta']
dim = (len(rl),len(phil),len(thetal))
if dim[0]==1:
sgn = -1. #this is to 'fix' surface normals
#sgn = 1. #this is to 'fix' surface normals
else:
sgn = 1.
#end if
npoints = prod(dim)
points = empty((npoints,3))
n=0
for i in xrange(dim[0]):
for j in xrange(dim[1]):
for k in xrange(dim[2]):
u[0] = rl[i]
u[1] = phil[j]
u[2] = thetal[k]
r[0] = sgn*u[0]*sin(u[2])*cos(u[1])
r[1] = sgn*u[0]*sin(u[2])*sin(u[1])
r[2] = sgn*u[0]*cos(u[2])
points[n,:] = dot(axes,r) + origin
n+=1
#end for
#end for
#end for
#end if
if min(dim)!=1:
print 'Error in simple_surface: minimum dimension must be 1'
print ' actual minimum dimension:',str(min(dim))
sys.exit()
#end if
dm = []
for d in dim:
if d>1:
dm.append(d)
#end if
#end for
dm=tuple(dm)
x = points[:,0].reshape(dm)
y = points[:,1].reshape(dm)
z = points[:,2].reshape(dm)
return x,y,z
#end def simple_surface
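#Editor's illustrative sketch (commented out, an assumed usage): simple_surface
#maps a unit-parameter grid through the chosen coordinate system and axes;
#surface_normals then returns the unit normals of the resulting patch.
# u = linspace(0.,1.,20)
# x,y,z = simple_surface(zeros(3),identity(3),dict(r=[1.],phi=u,theta=u))
# normals = surface_normals(x,y,z) # shape (20,20,3)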
least_squares = lambda p,x,y,f: ((f(p,x)-y)**2).sum()
def func_fit(x,y,fitting_function,p0,minimizer=least_squares):
f = fitting_function
p = fmin(minimizer,p0,args=(x,y,f),maxiter=10000,maxfun=10000)
return p
#end def func_fit
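#Editor's illustrative sketch (commented out): func_fit performs a generic
#least-squares fit via simplex minimization; the fitting function is f(p,x).
# quadratic = lambda p,x: p[0] + p[1]*x + p[2]*x*x
# xdat = linspace(0.,1.,20)
# ydat = 1.0 + 2.0*xdat + 3.0*xdat**2
# pfit = func_fit(xdat,ydat,quadratic,p0=(0.,0.,0.))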
def distance_table(p1,p2,ordering=0):
n1 = len(p1)
n2 = len(p2)
if not isinstance(p1,ndarray):
p1=array(p1)
#end if
if not isinstance(p2,ndarray):
p2=array(p2)
#end if
dt = zeros((n1,n2))
for i1 in xrange(n1):
for i2 in xrange(n2):
dt[i1,i2] = norm(p1[i1]-p2[i2])
#end for
#end for
if ordering==0:
return dt
else:
if ordering==1:
n=n1
elif ordering==2:
n=n2
dt=dt.T
else:
print 'distance_table Error: ordering must be 1 or 2,\n you provided '+str(ordering)+'\nexiting.'
exit()
#end if
order = empty(dt.shape,dtype=int)
for i in xrange(n):
o = dt[i].argsort()
order[i] = o
dt[i,:] = dt[i,o]
#end for
return dt,order
#end if
#end def distance_table
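#Editor's illustrative sketch (commented out; pts1/pts2 are placeholder arrays):
# dt = distance_table(pts1,pts2)                  # shape (len(pts1),len(pts2))
# dt,order = distance_table(pts1,pts2,ordering=2) # rows follow pts2, sorted by distance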
def nearest_neighbors(n,points,qpoints=None,return_distances=False,slow=False):
extra = 0
if qpoints==None:
qpoints=points
if len(points)>1:
extra=1
elif return_distances:
return array([]),array([])
else:
return array([])
#end if
#end if
    if n>len(points)-extra:
        print 'nearest_neighbors Error: requested more than the total number of neighbors\n  maximum is: {0}\n  you requested: {1}\nexiting.'.format(len(points)-extra,n)
exit()
#end if
slow = slow or scipy_unavailable
if not slow:
kt = KDTree(points)
dist,ind = kt.query(qpoints,n+extra)
else:
dtable,order = distance_table(points,qpoints,ordering=2)
dist = dtable[:,0:n+extra]
ind = order[:,0:n+extra]
#end if
if extra==0 and n==1 and not slow:
nn = atleast_2d(ind).T
else:
nn = ind[:,extra:]
#end if
if not return_distances:
return nn
else:
return nn,dist
#end if
#end def nearest_neighbors
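#Editor's illustrative sketch (commented out): nearest_neighbors returns, for
#each query point, the indices of its n closest points; self matches are skipped
#when qpoints is omitted.
# pts = random.rand(50,3)
# nn = nearest_neighbors(2,pts)                            # shape (50,2)
# nn,dist = nearest_neighbors(2,pts,return_distances=True)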
def convex_hull(points,dimension=None,tol=None):
if dimension is None:
np,dimension = points.shape
#end if
d1 = dimension+1
tri = Delaunay(points)
all_inds = empty((d1,),dtype=bool)
all_inds[:] = True
verts = []
have_tol = tol!=None
for ni in range(len(tri.neighbors)):
n = tri.neighbors[ni]
ns = list(n)
if -1 in ns:
i = ns.index(-1)
inds = all_inds.copy()
inds[i] = False
v = tri.vertices[ni]
if have_tol:
iv = range(d1)
iv.pop(i)
c = points[v[iv[1]]]
a = points[v[i]]-c
b = points[v[iv[0]]]-c
bn = norm(b)
d = norm(a-dot(a,b)/(bn*bn)*b)
if d<tol:
inds[i]=True
#end if
#end if
verts.extend(v[inds])
#end if
#end for
verts = list(set(verts))
return verts
#end def convex_hull
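#Editor's illustrative sketch (commented out): convex_hull returns the indices
#of the hull vertices via a Delaunay triangulation; points lying within tol of a
#hull facet are kept as well.
# pts = random.rand(30,2)
# hull_inds = convex_hull(pts)
# hull_pts = pts[hull_inds]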
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/examples/try_tukey_hsd.py | 33 | 6616 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 15:34:18 2012
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import StringIO
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.stats.libqsturng import qsturng
ss = '''\
43.9 1 1
39.0 1 2
46.7 1 3
43.8 1 4
44.2 1 5
47.7 1 6
43.6 1 7
38.9 1 8
43.6 1 9
40.0 1 10
89.8 2 1
87.1 2 2
92.7 2 3
90.6 2 4
87.7 2 5
92.4 2 6
86.1 2 7
88.1 2 8
90.8 2 9
89.1 2 10
68.4 3 1
69.3 3 2
68.5 3 3
66.4 3 4
70.0 3 5
68.1 3 6
70.6 3 7
65.2 3 8
63.8 3 9
69.2 3 10
36.2 4 1
45.2 4 2
40.7 4 3
40.5 4 4
39.3 4 5
40.3 4 6
43.2 4 7
38.7 4 8
40.9 4 9
39.7 4 10'''
#idx Treatment StressReduction
ss2 = '''\
1 mental 2
2 mental 2
3 mental 3
4 mental 4
5 mental 4
6 mental 5
7 mental 3
8 mental 4
9 mental 4
10 mental 4
11 physical 4
12 physical 4
13 physical 3
14 physical 5
15 physical 4
16 physical 1
17 physical 1
18 physical 2
19 physical 3
20 physical 3
21 medical 1
22 medical 2
23 medical 2
24 medical 2
25 medical 3
26 medical 2
27 medical 3
28 medical 1
29 medical 3
30 medical 1'''
ss3 = '''\
1 24.5
1 23.5
1 26.4
1 27.1
1 29.9
2 28.4
2 34.2
2 29.5
2 32.2
2 30.1
3 26.1
3 28.3
3 24.3
3 26.2
3 27.8'''
cylinders = np.array([8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 6, 6, 6, 4, 4,
4, 4, 4, 4, 6, 8, 8, 8, 8, 4, 4, 4, 4, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 6, 6,
6, 6, 4, 4, 4, 4, 4, 8, 4, 6, 6, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4])
cyl_labels = np.array(['USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'France',
'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Japan',
'Germany', 'France', 'Germany', 'Sweden', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'USA', 'USA', 'France', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'Japan', 'USA', 'USA', 'USA', 'USA', 'Germany', 'Japan', 'Japan', 'USA', 'Sweden', 'USA', 'France',
'Japan', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA',
'Germany', 'Japan', 'Japan', 'USA', 'USA', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'USA',
'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Germany', 'USA', 'USA', 'USA'])
dta = np.recfromtxt(StringIO(ss), names=("Rust","Brand","Replication"))
dta2 = np.recfromtxt(StringIO(ss2), names = ("idx", "Treatment", "StressReduction"))
dta3 = np.recfromtxt(StringIO(ss3), names = ("Brand", "Relief"))
from statsmodels.sandbox.stats.multicomp import tukeyhsd
import statsmodels.sandbox.stats.multicomp as multi
#print tukeyhsd(dta['Brand'], dta['Rust'])
def get_thsd(mci):
var_ = np.var(mci.groupstats.groupdemean(), ddof=len(mci.groupsunique))
means = mci.groupstats.groupmean
nobs = mci.groupstats.groupnobs
resi = tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=qsturng(0.95, len(means), (nobs-1).sum()))
print(resi[4])
var2 = (mci.groupstats.groupvarwithin() * (nobs - 1)).sum() \
/ (nobs - 1).sum()
assert_almost_equal(var_, var2, decimal=14)
return resi
mc = multi.MultiComparison(dta['Rust'], dta['Brand'])
res = mc.tukeyhsd()
print(res)
mc2 = multi.MultiComparison(dta2['StressReduction'], dta2['Treatment'])
res2 = mc2.tukeyhsd()
print(res2)
mc2s = multi.MultiComparison(dta2['StressReduction'][3:29], dta2['Treatment'][3:29])
res2s = mc2s.tukeyhsd()
print(res2s)
res2s_001 = mc2s.tukeyhsd(alpha=0.01)
#R result
tukeyhsd2s = np.array([1.888889,0.8888889,-1,0.2658549,-0.5908785,-2.587133,3.511923,2.368656,0.5871331,0.002837638,0.150456,0.1266072]).reshape(3,4, order='F')
assert_almost_equal(res2s_001.confint, tukeyhsd2s[:,1:3], decimal=3)
mc3 = multi.MultiComparison(dta3['Relief'], dta3['Brand'])
res3 = mc3.tukeyhsd()
print(res3)
tukeyhsd4 = multi.MultiComparison(cylinders, cyl_labels, group_order=["Sweden", "Japan", "Germany", "France", "USA"])
res4 = tukeyhsd4.tukeyhsd()
print(res4)
try:
import matplotlib.pyplot as plt
fig = res4.plot_simultaneous("USA")
plt.show()
except Exception as e:
print(e)
for mci in [mc, mc2, mc3]:
get_thsd(mci)
from scipy import stats
print(mc2.allpairtest(stats.ttest_ind, method='b')[0])
'''same as SAS:
>>> np.var(mci.groupstats.groupdemean(), ddof=3)
4.6773333333333351
>>> var_ = np.var(mci.groupstats.groupdemean(), ddof=3)
>>> tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=qsturng(0.95, 3, 12))[4]
array([[ 0.95263648, 8.24736352],
[-3.38736352, 3.90736352],
[-7.98736352, -0.69263648]])
>>> tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=3.77278)[4]
array([[ 0.95098508, 8.24901492],
[-3.38901492, 3.90901492],
[-7.98901492, -0.69098508]])
'''
ss5 = '''\
Comparisons significant at the 0.05 level are indicated by ***.
BRAND
Comparison Difference
Between
Means Simultaneous 95% Confidence Limits Sign.
2 - 3 4.340 0.691 7.989 ***
2 - 1 4.600 0.951 8.249 ***
3 - 2 -4.340 -7.989 -0.691 ***
3 - 1 0.260 -3.389 3.909 -
1 - 2 -4.600 -8.249 -0.951 ***
1 - 3 -0.260 -3.909 3.389 '''
ss5 = '''\
2 - 3 4.340 0.691 7.989 ***
2 - 1 4.600 0.951 8.249 ***
3 - 2 -4.340 -7.989 -0.691 ***
3 - 1 0.260 -3.389 3.909 -
1 - 2 -4.600 -8.249 -0.951 ***
1 - 3 -0.260 -3.909 3.389 '''
dta5 = np.recfromtxt(StringIO(ss5), names = ('pair', 'mean', 'lower', 'upper', 'sig'), delimiter='\t')
sas_ = dta5[[1,3,2]]
confint1 = res3.confint
confint2 = sas_[['lower','upper']].view(float).reshape((3,2))
assert_almost_equal(confint1, confint2, decimal=2)
reject1 = res3.reject
reject2 = sas_['sig'] == '***'
assert_equal(reject1, reject2)
meandiff1 = res3.meandiffs
meandiff2 = sas_['mean']
assert_almost_equal(meandiff1, meandiff2, decimal=14)
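# Editor's sketch (not part of the original example script): the same pairwise
# Tukey HSD machinery used above works on any flat response/group arrays.
# Only names already imported in this script (np, multi) are assumed.
def _example_minimal_tukey():
    resp = np.array([1., 2., 2., 3., 5., 6., 6., 7.])
    grp = np.array(['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'])
    mc_demo = multi.MultiComparison(resp, grp)
    # returns a TukeyHSDResults object exposing meandiffs, confint and reject
    return mc_demo.tukeyhsd(alpha=0.05)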
| bsd-3-clause |
manashmndl/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
pacificgilly1992/PGrainrate | Backups/PGRRpost/PGRRpost1.0.5.py | 1 | 3573 | ############################################################################
# Project: The Lenard effect of preciptation at the RUAO,
# Title: Ensemble processing of the PG, Time and Rain Rate data,
# Author: James Gilmore,
# Email: [email protected].
# Version: 1.0.5
# Date: 07/12/15
############################################################################
#Initialising the python script
from __future__ import absolute_import, division, print_function
from array import array
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats, interpolate
execfile("externals.py")
np.set_printoptions(threshold='nan')
year, month, time, rainrate, pg = np.genfromtxt('processeddata/PGdata.csv', dtype=float, delimiter=',', unpack=True)
Month = month.copy()[year.copy()!=0]
Time = time.copy()[year.copy()!=0]
Rainrate = rainrate.copy()[year.copy()!=0]
PG = pg.copy()[year.copy()!=0]
Year = year.copy()[year.copy()!=0]
PGRR = np.asarray(zip(Rainrate, PG))
PGRRsort = PGRR[np.lexsort((PGRR[:, 1], PGRR[:, 0]))]
PGsort = PGRRsort[:,1]
RRsort = PGRRsort[:,0]
bincount = 100
RainRateBin = np.zeros(bincount-1)
RainRateBinLimit = np.zeros(bincount)
TimeTipBin = np.zeros(bincount)
PGTipBin = np.zeros(bincount)
TotalBin = np.zeros(bincount)
PGTipBinMedian = np.zeros([bincount,150])
PGTipPosition = np.zeros(bincount, dtype=int) #integer counters, used as column indices below
PGTipBinMedianFinal = np.zeros(bincount)
#Define the Rain Rate for each bin with the centred values determined as well.
for i in range(bincount):
RainRateBinLimit[i] = i*5/bincount
for i in range(bincount-1):
RainRateBin[i] = 0.5*(RainRateBinLimit[i+1]-RainRateBinLimit[i])
############################################################################
#Define the mean (ensemble) PG and Tip Times for the statistically significant data.
for j in range(len(Year)):
for i in range(1,bincount):
if (Rainrate[j] < i*5/bincount and Rainrate[j] > (i-1)*5/bincount):
PGTipBin[i] += PG[j]
TimeTipBin[i] += Time[j]
TotalBin[i] += 1
PGTipBinned = PGTipBin.copy()/(TotalBin.copy())
TimeTipBinned = TimeTipBin.copy()/(TotalBin.copy())
#Removes NaN values
PGTipBinned = [0 if np.isnan(x) else x for x in PGTipBinned]
TimeTipBinned = [0 if np.isnan(x) else x for x in TimeTipBinned]
############################################################################
############################################################################
#Define the median PG and Tip Times for the statistically significant data.
print(PGTipBinMedian[i,:] )
for j in range(len(Year)):
for i in range(bincount):
if (Rainrate[j] < i*5/bincount and Rainrate[j] > (i-1)*5/bincount):
PGTipBinMedian[i,PGTipPosition[i]] = PG[j]
PGTipPosition[i]+=1
print("HHHHHHHHHHHH", PGTipBinMedian[3,:].copy()[PGTipBinMedian[3,:].copy()!=0])
for i in range(bincount):
    BinValues = PGTipBinMedian[i,:][PGTipBinMedian[i,:]!=0] #keep only the PG values actually recorded for this bin
    if BinValues.size > 0:
        PGTipBinMedianFinal[i] = np.median(BinValues)
print(PGTipBinMedianFinal)
print(PGTipPosition)
############################################################################
#print(TotalBin)
#print(PGTipBinned)
#Calculation of the linear regression model along with statistical parameters.
#slope, intercept, r_value, p_value, std_err = stats.linregress(RainRateBin, PGTipBinned)
#print("P-Value: ", p_value)
#print("R^2 Value: ", r_value**2)
#print("Standard Error: ", std_err)
#Plot the ensemble PG against Rain Rate. See external.py for the source function.
PGRainSlim(np.max(RainRateBin)+0.2, np.max(PGTipBinned)+0.2, "PGEnsemble" + str(bincount), "png", RainRateBin, PGTipBinned)
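#Editor's sketch (not part of the original script): a vectorised way to get
#the per-bin medians computed above, using np.digitize on the same 0-5
#rain-rate range. Only numpy and the Rainrate/PG arrays loaded earlier are assumed.
def _binned_median_sketch(rain, pg, nbins=100, rmax=5.0):
    edges = np.linspace(0.0, rmax, nbins + 1)
    which = np.digitize(rain, edges) - 1 #bin index 0..nbins-1 for values in [0, rmax)
    med = np.full(nbins, np.nan)
    for b in range(nbins):
        vals = pg[which == b]
        if vals.size > 0:
            med[b] = np.median(vals)
    return edges, med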
| gpl-3.0 |
MatthieuBizien/scikit-learn | sklearn/tests/test_naive_bayes.py | 72 | 19944 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
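# Editor's sketch (not an sklearn test, hence no test_ prefix): the Gaussian
# NB probabilities exercised above can be reproduced by hand from the fitted
# attributes theta_, sigma_ and class_prior_ of a trained GaussianNB.
def _manual_gnb_posterior(clf, x):
    # log joint: log P(c) + sum_f log N(x_f | theta_cf, sigma_cf)
    joint = (np.log(clf.class_prior_)
             - 0.5 * np.sum(np.log(2. * np.pi * clf.sigma_), axis=1)
             - 0.5 * np.sum((x - clf.theta_) ** 2 / clf.sigma_, axis=1))
    joint -= joint.max()               # for numerical stability
    post = np.exp(joint)
    return post / post.sum()           # matches clf.predict_proba([x])[0]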
| bsd-3-clause |
fyffyt/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
    * By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
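# Editor's sketch (not part of the original module): typical use of imview on
# a random float image (assumed to lie in [0, 1]); it fills the whole figure
# and uses nearest-neighbour interpolation unless told otherwise.
def _example_imview():
    img = np.random.rand(32, 32, 3)
    imview(img)        # new full-frame figure, no ticks or tick labels
    plt.show()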
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
    rstart = (shape[0] - image.shape[0]) // 2
    cstart = (shape[1] - image.shape[1]) // 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
        in which every row is a flattened image.
    img_shape : 2-tuple of ints
        The first component is the height of each image,
        the second component is the width.
    tile_shape : 2-tuple of ints
        The number of images to tile in (row, columns) form.
    tile_spacing : 2-tuple of ints
        The number of blank pixels between neighbouring tiles in
        (row, column) form.
scale_rows_to_unit_interval : bool
Whether or not the values need to be before being plotted to [0, 1].
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
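# Editor's sketch (not part of the original module): a small usage example for
# tile_raster_images, tiling 100 random 8x8 "images" into one uint8 array that
# `show` or PIL can display directly.
def _example_tile_raster_images():
    rng = np.random.RandomState(0)
    flat = rng.rand(100, 64)          # one flattened 8x8 image per row
    return tile_raster_images(flat, img_shape=(8, 8), tile_shape=(10, 10),
                              tile_spacing=(1, 1))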
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
cmgerber/Nuclear_Forensics_Analysis | Exporting_Origen_Data/Get_All_Isotopes.py | 1 | 1357 | #! /usr/bin/env python2.7.5
#Author: Colin Gerber
import csv
import os
import pandas as pd
def convert_to_csv(out_file_list):
    '''Reads each per-file "export" CSV named in out_file_list, concatenates
    the transposed tables and writes the combined result to All_isotopes.csv.'''
    # accumulate every per-file frame into a single DataFrame
df_tot = pd.DataFrame()
for f in out_file_list:
if 'compiled' in f:
continue
else:
df_infile = pd.read_csv(f, index_col=0, header=None)
df_infile = df_infile.T
if len(df_tot) == 0:
df_tot = df_infile
else:
df_tot = df_tot.append(df_infile)
df_tot.to_csv('All_isotopes.csv')
def get_csv(directory):
tot_dir_list = os.listdir(directory)
return [f for f in tot_dir_list if 'export.csv' in f]
def main():
while (True):
directory = raw_input('Enter the path of the directory your files are in: ')
try:
os.chdir(directory)
            #get a list of the exported .csv files
out_file_list = get_csv(directory)
convert_to_csv(out_file_list)
break
except Exception as e:
print (e)
print ('That path does not work, please try entering it again.')
# code execution begins and invokes main()
if __name__ == '__main__':
main()
| agpl-3.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/streamplot.py | 4 | 19095 | """
Streamline plotting for 2D vector fields.
"""
from __future__ import division
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 25x25 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
Factor scale arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = axes._get_lines.color_cycle.next()
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
assert color.shape == grid.shape
line_colors = []
if np.any(np.isnan(color)):
color = np.ma.array(color, mask=np.isnan(color))
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
assert linewidth.shape == grid.shape
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
## Sanity checks.
assert u.shape == grid.shape
assert v.shape == grid.shape
if np.any(np.isnan(u)):
u = np.ma.array(u, mask=np.isnan(u))
if np.any(np.isnan(v)):
v = np.ma.array(v, mask=np.isnan(v))
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = np.array(t[1]) * grid.dy + grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
# Add arrows half way along each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1] / 2.)
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.extend(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
p = patches.FancyArrowPatch(arrow_tail,
arrow_head,
transform=transform,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(streamlines,
transform=transform,
**line_kw)
if use_multicolor_lines:
lc.set_array(np.asarray(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.update_datalim(((x.min(), y.min()), (x.max(), y.max())))
axes.autoscale_view(tight=True)
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
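# Editor's usage sketch (not part of the module): build a simple rotational
# field and draw it with the streamplot function defined above.  Only numpy
# and the cm module already imported here are assumed; `axes` is any
# matplotlib Axes instance supplied by the caller.
def _example_streamplot(axes):
    Y, X = np.mgrid[-3:3:100j, -3:3:100j]
    U, V = -Y, X                              # solid-body rotation
    speed = np.sqrt(U * U + V * V)
    return streamplot(axes, X[0], Y[:, 0], U, V, density=1.5,
                      color=speed, linewidth=1, cmap=cm.autumn)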
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
#========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
## Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = float(mask.nx - 1) / grid.nx
self.y_grid2mask = float(mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = grid.nx / grid.width
self.y_data2grid = grid.ny / grid.height
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return int((xi * self.x_grid2mask) + 0.5), \
int((yi * self.y_grid2mask) + 0.5)
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if len(x.shape) == 2:
x_row = x[0]
assert np.allclose(x_row, x)
x = x_row
else:
assert len(x.shape) == 1
if len(y.shape) == 2:
y_col = y[:, 0]
assert np.allclose(y_col, y.T)
y = y_col
else:
assert len(y.shape) == 1
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
assert density > 0
self.nx = self.ny = int(30 * density)
else:
assert len(density) == 2
self.nx = int(25 * density[0])
self.ny = int(25 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
raise InvalidIndexError
class InvalidIndexError(Exception):
pass
class TerminateTrajectory(Exception):
pass
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
dmap.start_trajectory(x0, y0)
sf, xf_traj, yf_traj = _integrate_rk12(x0, y0, dmap, forward_time)
dmap.reset_start_point(x0, y0)
sb, xb_traj, yb_traj = _integrate_rk12(x0, y0, dmap, backward_time)
# combine forward and backward trajectories
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if stotal > minlength:
return x_traj, y_traj
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
def _integrate_rk12(x0, y0, dmap, f):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
## This error is below that needed to match the RK4 integrator. It
## is set for visual reasons -- too low and corners start
## appearing ugly and jagged. Can be tuned.
maxerror = 0.003
## This limit is important (for all integrators) to avoid the
## trajectory skipping some mask cells. We could relax this
## condition if we use the code which is commented out below to
## increment the location gradually. However, due to the efficient
## nature of the interpolation, this doesn't boost speed by much
## for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi)
except InvalidIndexError:
break
if (stotal + ds) > 2:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj
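# Editor's note (illustrative only, unused by streamplot): the adaptive Heun /
# RK12 scheme above, reduced to a standalone scalar ODE so the step-size
# control is easier to follow.
def _rk12_scalar_demo(f, y0, t0, t1, maxerror=1e-4, maxstep=0.1):
    """Integrate dy/dt = f(t, y) from t0 to t1 with RK12 step-size control."""
    t, y, ds = t0, y0, maxstep
    while t < t1:
        ds = min(ds, t1 - t)
        k1 = f(t, y)
        k2 = f(t + ds, y + ds * k1)
        y_euler = y + ds * k1                     # first-order estimate
        y_heun = y + 0.5 * ds * (k1 + k2)         # second-order estimate
        error = abs(y_heun - y_euler)
        if error <= maxerror:                     # accept the step
            t, y = t + ds, y_heun
        # grow or shrink the step from the observed error, as above
        ds = maxstep if error == 0 else min(maxstep, 0.85 * ds * (maxerror / error) ** 0.5)
    return y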
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
#========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(np.int)
y = yi.astype(np.int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = np.int(xi)
y = np.int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
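# Illustrative sketch (added for clarity, not called anywhere): a tiny,
# hand-checkable case of the bilinear interpolation done by `interpgrid`.
# The helper name and the example grid are assumptions for illustration only.
def _interpgrid_example():
    """Interpolate at the centre of the first cell; the result is the corner average, 1.5."""
    a = np.array([[0.0, 1.0, 2.0],
                  [2.0, 3.0, 4.0],
                  [4.0, 5.0, 6.0]])
    return interpgrid(a, 0.5, 0.5)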
def _gen_starting_points(shape):
"""Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
It is inefficient, but fast compared to the rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
i = 0
direction = 'right'
for i in xrange(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
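# Illustrative sketch (added for clarity, not called anywhere): the spiral order
# produced by `_gen_starting_points` for a 3x3 mask. The helper name is an
# assumption; it simply materialises the generator.
def _gen_starting_points_example():
    """Return the spiral visiting order for a 3x3 grid, starting in the corner."""
    return list(_gen_starting_points((3, 3)))
    # -> [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (1, 1)]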
| mit |
BiaDarkia/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 31 | 5994 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
# Check binary predict decision has also predicted probability above 0.5.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
# Test GPC for multi-class classification problems.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
# Test that multi-class GPC produces identical results with n_jobs>1.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
| bsd-3-clause |
odlgroup/odl | examples/tomo/backends/astra_performance_cpu_parallel_2d_cg.py | 2 | 2895 | """Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 512x512 image is reconstructed using the Conjugate Gradient
Least Squares method on the CPU.
In general, ASTRA is faster than ODL since it does not need to perform any
copies and all arithmetic is performed in place. Despite this, ODL is not much
slower. In this example, the overhead is about 40 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import odl
from odl.util.testutils import timer
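# Illustrative sketch (added for clarity, not used by the script): CGLS, the
# method run below both through ASTRA's 'CGLS' algorithm and through
# odl.solvers.conjugate_gradient_normal, is conjugate gradients applied to the
# normal equations A^T A x = A^T b. The toy matrix and helper name here are
# assumptions used only to show the iteration on a tiny least-squares problem.
def _cgls_toy_example(niter=10):
    A = np.array([[2.0, 0.0], [1.0, 3.0], [0.0, 1.0]])
    b = np.array([2.0, 7.0, 1.0])
    x = np.zeros(2)
    r = b - A.dot(x)              # residual in data space
    s = A.T.dot(r)                # (negative) gradient of 0.5 * ||A x - b||^2
    p, gamma = s.copy(), s.dot(s)
    for _ in range(niter):
        if gamma < 1e-30:         # already converged to machine precision
            break
        q = A.dot(p)
        alpha = gamma / q.dot(q)
        x += alpha * p
        r -= alpha * q
        s = A.T.dot(r)
        gamma, gamma_old = s.dot(s), gamma
        p = s + (gamma / gamma_old) * p
    return x                      # the least-squares solution of A x = b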
# Common geometry parameters
domain_size = np.array([512, 512])
n_angles = 180
det_size = 362
niter = 20
phantom = np.rot90(scipy.misc.ascent().astype('float'), -1)
# --- ASTRA ---
# Define ASTRA geometry
vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1])
proj_geom = astra.create_proj_geom('parallel',
np.linalg.norm(domain_size) / det_size,
det_size,
np.linspace(0, np.pi, n_angles))
# Create ASTRA projector
proj_id = astra.create_projector('line', proj_geom, vol_geom)
# Create sinogram
sinogram_id, sinogram = astra.create_sino(phantom, proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CPU backend
cfg = astra.astra_dict('CGLS')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with timer('ASTRA Run'):
# Run the algorithm
astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data2d.get(rec_id)
# Clean up.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
# --- ODL ---
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry
geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size)
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cpu')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN)
x = reco_space.zero()
with timer('ODL Run'):
odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T, origin='lower', cmap='bone')
plt.figure('ASTRA Sinogram')
plt.imshow(sinogram.T, origin='lower', cmap='bone')
plt.figure('ASTRA Reconstruction')
plt.imshow(rec.T, origin='lower', cmap='bone')
plt.figure('ODL Sinogram')
plt.imshow(data.asarray().T, origin='lower', cmap='bone')
plt.figure('ODL Reconstruction')
plt.imshow(x.asarray().T, origin='lower', cmap='bone')
plt.show()
| mpl-2.0 |
baspijhor/paparazzi | sw/tools/calibration/calibration_utils.py | 27 | 12769 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
# degenerate range (at least one axis has no spread): return a zero guess
# with the same shape as the normal case
return np.array([0, 0, 0, 0, 0, 0])
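# Illustrative sketch (added for clarity, not used elsewhere): the boundary-based
# guess centres the data and maps its min/max range onto [-scale, +scale].
# The helper name and numbers are assumptions for illustration only.
def _get_min_max_guess_example():
    """Two fake readings per axis give neutral = midpoint and sens = 2*scale/range."""
    meas = np.array([[100.0, -200.0, 0.0],
                     [300.0, 200.0, 50.0]])
    return get_min_max_guess(meas, 1.0)   # [200., 0., 25., 0.01, 0.005, 0.04]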
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
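# Illustrative sketch (added for clarity, not used elsewhere): scaling one raw
# reading with a parameter vector p = [neutral_x, neutral_y, neutral_z,
# sens_x, sens_y, sens_z]. The numbers are made up for illustration.
def _scale_measurements_example():
    """A reading 2 counts above the x neutral scales to [1, 0, 0] with norm 1."""
    p = np.array([10.0, -5.0, 0.0, 0.5, 0.5, 0.5])
    raw = np.array([[12.0, -5.0, 0.0]])
    return scale_measurements(raw, p)   # ([[1., 0., 0.]], [1.])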
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
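# Illustrative sketch (added for clarity, not used elsewhere): the returned
# coefficients are the per-axis slopes of the magnetometer readings against the
# measured current (column 3). The synthetic data below is an assumption.
def _estimate_mag_current_relation_example():
    """For mag = [2, -0.5, 0.1] * current the slopes come back as roughly [2, -0.5, 0.1]."""
    current = np.linspace(0.0, 10.0, 11)
    meas = np.column_stack((2.0 * current, -0.5 * current, 0.1 * current, current))
    return estimate_mag_current_relation(meas)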
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_measurements(sensor, measurements):
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.ylabel('ADC')
plt.title("Raw %s measurements" % sensor)
plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
# also in matplotlib before 1.0.0 there is only one call to show possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
return an array which first column is turnatble and next 3 are gyro
"""
f = open(filename, 'r')
pattern_g = re.compile("(\S+) "+str(ac_id)+" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
pattern_t = re.compile("(\S+) "+str(tt_id)+" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
| gpl-2.0 |
mikeengland/fireant | fireant/tests/widgets/test_reacttable.py | 2 | 108071 | from unittest import TestCase
import pandas as pd
from pypika import Table
from fireant import (
DataSet,
DataType,
DayOverDay,
Field,
Rollup,
day,
)
from fireant.dataset.filters import ComparisonOperator
from fireant.tests.database.mock_database import TestDatabase
from fireant.tests.dataset.mocks import (
CumSum,
ElectionOverElection,
dimx0_metricx1_df,
dimx0_metricx2_df,
dimx1_date_df,
dimx1_date_operation_df,
dimx1_none_df,
dimx1_num_df,
dimx1_str_df,
dimx2_date_str_df,
dimx2_date_str_ref_df,
dimx2_date_str_totals_df,
dimx2_date_str_totalsx2_df,
dimx2_str_str_df,
mock_dataset,
)
from fireant.widgets.base import ReferenceItem
from fireant.widgets.reacttable import (
FormattingConditionRule,
FormattingField,
FormattingHeatMapRule,
ReactTable,
)
class FormattingRulesTests(TestCase):
@classmethod
def setUpClass(cls):
db = TestDatabase()
t0 = Table('test0')
cls.dataset = DataSet(
table=t0,
database=db,
fields=[
Field(
'timestamp',
label='Timestamp',
definition=t0.timestamp,
data_type=DataType.number,
),
Field(
'metric0',
label='Metric0',
definition=t0.metric,
data_type=DataType.number,
),
],
)
cls.df = pd.DataFrame.from_dict(
{
'$timestamp': [100, 200, 300, 400],
'$metric0': [1, 2, 3, 4],
'$metric0_dod': [1, 5, 9, 12],
'$cumsum(metric0)': [1, 3, 6, 10],
'$cumsum(metric0)_dod': [1, 6, 15, 27],
}
).set_index('$timestamp')
def test_formatting_heatmap_rule(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingHeatMapRule(
FormattingField(metric=self.dataset.fields.metric0),
'ff0000',
)
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1, 'color': 'fff2f2', 'text_color': '212121'}},
{'$metric0': {'display': '2', 'raw': 2, 'color': 'ffa2a2', 'text_color': '212121'}},
{'$metric0': {'display': '3', 'raw': 3, 'color': 'ff5151', 'text_color': '212121'}},
{'$metric0': {'display': '4', 'raw': 4, 'color': 'ff0000', 'text_color': 'FDFDFD'}},
],
},
result,
)
def test_formatting_heatmap_rule_with_reversed_color(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingHeatMapRule(
FormattingField(metric=self.dataset.fields.metric0),
'ff0000',
reverse_heatmap=True,
)
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1, 'color': 'ff0000', 'text_color': 'FDFDFD'}},
{'$metric0': {'display': '2', 'raw': 2, 'color': 'ff5151', 'text_color': '212121'}},
{'$metric0': {'display': '3', 'raw': 3, 'color': 'ffa2a2', 'text_color': '212121'}},
{'$metric0': {'display': '4', 'raw': 4, 'color': 'fff2f2', 'text_color': '212121'}},
],
},
result,
)
def test_formatting_heatmap_rule_with_just_one_value(self):
# This test guards against division by zero when there is only a single value
df = pd.DataFrame.from_dict(
{
'$metric0': [1],
'$timestamp': [0],
}
).set_index('$timestamp')
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingHeatMapRule(
FormattingField(metric=self.dataset.fields.metric0),
'ff0000',
)
],
).transform(df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1, 'color': 'FFFFFF', 'text_color': '212121'}},
],
},
result,
)
def test_formatting_heatmap_rule_with_2_colors(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingHeatMapRule(
FormattingField(metric=self.dataset.fields.metric0),
'ff0000',
start_color='00ff00',
)
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1, 'color': '00ff00', 'text_color': '212121'}},
{'$metric0': {'display': '2', 'raw': 2, 'color': 'aaffaa', 'text_color': '212121'}},
{'$metric0': {'display': '3', 'raw': 3, 'color': 'ffa2a2', 'text_color': '212121'}},
{'$metric0': {'display': '4', 'raw': 4, 'color': 'ff0000', 'text_color': 'FDFDFD'}},
],
},
result,
)
def test_single_formatting_condition_rule(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingConditionRule(
FormattingField(metric=self.dataset.fields.metric0),
ComparisonOperator.gt,
2,
'EEEEEE',
)
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1}},
{'$metric0': {'display': '2', 'raw': 2}},
{'$metric0': {'display': '3', 'raw': 3, 'color': 'EEEEEE', 'text_color': '212121'}},
{'$metric0': {'display': '4', 'raw': 4, 'color': 'EEEEEE', 'text_color': '212121'}},
],
},
result,
)
def test_rule_covers_row(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingConditionRule(
FormattingField(metric=self.dataset.fields.metric0),
ComparisonOperator.gt,
2,
'EEEEEE',
covers_row=True,
)
],
).transform(self.df, [self.dataset.fields.timestamp], [])
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']},
],
'data': [
{
'$metric0': {'display': '1', 'raw': 1},
'$timestamp': {'display': '100', 'raw': 100},
},
{
'$metric0': {'display': '2', 'raw': 2},
'$timestamp': {'display': '200', 'raw': 200},
},
{
'$metric0': {'display': '3', 'raw': 3, 'color': 'EEEEEE', 'text_color': '212121'},
'$timestamp': {'display': '300', 'raw': 300, 'color': 'EEEEEE', 'text_color': '212121'},
},
{
'$metric0': {'display': '4', 'raw': 4, 'color': 'EEEEEE', 'text_color': '212121'},
'$timestamp': {'display': '400', 'raw': 400, 'color': 'EEEEEE', 'text_color': '212121'},
},
],
},
result,
)
def test_rule_covers_row_does_not_apply_when_pivoted(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingConditionRule(
FormattingField(metric=self.dataset.fields.metric0),
ComparisonOperator.gt,
2,
'EEEEEE',
covers_row=True,
)
],
pivot=[self.dataset.fields.timestamp],
).transform(self.df, [self.dataset.fields.timestamp], [])
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': '100', 'accessor': '100', 'path_accessor': ['100']},
{'Header': '200', 'accessor': '200', 'path_accessor': ['200']},
{'Header': '300', 'accessor': '300', 'path_accessor': ['300']},
{'Header': '400', 'accessor': '400', 'path_accessor': ['400']},
],
'data': [
{
'$metrics': {'raw': 'Metric0'},
'100': {'display': '1', 'raw': 1},
'200': {'display': '2', 'raw': 2},
'300': {'color': 'EEEEEE', 'display': '3', 'raw': 3, 'text_color': '212121'},
'400': {'color': 'EEEEEE', 'display': '4', 'raw': 4, 'text_color': '212121'},
}
],
},
result,
)
def test_formatting_a_reference(self):
reference = DayOverDay(self.dataset.fields.timestamp)
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingConditionRule(
FormattingField(
metric=self.dataset.fields.metric0,
reference=reference,
),
ComparisonOperator.gt,
6,
'EEEEEE',
)
],
).transform(self.df, [], [reference])
self.assertEqual(
{
'columns': [
{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']},
{'Header': 'Metric0 DoD', 'accessor': '$metric0_dod', 'path_accessor': ['$metric0_dod']},
],
'data': [
{
'$metric0': {'display': '1', 'raw': 1},
'$metric0_dod': {'display': '1', 'raw': 1},
},
{
'$metric0': {'display': '2', 'raw': 2},
'$metric0_dod': {'display': '5', 'raw': 5},
},
{
'$metric0': {'display': '3', 'raw': 3},
'$metric0_dod': {'display': '9', 'raw': 9, 'color': 'EEEEEE', 'text_color': '212121'},
},
{
'$metric0': {'display': '4', 'raw': 4},
'$metric0_dod': {'display': '12', 'raw': 12, 'color': 'EEEEEE', 'text_color': '212121'},
},
],
},
result,
)
def test_formatting_an_operation(self):
operation = CumSum(self.dataset.fields.metric0)
result = ReactTable(
operation,
formatting_rules=[
FormattingConditionRule(
FormattingField(operation=operation),
ComparisonOperator.gt,
5,
'EEEEEE',
)
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [
{'Header': 'CumSum(Metric0)', 'accessor': '$cumsum(metric0)', 'path_accessor': ['$cumsum(metric0)']}
],
'data': [
{'$cumsum(metric0)': {'display': '1', 'raw': 1}},
{'$cumsum(metric0)': {'display': '3', 'raw': 3}},
{'$cumsum(metric0)': {'display': '6', 'raw': 6, 'color': 'EEEEEE', 'text_color': '212121'}},
{'$cumsum(metric0)': {'display': '10', 'raw': 10, 'color': 'EEEEEE', 'text_color': '212121'}},
],
},
result,
)
def test_formatting_a_reference_with_an_operation(self):
reference = DayOverDay(self.dataset.fields.timestamp)
operation = CumSum(self.dataset.fields.metric0)
result = ReactTable(
operation,
formatting_rules=[
FormattingConditionRule(
FormattingField(operation=operation, reference=reference),
ComparisonOperator.gt,
10,
'EEEEEE',
)
],
).transform(self.df, [], [reference])
self.assertEqual(
{
'columns': [
{
'Header': 'CumSum(Metric0)',
'accessor': '$cumsum(metric0)',
'path_accessor': ['$cumsum(metric0)'],
},
{
'Header': 'CumSum(Metric0) DoD',
'accessor': '$cumsum(metric0)_dod',
'path_accessor': ['$cumsum(metric0)_dod'],
},
],
'data': [
{
'$cumsum(metric0)': {'display': '1', 'raw': 1},
'$cumsum(metric0)_dod': {'display': '1', 'raw': 1},
},
{
'$cumsum(metric0)': {'display': '3', 'raw': 3},
'$cumsum(metric0)_dod': {'display': '6', 'raw': 6},
},
{
'$cumsum(metric0)': {'display': '6', 'raw': 6},
'$cumsum(metric0)_dod': {'display': '15', 'raw': 15, 'color': 'EEEEEE', 'text_color': '212121'},
},
{
'$cumsum(metric0)': {'display': '10', 'raw': 10},
'$cumsum(metric0)_dod': {'display': '27', 'raw': 27, 'color': 'EEEEEE', 'text_color': '212121'},
},
],
},
result,
)
def test_multiple_formatting_condition_rule(self):
result = ReactTable(
self.dataset.fields.metric0,
formatting_rules=[
FormattingConditionRule(
FormattingField(metric=self.dataset.fields.metric0),
ComparisonOperator.gt,
3,
'EEEEEE',
),
FormattingConditionRule(
FormattingField(metric=self.dataset.fields.metric0),
ComparisonOperator.lt,
2,
'AAAAAA',
),
],
).transform(self.df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Metric0', 'accessor': '$metric0', 'path_accessor': ['$metric0']}],
'data': [
{'$metric0': {'display': '1', 'raw': 1, 'color': 'AAAAAA', 'text_color': '212121'}},
{'$metric0': {'display': '2', 'raw': 2}},
{'$metric0': {'display': '3', 'raw': 3}},
{'$metric0': {'display': '4', 'raw': 4, 'color': 'EEEEEE', 'text_color': '212121'}},
],
},
result,
)
class ReactTableTransformerTests(TestCase):
maxDiff = None
def test_single_metric(self):
result = ReactTable(mock_dataset.fields.votes).transform(dimx0_metricx1_df, [], [])
self.assertEqual(
{
'columns': [{'Header': 'Votes', 'accessor': '$votes', 'path_accessor': ['$votes']}],
'data': [{'$votes': {'display': '111,674,336', 'raw': 111674336}}],
},
result,
)
def test_multiple_metrics(self):
result = ReactTable(mock_dataset.fields.votes, mock_dataset.fields.wins).transform(dimx0_metricx2_df, [], [])
self.assertEqual(
{
'columns': [
{'Header': 'Votes', 'accessor': '$votes', 'path_accessor': ['$votes']},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$votes': {'display': '111,674,336', 'raw': 111674336},
'$wins': {'display': '12', 'raw': 12},
}
],
},
result,
)
def test_multiple_metrics_reversed(self):
result = ReactTable(mock_dataset.fields.wins, mock_dataset.fields.votes).transform(dimx0_metricx2_df, [], [])
self.assertEqual(
{
'columns': [
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
{'Header': 'Votes', 'accessor': '$votes', 'path_accessor': ['$votes']},
],
'data': [
{
'$votes': {'display': '111,674,336', 'raw': 111674336},
'$wins': {'display': '12', 'raw': 12},
}
],
},
result,
)
def test_time_series_dim(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_date_df, [day(mock_dataset.fields.timestamp)], [])
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {
'display': '2004-01-01',
'raw': '2004-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {
'display': '2008-01-01',
'raw': '2008-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {
'display': '2012-01-01',
'raw': '2012-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2},
},
],
},
result,
)
def test_time_series_dim_with_operation(self):
result = ReactTable(CumSum(mock_dataset.fields.votes)).transform(
dimx1_date_operation_df, [day(mock_dataset.fields.timestamp)], []
)
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'CumSum(Votes)', 'accessor': '$cumsum(votes)', 'path_accessor': ['$cumsum(votes)']},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '15,220,449', 'raw': 15220449},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '31,882,466', 'raw': 31882466},
},
{
'$timestamp': {
'display': '2004-01-01',
'raw': '2004-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '51,497,398', 'raw': 51497398},
},
{
'$timestamp': {
'display': '2008-01-01',
'raw': '2008-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '72,791,613', 'raw': 72791613},
},
{
'$timestamp': {
'display': '2012-01-01',
'raw': '2012-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '93,363,823', 'raw': 93363823},
},
{
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$cumsum(votes)': {'display': '111,674,336', 'raw': 111674336},
},
],
},
result,
)
def test_dimx1_str(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_str_df, [mock_dataset.fields.political_party], [])
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {'display': '6', 'raw': 6.0},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$political_party': {
'raw': 'Republican',
'hyperlink': 'http://example.com/Republican',
},
'$wins': {'display': '6', 'raw': 6.0},
},
],
},
result,
)
def test_dimx1_int(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_num_df, [mock_dataset.fields['candidate-id']], [])
self.assertEqual(
{
'columns': [
{'Header': 'Candidate ID', 'accessor': '$candidate-id'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$candidate-id': {'display': '1', 'raw': 1.0},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$candidate-id': {'display': '2', 'raw': 2.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '3', 'raw': 3.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '4', 'raw': 4.0},
'$wins': {'display': '4', 'raw': 4.0},
},
{
'$candidate-id': {'display': '5', 'raw': 5.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '6', 'raw': 6.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '7', 'raw': 7.0},
'$wins': {'display': '4', 'raw': 4.0},
},
{
'$candidate-id': {'display': '8', 'raw': 8.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '9', 'raw': 9.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '10', 'raw': 10.0},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$candidate-id': {'display': '11', 'raw': 11.0},
'$wins': {'display': '0', 'raw': 0.0},
},
],
},
result,
)
def test_dimx2_date_str(self):
result = ReactTable(mock_dataset.fields.wins).transform(
dimx2_date_str_df,
[day(mock_dataset.fields.timestamp), mock_dataset.fields.political_party],
[],
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '0', 'raw': 0.0},
},
],
},
result,
)
def test_dimx2_date_str_totals_date(self):
dimensions = [
day(mock_dataset.fields.timestamp),
Rollup(mock_dataset.fields.political_party),
]
result = ReactTable(mock_dataset.fields.wins).transform(dimx2_date_str_totals_df, dimensions, [])
self.assertIn('data', result)
result['data'] = result['data'][-3:] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$political_party': {
'raw': 'Republican',
'hyperlink': 'http://example.com/Republican',
},
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$political_party': {
'display': 'Totals',
'raw': '$totals',
},
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
],
},
result,
)
def test_dimx2_date_str_totals_all(self):
dimensions = [
Rollup(day(mock_dataset.fields.timestamp)),
Rollup(mock_dataset.fields.political_party),
]
result = ReactTable(mock_dataset.fields.wins).transform(dimx2_date_str_totalsx2_df, dimensions, [])
self.assertIn('data', result)
result['data'] = result['data'][:3] + result['data'][-1:] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$political_party': {
'raw': 'Republican',
'hyperlink': 'http://example.com/Republican',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$political_party': {
'display': 'Totals',
'raw': '$totals',
},
'$timestamp': {'display': 'Totals', 'raw': '$totals'},
'$wins': {'display': '12', 'raw': 12.0},
},
],
},
result,
)
def test_dimx2_date_str_reference(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = ReactTable(mock_dataset.fields.votes).transform(dimx2_date_str_ref_df, dimensions, references)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Votes', 'accessor': '$votes', 'path_accessor': ['$votes']},
{'Header': 'Votes EoE', 'accessor': '$votes_eoe', 'path_accessor': ['$votes_eoe']},
],
'data': [
{
'$political_party': {
'raw': 'Republican',
'hyperlink': 'http://example.com/Republican',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {'display': '6,564,547', 'raw': 6564547},
'$votes_eoe': {'display': '7,579,518', 'raw': 7579518},
},
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {'display': '8,294,949', 'raw': 8294949},
'$votes_eoe': {'display': '1,076,384', 'raw': 1076384},
},
],
},
result,
)
def test_dimx1_date_metricsx2_references(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = ReactTable(mock_dataset.fields.votes, mock_dataset.fields.wins).transform(
dimx2_date_str_ref_df, dimensions, references
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Votes', 'accessor': '$votes', 'path_accessor': ['$votes']},
{'Header': 'Votes EoE', 'accessor': '$votes_eoe', 'path_accessor': ['$votes_eoe']},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
{'Header': 'Wins EoE', 'accessor': '$wins_eoe', 'path_accessor': ['$wins_eoe']},
],
'data': [
{
'$political_party': {
'raw': 'Republican',
'hyperlink': 'http://example.com/Republican',
},
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {'display': '6,564,547', 'raw': 6564547},
'$votes_eoe': {'display': '7,579,518', 'raw': 7579518},
'$wins': {'display': '0', 'raw': 0},
'$wins_eoe': {'display': '2', 'raw': 2},
},
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {'display': '8,294,949', 'raw': 8294949},
'$votes_eoe': {'display': '1,076,384', 'raw': 1076384},
'$wins': {'display': '0', 'raw': 0},
'$wins_eoe': {'display': '0', 'raw': 0},
},
],
},
result,
)
def test_transpose(self):
dimensions = [mock_dataset.fields.political_party]
result = ReactTable(mock_dataset.fields.wins, transpose=True).transform(dimx1_str_df, dimensions, [])
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': 'Democrat', 'accessor': 'Democrat', 'path_accessor': ['Democrat']},
{'Header': 'Independent', 'accessor': 'Independent', 'path_accessor': ['Independent']},
{'Header': 'Republican', 'accessor': 'Republican', 'path_accessor': ['Republican']},
],
'data': [
{
'$metrics': {'raw': 'Wins'},
'Democrat': {'display': '6', 'raw': 6.0},
'Independent': {'display': '0', 'raw': 0.0},
'Republican': {'display': '6', 'raw': 6.0},
}
],
},
result,
)
def test_transpose_without_dimension(self):
result = ReactTable(mock_dataset.fields.votes, mock_dataset.fields.wins, transpose=True).transform(
dimx1_none_df, [], []
)
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': '', 'accessor': '0', 'path_accessor': ['0']},
],
'data': [
{
0: {'display': '111,674,336', 'raw': 111674336},
'$metrics': {'raw': 'Votes'},
},
{0: {'display': '12', 'raw': 12}, '$metrics': {'raw': 'Wins'}},
],
},
result,
)
def test_dimx2_hide_dim1(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
result = ReactTable(mock_dataset.fields.wins, hide=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$timestamp': {'display': '1996-01-01', 'raw': '1996-01-01T00:00:00'},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {'display': '1996-01-01', 'raw': '1996-01-01T00:00:00'},
'$wins': {'display': '0', 'raw': 0},
},
],
},
result,
)
def test_dimx2_metricx2_refx2_hide_metrics(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = ReactTable(
mock_dataset.fields.votes,
mock_dataset.fields.wins,
hide=[mock_dataset.fields.votes, mock_dataset.fields.wins],
).transform(dimx2_date_str_ref_df, dimensions, references)
self.assertIn("data", result)
result["data"] = result["data"][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
"columns": [
{"Header": "Timestamp", "accessor": "$timestamp"},
{"Header": "Party", "accessor": "$political_party"},
{"Header": "Votes EoE", "accessor": "$votes_eoe", 'path_accessor': ['$votes_eoe']},
{"Header": "Wins EoE", "accessor": "$wins_eoe", 'path_accessor': ['$wins_eoe']},
],
"data": [
{
"$political_party": {
"raw": "Republican",
"hyperlink": "http://example.com/Republican",
},
"$timestamp": {
"display": "1996-01-01",
"raw": "1996-01-01T00:00:00",
},
"$votes_eoe": {"display": "7,579,518", "raw": 7579518.0},
"$wins_eoe": {"display": "2", "raw": 2.0},
},
{
"$political_party": {
"raw": "Democrat",
"hyperlink": "http://example.com/Democrat",
},
"$timestamp": {
"display": "2000-01-01",
"raw": "2000-01-01T00:00:00",
},
"$votes_eoe": {"display": "1,076,384", "raw": 1076384.0},
"$wins_eoe": {"display": "0", "raw": 0.0},
},
],
},
result,
)
def test_dimx2_fetch_only_dim1(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
dimensions[1].fetch_only = True
result = ReactTable(mock_dataset.fields.wins).transform(dimx2_date_str_df, dimensions, [])
dimensions[1].fetch_only = False
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$timestamp': {'display': '1996-01-01', 'raw': '1996-01-01T00:00:00'},
'$wins': {'display': '2', 'raw': 2},
},
{
'$timestamp': {'display': '1996-01-01', 'raw': '1996-01-01T00:00:00'},
'$wins': {'display': '0', 'raw': 0},
},
],
},
result,
)
def test_dimx2_pivot_dim1(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
result = ReactTable(mock_dataset.fields.wins, pivot=[mock_dataset.fields.timestamp]).transform(
dimx2_date_str_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{
'Header': '1996-01-01',
'accessor': '$wins.1996-01-01T00:00:00',
'path_accessor': ['$wins', '1996-01-01T00:00:00'],
},
{
'Header': '2000-01-01',
'accessor': '$wins.2000-01-01T00:00:00',
'path_accessor': ['$wins', '2000-01-01T00:00:00'],
},
{
'Header': '2004-01-01',
'accessor': '$wins.2004-01-01T00:00:00',
'path_accessor': ['$wins', '2004-01-01T00:00:00'],
},
{
'Header': '2008-01-01',
'accessor': '$wins.2008-01-01T00:00:00',
'path_accessor': ['$wins', '2008-01-01T00:00:00'],
},
{
'Header': '2012-01-01',
'accessor': '$wins.2012-01-01T00:00:00',
'path_accessor': ['$wins', '2012-01-01T00:00:00'],
},
{
'Header': '2016-01-01',
'accessor': '$wins.2016-01-01T00:00:00',
'path_accessor': ['$wins', '2016-01-01T00:00:00'],
},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {
'1996-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2000-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2004-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2008-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2012-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2016-01-01T00:00:00': {'display': '0', 'raw': 0.0},
},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$wins': {
'1996-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2000-01-01T00:00:00': {'display': '', 'raw': None},
'2004-01-01T00:00:00': {'display': '', 'raw': None},
'2008-01-01T00:00:00': {'display': '', 'raw': None},
'2012-01-01T00:00:00': {'display': '', 'raw': None},
'2016-01-01T00:00:00': {'display': '', 'raw': None},
},
},
],
},
result,
)
def test_dimx2_pivot_dim1_with_sorting(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
result = ReactTable(mock_dataset.fields.wins, pivot=[mock_dataset.fields.timestamp], sort=[0]).transform(
dimx2_date_str_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{
'Header': '1996-01-01',
'accessor': '$wins.1996-01-01T00:00:00',
'path_accessor': ['$wins', '1996-01-01T00:00:00'],
},
{
'Header': '2000-01-01',
'accessor': '$wins.2000-01-01T00:00:00',
'path_accessor': ['$wins', '2000-01-01T00:00:00'],
},
{
'Header': '2004-01-01',
'accessor': '$wins.2004-01-01T00:00:00',
'path_accessor': ['$wins', '2004-01-01T00:00:00'],
},
{
'Header': '2008-01-01',
'accessor': '$wins.2008-01-01T00:00:00',
'path_accessor': ['$wins', '2008-01-01T00:00:00'],
},
{
'Header': '2012-01-01',
'accessor': '$wins.2012-01-01T00:00:00',
'path_accessor': ['$wins', '2012-01-01T00:00:00'],
},
{
'Header': '2016-01-01',
'accessor': '$wins.2016-01-01T00:00:00',
'path_accessor': ['$wins', '2016-01-01T00:00:00'],
},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {
'1996-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2000-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2004-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2008-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2012-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2016-01-01T00:00:00': {'display': '0', 'raw': 0.0},
},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$wins': {
'1996-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2000-01-01T00:00:00': {'display': '', 'raw': None},
'2004-01-01T00:00:00': {'display': '', 'raw': None},
'2008-01-01T00:00:00': {'display': '', 'raw': None},
'2012-01-01T00:00:00': {'display': '', 'raw': None},
'2016-01-01T00:00:00': {'display': '', 'raw': None},
},
},
],
},
result,
)
def test_dimx2_pivot_dim2(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
result = ReactTable(mock_dataset.fields.wins, pivot=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Democrat', 'accessor': '$wins.Democrat', 'path_accessor': ['$wins', 'Democrat']},
{
'Header': 'Independent',
'accessor': '$wins.Independent',
'path_accessor': ['$wins', 'Independent'],
},
{'Header': 'Republican', 'accessor': '$wins.Republican', 'path_accessor': ['$wins', 'Republican']},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {
'Democrat': {'display': '2', 'raw': 2.0},
'Independent': {'display': '0', 'raw': 0.0},
'Republican': {'display': '0', 'raw': 0.0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$wins': {
'Democrat': {'display': '0', 'raw': 0.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2.0},
},
},
],
},
result,
)
def test_metricx2_pivot_dim2(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
result = ReactTable(
mock_dataset.fields.wins,
mock_dataset.fields.votes,
pivot=[mock_dataset.fields.political_party],
).transform(dimx2_date_str_df, dimensions, [])
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{
'Header': 'Votes',
'columns': [
{
'Header': 'Democrat',
'accessor': '$votes.Democrat',
'path_accessor': ['$votes', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$votes.Independent',
'path_accessor': ['$votes', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$votes.Republican',
'path_accessor': ['$votes', 'Republican'],
},
],
},
{
'Header': 'Wins',
'columns': [
{
'Header': 'Democrat',
'accessor': '$wins.Democrat',
'path_accessor': ['$wins', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$wins.Independent',
'path_accessor': ['$wins', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$wins.Republican',
'path_accessor': ['$wins', 'Republican'],
},
],
},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {
'Democrat': {'display': '7,579,518', 'raw': 7579518},
'Independent': {'display': '1,076,384', 'raw': 1076384},
'Republican': {'display': '6,564,547', 'raw': 6564547},
},
'$wins': {
'Democrat': {'display': '2', 'raw': 2},
'Independent': {'display': '0', 'raw': 0},
'Republican': {'display': '0', 'raw': 0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {
'Democrat': {'display': '8,294,949', 'raw': 8294949},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '8,367,068', 'raw': 8367068},
},
'$wins': {
'Democrat': {'display': '0', 'raw': 0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2},
},
},
],
},
result,
)
def test_dimx2_metricx2_refx2_pivot_dim2(self):
dimensions = [
day(mock_dataset.fields.timestamp),
mock_dataset.fields.political_party,
]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = ReactTable(
mock_dataset.fields.votes,
mock_dataset.fields.wins,
pivot=[mock_dataset.fields.political_party],
).transform(dimx2_date_str_ref_df, dimensions, references)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{
'Header': 'Votes',
'columns': [
{
'Header': 'Republican',
'accessor': '$votes.Republican',
'path_accessor': ['$votes', 'Republican'],
},
{
'Header': 'Democrat',
'accessor': '$votes.Democrat',
'path_accessor': ['$votes', 'Democrat'],
},
],
},
{
'Header': 'Votes EoE',
'columns': [
{
'Header': 'Republican',
'accessor': '$votes_eoe.Republican',
'path_accessor': ['$votes_eoe', 'Republican'],
},
{
'Header': 'Democrat',
'accessor': '$votes_eoe.Democrat',
'path_accessor': ['$votes_eoe', 'Democrat'],
},
],
},
{
'Header': 'Wins',
'columns': [
{
'Header': 'Republican',
'accessor': '$wins.Republican',
'path_accessor': ['$wins', 'Republican'],
},
{
'Header': 'Democrat',
'accessor': '$wins.Democrat',
'path_accessor': ['$wins', 'Democrat'],
},
],
},
{
'Header': 'Wins EoE',
'columns': [
{
'Header': 'Republican',
'accessor': '$wins_eoe.Republican',
'path_accessor': ['$wins_eoe', 'Republican'],
},
{
'Header': 'Democrat',
'accessor': '$wins_eoe.Democrat',
'path_accessor': ['$wins_eoe', 'Democrat'],
},
],
},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {
'Democrat': {'display': '', 'raw': None},
'Republican': {'display': '6,564,547', 'raw': 6564547.0},
},
'$votes_eoe': {
'Democrat': {'display': '', 'raw': None},
'Republican': {'display': '7,579,518', 'raw': 7579518.0},
},
'$wins': {
'Democrat': {'display': '', 'raw': None},
'Republican': {'display': '0', 'raw': 0.0},
},
'$wins_eoe': {
'Democrat': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2.0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {
'Democrat': {'display': '8,294,949', 'raw': 8294949.0},
'Republican': {'display': '8,367,068', 'raw': 8367068.0},
},
'$votes_eoe': {
'Democrat': {'display': '1,076,384', 'raw': 1076384.0},
'Republican': {'display': '6,564,547', 'raw': 6564547.0},
},
'$wins': {
'Democrat': {'display': '0', 'raw': 0.0},
'Republican': {'display': '2', 'raw': 2.0},
},
'$wins_eoe': {
'Democrat': {'display': '0', 'raw': 0.0},
'Republican': {'display': '0', 'raw': 0.0},
},
},
],
},
result,
)
def test_dimx1_int_metricx1_pivot_dim1_same_as_transpose(self):
result = ReactTable(mock_dataset.fields.wins, pivot=[mock_dataset.fields['candidate-id']]).transform(
dimx1_num_df, [mock_dataset.fields['candidate-id']], []
)
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': '1', 'accessor': '1', 'path_accessor': ['1']},
{'Header': '2', 'accessor': '2', 'path_accessor': ['2']},
{'Header': '3', 'accessor': '3', 'path_accessor': ['3']},
{'Header': '4', 'accessor': '4', 'path_accessor': ['4']},
{'Header': '5', 'accessor': '5', 'path_accessor': ['5']},
{'Header': '6', 'accessor': '6', 'path_accessor': ['6']},
{'Header': '7', 'accessor': '7', 'path_accessor': ['7']},
{'Header': '8', 'accessor': '8', 'path_accessor': ['8']},
{'Header': '9', 'accessor': '9', 'path_accessor': ['9']},
{'Header': '10', 'accessor': '10', 'path_accessor': ['10']},
{'Header': '11', 'accessor': '11', 'path_accessor': ['11']},
],
'data': [
{
'$metrics': {'raw': 'Wins'},
'1': {'display': '2', 'raw': 2.0},
'2': {'display': '0', 'raw': 0.0},
'3': {'display': '0', 'raw': 0.0},
'4': {'display': '4', 'raw': 4.0},
'5': {'display': '0', 'raw': 0.0},
'6': {'display': '0', 'raw': 0.0},
'7': {'display': '4', 'raw': 4.0},
'8': {'display': '0', 'raw': 0.0},
'9': {'display': '0', 'raw': 0.0},
'10': {'display': '2', 'raw': 2.0},
'11': {'display': '0', 'raw': 0.0},
}
],
},
result,
)
def test_dimx1_int_metricx1_transpose(self):
result = ReactTable(
mock_dataset.fields.wins,
pivot=[mock_dataset.fields['candidate-id']],
transpose=True,
).transform(dimx1_num_df, [mock_dataset.fields['candidate-id']], [])
self.assertEqual(
{
'columns': [
{'Header': 'Candidate ID', 'accessor': '$candidate-id'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$candidate-id': {'display': '1', 'raw': 1.0},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$candidate-id': {'display': '2', 'raw': 2.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '3', 'raw': 3.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '4', 'raw': 4.0},
'$wins': {'display': '4', 'raw': 4.0},
},
{
'$candidate-id': {'display': '5', 'raw': 5.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '6', 'raw': 6.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '7', 'raw': 7.0},
'$wins': {'display': '4', 'raw': 4.0},
},
{
'$candidate-id': {'display': '8', 'raw': 8.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '9', 'raw': 9.0},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$candidate-id': {'display': '10', 'raw': 10.0},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$candidate-id': {'display': '11', 'raw': 11.0},
'$wins': {'display': '0', 'raw': 0.0},
},
],
},
result,
)
def test_dimx1_int_metricx2_pivot(self):
result = ReactTable(
mock_dataset.fields.wins,
mock_dataset.fields.votes,
pivot=[mock_dataset.fields['candidate-id']],
).transform(dimx1_num_df, [mock_dataset.fields['candidate-id']], [])
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': '1', 'accessor': '1', 'path_accessor': ['1']},
{'Header': '2', 'accessor': '2', 'path_accessor': ['2']},
{'Header': '3', 'accessor': '3', 'path_accessor': ['3']},
{'Header': '4', 'accessor': '4', 'path_accessor': ['4']},
{'Header': '5', 'accessor': '5', 'path_accessor': ['5']},
{'Header': '6', 'accessor': '6', 'path_accessor': ['6']},
{'Header': '7', 'accessor': '7', 'path_accessor': ['7']},
{'Header': '8', 'accessor': '8', 'path_accessor': ['8']},
{'Header': '9', 'accessor': '9', 'path_accessor': ['9']},
{'Header': '10', 'accessor': '10', 'path_accessor': ['10']},
{'Header': '11', 'accessor': '11', 'path_accessor': ['11']},
],
'data': [
{
'1': {'display': '2', 'raw': 2.0},
'2': {'display': '0', 'raw': 0.0},
'3': {'display': '0', 'raw': 0.0},
'4': {'display': '4', 'raw': 4.0},
'5': {'display': '0', 'raw': 0.0},
'6': {'display': '0', 'raw': 0.0},
'7': {'display': '4', 'raw': 4.0},
'8': {'display': '0', 'raw': 0.0},
'9': {'display': '0', 'raw': 0.0},
'10': {'display': '2', 'raw': 2.0},
'11': {'display': '0', 'raw': 0.0},
'$metrics': {'raw': 'Wins'},
},
{
'1': {'display': '7,579,518', 'raw': 7579518.0},
'2': {'display': '6,564,547', 'raw': 6564547.0},
'3': {'display': '1,076,384', 'raw': 1076384.0},
'4': {'display': '18,403,811', 'raw': 18403811.0},
'5': {'display': '8,294,949', 'raw': 8294949.0},
'6': {'display': '9,578,189', 'raw': 9578189.0},
'7': {'display': '24,227,234', 'raw': 24227234.0},
'8': {'display': '9,491,109', 'raw': 9491109.0},
'9': {'display': '8,148,082', 'raw': 8148082.0},
'10': {'display': '13,438,835', 'raw': 13438835.0},
'11': {'display': '4,871,678', 'raw': 4871678.0},
'$metrics': {'raw': 'Votes'},
},
],
},
result,
)
def test_dimx1_date_metricx1(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_date_df, [day(mock_dataset.fields.timestamp)], [])
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$timestamp': {
'display': '2004-01-01',
'raw': '2004-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$timestamp': {
'display': '2008-01-01',
'raw': '2008-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$timestamp': {
'display': '2012-01-01',
'raw': '2012-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
{
'$timestamp': {
'display': '2016-01-01',
'raw': '2016-01-01T00:00:00',
},
'$wins': {'display': '2', 'raw': 2.0},
},
],
},
result,
)
def test_dimx2_metricx1_pivot_dim2_rollup_dim2(self):
dimensions = [
day(mock_dataset.fields.timestamp),
Rollup(mock_dataset.fields.political_party),
]
result = ReactTable(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_totalsx2_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] + result['data'][-1:] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{'Header': 'Democrat', 'accessor': '$votes.Democrat', 'path_accessor': ['$votes', 'Democrat']},
{
'Header': 'Independent',
'accessor': '$votes.Independent',
'path_accessor': ['$votes', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$votes.Republican',
'path_accessor': ['$votes', 'Republican'],
},
{
'Header': 'Totals',
'accessor': '$votes.$totals',
'path_accessor': ['$votes', '$totals'],
'className': 'fireant-totals',
},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '15,220,449', 'raw': 15220449.0},
'Democrat': {'display': '7,579,518', 'raw': 7579518.0},
'Independent': {'display': '1,076,384', 'raw': 1076384.0},
'Republican': {'display': '6,564,547', 'raw': 6564547.0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '16,662,017', 'raw': 16662017.0},
'Democrat': {'display': '8,294,949', 'raw': 8294949.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '8,367,068', 'raw': 8367068.0},
},
},
{
'$timestamp': {'display': 'Totals', 'raw': '$totals'},
'$votes': {
'$totals': {'display': '111,674,336', 'raw': 111674336.0},
'Democrat': {'display': '', 'raw': None},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '', 'raw': None},
},
},
],
},
result,
)
def test_dimx2_date_str_pivot_dim2_rollup_all(self):
political_party = Rollup(mock_dataset.fields.political_party)
dimensions = [Rollup(day(mock_dataset.fields.timestamp)), political_party]
result = ReactTable(mock_dataset.fields.wins, mock_dataset.fields.votes, pivot=[political_party]).transform(
dimx2_date_str_totalsx2_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:2] + result['data'][-1:] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{
'Header': 'Votes',
'columns': [
{
'Header': 'Democrat',
'accessor': '$votes.Democrat',
'path_accessor': ['$votes', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$votes.Independent',
'path_accessor': ['$votes', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$votes.Republican',
'path_accessor': ['$votes', 'Republican'],
},
{
'Header': 'Totals',
'accessor': '$votes.$totals',
'path_accessor': ['$votes', '$totals'],
'className': 'fireant-totals',
},
],
},
{
'Header': 'Wins',
'columns': [
{
'Header': 'Democrat',
'accessor': '$wins.Democrat',
'path_accessor': ['$wins', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$wins.Independent',
'path_accessor': ['$wins', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$wins.Republican',
'path_accessor': ['$wins', 'Republican'],
},
{
'Header': 'Totals',
'accessor': '$wins.$totals',
'path_accessor': ['$wins', '$totals'],
'className': 'fireant-totals',
},
],
},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '15,220,449', 'raw': 15220449.0},
'Democrat': {'display': '7,579,518', 'raw': 7579518.0},
'Independent': {'display': '1,076,384', 'raw': 1076384.0},
'Republican': {'display': '6,564,547', 'raw': 6564547.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '2', 'raw': 2.0},
'Independent': {'display': '0', 'raw': 0.0},
'Republican': {'display': '0', 'raw': 0.0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '16,662,017', 'raw': 16662017.0},
'Democrat': {'display': '8,294,949', 'raw': 8294949.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '8,367,068', 'raw': 8367068.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '0', 'raw': 0.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2.0},
},
},
{
'$timestamp': {'display': 'Totals', 'raw': '$totals'},
'$votes': {
'$totals': {'display': '111,674,336', 'raw': 111674336.0},
'Democrat': {'display': '', 'raw': None},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '', 'raw': None},
},
'$wins': {
'$totals': {'display': '12', 'raw': 12.0},
'Democrat': {'display': '', 'raw': None},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '', 'raw': None},
},
},
],
},
result,
)
def test_dimx2_pivot_both_dims_and_transpose(self):
political_party = Rollup(mock_dataset.fields.political_party)
dimensions = [Rollup(day(mock_dataset.fields.timestamp)), political_party]
result = ReactTable(mock_dataset.fields.wins, mock_dataset.fields.votes, pivot=[political_party]).transform(
dimx2_date_str_totalsx2_df, dimensions, []
)
self.assertIn('data', result)
result['data'] = result['data'][:4] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Timestamp', 'accessor': '$timestamp'},
{
'Header': 'Votes',
'columns': [
{
'Header': 'Democrat',
'accessor': '$votes.Democrat',
'path_accessor': ['$votes', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$votes.Independent',
'path_accessor': ['$votes', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$votes.Republican',
'path_accessor': ['$votes', 'Republican'],
},
{
'Header': 'Totals',
'accessor': '$votes.$totals',
'path_accessor': ['$votes', '$totals'],
'className': 'fireant-totals',
},
],
},
{
'Header': 'Wins',
'columns': [
{
'Header': 'Democrat',
'accessor': '$wins.Democrat',
'path_accessor': ['$wins', 'Democrat'],
},
{
'Header': 'Independent',
'accessor': '$wins.Independent',
'path_accessor': ['$wins', 'Independent'],
},
{
'Header': 'Republican',
'accessor': '$wins.Republican',
'path_accessor': ['$wins', 'Republican'],
},
{
'Header': 'Totals',
'accessor': '$wins.$totals',
'path_accessor': ['$wins', '$totals'],
'className': 'fireant-totals',
},
],
},
],
'data': [
{
'$timestamp': {
'display': '1996-01-01',
'raw': '1996-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '15,220,449', 'raw': 15220449.0},
'Democrat': {'display': '7,579,518', 'raw': 7579518.0},
'Independent': {'display': '1,076,384', 'raw': 1076384.0},
'Republican': {'display': '6,564,547', 'raw': 6564547.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '2', 'raw': 2.0},
'Independent': {'display': '0', 'raw': 0.0},
'Republican': {'display': '0', 'raw': 0.0},
},
},
{
'$timestamp': {
'display': '2000-01-01',
'raw': '2000-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '16,662,017', 'raw': 16662017.0},
'Democrat': {'display': '8,294,949', 'raw': 8294949.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '8,367,068', 'raw': 8367068.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '0', 'raw': 0.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2.0},
},
},
{
'$timestamp': {
'display': '2004-01-01',
'raw': '2004-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '19,614,932', 'raw': 19614932.0},
'Democrat': {'display': '9,578,189', 'raw': 9578189.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '10,036,743', 'raw': 10036743.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '0', 'raw': 0.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '2', 'raw': 2.0},
},
},
{
'$timestamp': {
'display': '2008-01-01',
'raw': '2008-01-01T00:00:00',
},
'$votes': {
'$totals': {'display': '21,294,215', 'raw': 21294215.0},
'Democrat': {'display': '11,803,106', 'raw': 11803106.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '9,491,109', 'raw': 9491109.0},
},
'$wins': {
'$totals': {'display': '2', 'raw': 2.0},
'Democrat': {'display': '2', 'raw': 2.0},
'Independent': {'display': '', 'raw': None},
'Republican': {'display': '0', 'raw': 0.0},
},
},
],
},
result,
)
def test_dimx2_date_str_pivot_dim2_transpose_rollup_all(self):
political_party = Rollup(mock_dataset.fields.political_party)
dimensions = [Rollup(day(mock_dataset.fields.timestamp)), political_party]
result = ReactTable(
mock_dataset.fields.wins,
mock_dataset.fields.votes,
pivot=[political_party],
transpose=True,
).transform(dimx2_date_str_totalsx2_df, dimensions, [])
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': 'Party', 'accessor': '$political_party'},
{
'Header': '1996-01-01',
'accessor': '1996-01-01T00:00:00',
'path_accessor': ['1996-01-01T00:00:00'],
},
{
'Header': '2000-01-01',
'accessor': '2000-01-01T00:00:00',
'path_accessor': ['2000-01-01T00:00:00'],
},
{
'Header': '2004-01-01',
'accessor': '2004-01-01T00:00:00',
'path_accessor': ['2004-01-01T00:00:00'],
},
{
'Header': '2008-01-01',
'accessor': '2008-01-01T00:00:00',
'path_accessor': ['2008-01-01T00:00:00'],
},
{
'Header': '2012-01-01',
'accessor': '2012-01-01T00:00:00',
'path_accessor': ['2012-01-01T00:00:00'],
},
{
'Header': '2016-01-01',
'accessor': '2016-01-01T00:00:00',
'path_accessor': ['2016-01-01T00:00:00'],
},
{
'Header': 'Totals',
'accessor': '$totals',
'path_accessor': ['$totals'],
'className': 'fireant-totals',
},
],
'data': [
{
'$metrics': {'raw': 'Wins'},
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$totals': {'display': '', 'raw': None},
'1996-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2000-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2004-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2008-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2012-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2016-01-01T00:00:00': {'display': '0', 'raw': 0.0},
},
{
'$metrics': {'raw': 'Wins'},
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$totals': {'display': '', 'raw': None},
'1996-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2000-01-01T00:00:00': {'display': '', 'raw': None},
'2004-01-01T00:00:00': {'display': '', 'raw': None},
'2008-01-01T00:00:00': {'display': '', 'raw': None},
'2012-01-01T00:00:00': {'display': '', 'raw': None},
'2016-01-01T00:00:00': {'display': '', 'raw': None},
},
],
},
result,
)
def test_dimx2_pivot_dim2_rollup_all_no_rollup_on_pivot_arg(self):
dimensions = [
Rollup(day(mock_dataset.fields.timestamp)),
Rollup(mock_dataset.fields.political_party),
]
result = ReactTable(
mock_dataset.fields.wins,
mock_dataset.fields.votes,
pivot=[mock_dataset.fields.political_party],
transpose=True,
).transform(dimx2_date_str_totalsx2_df, dimensions, [])
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': 'Party', 'accessor': '$political_party'},
{
'Header': '1996-01-01',
'accessor': '1996-01-01T00:00:00',
'path_accessor': ['1996-01-01T00:00:00'],
},
{
'Header': '2000-01-01',
'accessor': '2000-01-01T00:00:00',
'path_accessor': ['2000-01-01T00:00:00'],
},
{
'Header': '2004-01-01',
'accessor': '2004-01-01T00:00:00',
'path_accessor': ['2004-01-01T00:00:00'],
},
{
'Header': '2008-01-01',
'accessor': '2008-01-01T00:00:00',
'path_accessor': ['2008-01-01T00:00:00'],
},
{
'Header': '2012-01-01',
'accessor': '2012-01-01T00:00:00',
'path_accessor': ['2012-01-01T00:00:00'],
},
{
'Header': '2016-01-01',
'accessor': '2016-01-01T00:00:00',
'path_accessor': ['2016-01-01T00:00:00'],
},
{
'Header': 'Totals',
'accessor': '$totals',
'path_accessor': ['$totals'],
'className': 'fireant-totals',
},
],
'data': [
{
'$metrics': {'raw': 'Wins'},
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$totals': {'display': '', 'raw': None},
'1996-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2000-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2004-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2008-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2012-01-01T00:00:00': {'display': '2', 'raw': 2.0},
'2016-01-01T00:00:00': {'display': '0', 'raw': 0.0},
},
{
'$metrics': {'raw': 'Wins'},
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$totals': {'display': '', 'raw': None},
'1996-01-01T00:00:00': {'display': '0', 'raw': 0.0},
'2000-01-01T00:00:00': {'display': '', 'raw': None},
'2004-01-01T00:00:00': {'display': '', 'raw': None},
'2008-01-01T00:00:00': {'display': '', 'raw': None},
'2012-01-01T00:00:00': {'display': '', 'raw': None},
'2016-01-01T00:00:00': {'display': '', 'raw': None},
},
],
},
result,
)
class ReactTableHyperlinkTransformerTests(TestCase):
maxDiff = None
def test_add_hyperlink_with_formatted_values(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_str_df, [mock_dataset.fields.political_party], [])
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'hyperlink': 'http://example.com/Democrat',
'raw': 'Democrat',
},
'$wins': {'display': '6', 'raw': 6.0},
},
{
'$political_party': {
'hyperlink': 'http://example.com/Independent',
'raw': 'Independent',
},
'$wins': {'display': '0', 'raw': 0.0},
},
{
'$political_party': {
'hyperlink': 'http://example.com/Republican',
'raw': 'Republican',
},
'$wins': {'display': '6', 'raw': 6.0},
},
],
},
result,
)
def test_do_not_add_hyperlink_to_pivoted_dimensions(self):
dimensions = [mock_dataset.fields.political_party]
result = ReactTable(mock_dataset.fields.wins, pivot=dimensions).transform(dimx1_str_df, dimensions, [])
self.assertEqual(
{
'columns': [
{'Header': '', 'accessor': '$metrics'},
{'Header': 'Democrat', 'accessor': 'Democrat', 'path_accessor': ['Democrat']},
{'Header': 'Independent', 'accessor': 'Independent', 'path_accessor': ['Independent']},
{'Header': 'Republican', 'accessor': 'Republican', 'path_accessor': ['Republican']},
],
'data': [
{
'$metrics': {'raw': 'Wins'},
'Democrat': {'display': '6', 'raw': 6},
'Independent': {'display': '0', 'raw': 0},
'Republican': {'display': '6', 'raw': 6},
}
],
},
result,
)
def test_dim_with_hyperlink_depending_on_another_dim_not_included_if_other_dim_is_not_selected(self):
result = ReactTable(mock_dataset.fields.wins).transform(dimx1_str_df, [mock_dataset.fields.political_party], [])
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {'display': '6', 'raw': 6},
},
{
'$political_party': {
'raw': 'Independent',
'hyperlink': 'http://example.com/Independent',
},
'$wins': {'display': '0', 'raw': 0},
},
],
},
result,
)
def test_dim_with_hyperlink_depending_on_another_dim_included_if_other_dim_is_selected(self):
result = ReactTable(mock_dataset.fields.wins).transform(
dimx2_str_str_df,
[
mock_dataset.fields.political_party,
mock_dataset.fields['candidate-name'],
],
[],
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Party', 'accessor': '$political_party'},
{'Header': 'Candidate Name', 'accessor': '$candidate-name'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$candidate-name': {
'hyperlink': 'http://example.com/Democrat/Al Gore',
'raw': 'Al Gore',
},
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {'display': '0', 'raw': 0},
},
{
'$candidate-name': {
'hyperlink': 'http://example.com/Democrat/Barrack Obama',
'raw': 'Barrack Obama',
},
'$political_party': {
'raw': 'Democrat',
'hyperlink': 'http://example.com/Democrat',
},
'$wins': {'display': '4', 'raw': 4},
},
],
},
result,
)
def test_dim_with_hyperlink_depending_on_another_dim_included_if_other_dim_is_selected_even_if_hidden(self):
result = ReactTable(mock_dataset.fields.wins, hide=[mock_dataset.fields.political_party]).transform(
dimx2_str_str_df,
[
mock_dataset.fields.political_party,
mock_dataset.fields['candidate-name'],
],
[],
)
self.assertIn('data', result)
result['data'] = result['data'][:2] # shorten the results to make the test easier to read
self.assertEqual(
{
'columns': [
{'Header': 'Candidate Name', 'accessor': '$candidate-name'},
{'Header': 'Wins', 'accessor': '$wins', 'path_accessor': ['$wins']},
],
'data': [
{
'$candidate-name': {
'hyperlink': 'http://example.com/Democrat/Al Gore',
'raw': 'Al Gore',
},
'$wins': {'display': '0', 'raw': 0},
},
{
'$candidate-name': {
'hyperlink': 'http://example.com/Democrat/Barrack Obama',
'raw': 'Barrack Obama',
},
'$wins': {'display': '4', 'raw': 4},
},
],
},
result,
)
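# A short sketch of the hyperlink behaviour exercised by the tests above,
# assuming hyperlink templates on the mock dimensions roughly of the form
# 'http://example.com/{political_party}' and
# 'http://example.com/{political_party}/{candidate-name}' (the exact template
# definitions live in the mock_dataset fixture, not in this file):
#
#   result = ReactTable(mock_dataset.fields.wins,
#                       hide=[mock_dataset.fields.political_party]).transform(
#       dimx2_str_str_df,
#       [mock_dataset.fields.political_party,
#        mock_dataset.fields['candidate-name']],
#       [],
#   )
#   # A dimension cell gets a 'hyperlink' key only when every field referenced
#   # by its template is part of the query; hidden fields still count, so the
#   # '$candidate-name' cells above keep e.g.
#   # 'http://example.com/Democrat/Al Gore', while pivoted dimensions get none.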
class ReactTableReferenceItemFormatTests(TestCase):
@classmethod
def setUpClass(cls):
cls.ref_item_attrs = [
'alias',
'label',
'prefix',
'suffix',
'thousands',
'precision',
'data_type',
]
def assert_object_dict(self, obj, exp, attributes=()):
for attribute in attributes:
with self.subTest(attribute + ' should be equal'):
self.assertEqual(getattr(obj, attribute), exp[attribute])
def test_base_ref_item(self):
exp_ref_item = {
'alias': 'wins_with_style_eoe',
'label': 'Wins EoE',
'prefix': '$',
'suffix': None,
'thousands': '_',
'precision': 0,
'data_type': DataType.number,
}
ref = ElectionOverElection(mock_dataset.fields.timestamp)
ref_item = ReferenceItem(mock_dataset.fields.wins_with_style, ref)
self.assert_object_dict(ref_item, exp_ref_item, self.ref_item_attrs)
def test_ref_item_with_delta_percentage_formats_prefix_suffix(self):
exp_ref_item = {
'alias': 'wins_with_style_eoe_delta_percent',
'label': 'Wins EoE Δ%',
'prefix': None,
'suffix': '%',
'thousands': '_',
'precision': 0,
'data_type': DataType.number,
}
ref = ElectionOverElection(mock_dataset.fields.timestamp, delta=True, delta_percent=True)
ref_item = ReferenceItem(mock_dataset.fields.wins_with_style, ref)
self.assert_object_dict(ref_item, exp_ref_item, self.ref_item_attrs)
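# The two ReferenceItem tests above illustrate the naming convention for
# reference metrics; a rough summary (inferred from the assertions, so treat
# the base label 'Wins' as an assumption rather than a documented value):
#
#   base metric : alias='wins_with_style', label='Wins', prefix='$',
#                 thousands='_'
#   EoE         : alias='wins_with_style_eoe', label='Wins EoE',
#                 prefix and thousands carried over from the base metric
#   EoE delta % : alias='wins_with_style_eoe_delta_percent',
#                 label='Wins EoE Δ%', prefix dropped, suffix forced to '%'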
| apache-2.0 |
dsg-bielefeld/mumodo | packages/mumodo/corpus.py | 1 | 37484 | # The MIT License (MIT)
#
# Copyright (c) 2015 Dialogue Systems Group, University of Bielefeld
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""corpus.py -- corpus and resource management
Functions and classes to create mumodos (multimodal documents) with
resource objects that abstract away from the actual data files.
"""
__author__ = ["Spyros Kousidis", "Katharina Jettka", "Gerdis Anderson",
"Robert Rogalla", "Fabian Wohlgemuth", "Casey Kennington"]
__copyright__ = "Dialogue Systems Group Bielefeld - www.dsg-bielefeld.de"
__credits__ = ["Spyros Kousidis", "Katharina Jettka", "Gerdis Anderson",
"Robert Rogalla", "Fabian Wohlgemuth", "Casey Kennington"]
__license__ = "MIT"
__version__ = "2.0"
__maintainer__ = "Spyros Kousidis"
__status__ = "Development" # Development/Production/Prototype
import yaml, os, codecs, pickle
from moviepy.editor import VideoFileClip, AudioFileClip
from mumodo.mumodoIO import open_streamframe_from_xiofile,\
open_intervalframe_from_textgrid
from mumodo.analysis import slice_intervalframe_by_time, get_tier_type, \
slice_pointframe_by_time
from PIL import Image
import pandas as pd
__all__ = [
# Classes
'Resource', 'CSVResource', 'ImageResource', 'TextResource',
'BinaryResource', 'BaseAVResource', 'BaseStreamResource',
'BaseTierResource', 'AudioResource', 'VideoResource',
'XIOStreamResource', 'CSVStreamResource', 'PickledStreamResource',
'TextGridTierResource', 'CSVTierResource', 'PickledTierResource',
'Mumodo',
# Functions
'serialize_mumodo', 'build_mumodo', 'read_mumodo_from_file',
'write_mumodo_to_file'
]
class Resource(object):
""" A mumodo resource abstract class
Resources can be any form of data (e.g. video, audio, motion capture)
The object describes the resource, how it can be retrieved from a file,
and a sensible way to display the data, or cut a slice of the data. This
is a base class used to derive children for specific resource types.
However, this class is useful for naming resources that are not covered
by the child classes (yet) but should nevertheless be part of a
mumodo.
"""
def __init__(self, **kwargs):
""" Default constructor that is always called by all resources
possible kwargs:
name -- The name of the resource. It is preferably short and
concise
description -- A description of the resource (text)
filename -- The name of the file this resource can be retrieved from.
units -- The units used for the time axis of this resource. It is useful
to define this in order to perform slicing
"""
if 'name' in kwargs:
self.__name__ = kwargs['name']
else:
self.__name__ = None
if 'description' in kwargs:
self.__description__ = kwargs['description']
else:
self.__description__ = None
if 'filename' in kwargs:
self.__filename__ = kwargs['filename']
else:
self.__filename__ = None
if 'units' in kwargs:
self.__units__ = kwargs['units']
else:
self.__units__ = None
self.__cached_object__ = None
self.__rtype__ = 'GenericResource'
self.__path_prefix__ = None
def __repr__(self):
return "{}\nname: {}\ndescription: {}\nfilename: {}\nunits: {}\n".\
format(self.__class__.__name__,
self.__name__,
self.__description__,
self.__filename__,
self.__units__)
def __toyaml__(self):
return {'name': self.__name__, 'description': self.__description__,
'filename': self.__filename__, 'units': self.__units__}
def __load__(self):
if self.__filename__ is None:
print "No filename has been linked to this resource."
return -1
if not os.path.isfile(self.get_filepath()):
print "The specified file does not exist"
return -1
return 0
def get_name(self):
""" Get the name of the resource
"""
return self.__name__
def get_type(self):
""" Get the type of the resource
"""
return self.__rtype__
def get_units(self):
""" Get the units of the resource
"""
return self.__units__
def set_units(self, units):
""" Set the units of the resource
"""
self.__units__ = units
def set_path_prefix(self, prefix):
""" Set the path prefix of the resource
In general, resource filenames are looked for in a path set
for the mumodo they belong to. This function allows to override
that setting and instead set another path to the resource's
data.
"""
self.__path_prefix__ = prefix
def get_path_prefix(self):
""" Get the path prefix of the resource
"""
return self.__path_prefix__
def get_filepath(self):
""" Get the full path of the resource's file
Joins the path prefix (if one is set) with the resource's filename
"""
if self.__path_prefix__ is None or not isinstance(self.__path_prefix__,
basestring):
filepath = self.__filename__
else:
filepath = os.path.join(self.__path_prefix__, self.__filename__)
return filepath
class CSVResource(Resource):
""" A CSV resource coming from a CSV file
The CSV resource represents data stored in CSV files, that are not
StreamFrames or Tiers. For those you can use CSVStreamResource and
CSVTierResource, respectively.
Mumodo uses the Pandas library to handle CSV files, and will load
any CSV file into a Pandas DataFrame. Check the documentation of
pandas for more details.
In addition to the base Resource class constructor, this constructor
accepts the following additional kwargs:
kwargs - a dictionary of kwargs that are passed to the Pandas
DataFrame.from_csv constructor
The CSV resource cannot be sliced by time
"""
def __init__(self, **kwargs):
super(CSVResource, self).__init__(**kwargs)
self.__rtype__ = 'CSVResource'
if 'kwargs' in kwargs:
self.__kwargs__ = kwargs['kwargs']
else:
self.__kwargs__ = dict()
def __repr__(self):
return super(CSVResource, self).__repr__() + \
"kwargs: {}\n".format(self.__kwargs__)
def __toyaml__(self):
return dict(super(CSVResource, self).__toyaml__().items() + \
{'kwargs': self.__kwargs__}.items())
def __load__(self):
if super(CSVResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = pd.DataFrame.from_csv(\
self.get_filepath(),
**self.__kwargs__)
return 0
def show(self):
""" Display the table
This method simply prints the DataFrame with CSV data
"""
if self.__load__() < 0:
return -1
print self.__cached_object__
def get_table(self):
""" Get the DataFrame object
This method returns a Pandas DataFrame with the contents of the CSV
"""
if self.__load__() < 0:
return -1
return self.__cached_object__
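# A minimal usage sketch for CSVResource (the file name and the pandas kwargs
# below are made up for illustration only):
#
#   r = CSVResource(name='questionnaire',
#                   description='post-session questionnaire answers',
#                   filename='questionnaire.csv',
#                   kwargs={'sep': ';'})    # forwarded to DataFrame.from_csv
#   r.set_path_prefix('/data/mycorpus')     # normally set by the parent Mumodo
#   df = r.get_table()                      # a pandas DataFrame with the data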
class BaseAVResource(Resource):
""" A base AudioVisual Data Resource Class
AV data are handled using the moviepy python package. This base class is
used to derive children that handle specific AV resources, namely audio and
video. As such, it should never be instantiated.
"""
def __init__(self, **kwargs):
super(BaseAVResource, self).__init__(**kwargs)
self.__rtype__ = 'AVResource'
if 'player' in kwargs:
self.__player__ = kwargs['player']
else:
self.__player__ = None
if 'channel' in kwargs:
self.__channel__ = kwargs['channel']
else:
self.__channel__ = None
def show(self):
"""Display the AV resource
Play the AV using an external player. The player MUST have
been set, either during construction or via the set_player method
"""
if self.__player__ is None:
print "no player has been set - cannot play the resource"
return -1
if self.__load__() < 0:
return -1
if os.system("{} {}".format(self.__player__, self.get_filepath())) != 0:
print "the external player failed to play the resource"
return -1
def get_slice(self, t1, t2):
""" Get a slice of the audio/video
Gets a slice from the audio/video using moviepy's subclip method.
See that method's documentation for formatting the times.
"""
if self.__load__() < 0:
print "No slice can be returned."
return
return self.__cached_object__.subclip(t1, t2)
def set_player(self, player):
""" Set the external Video player
Set the external video player to a different one than the one that
was defined during construction
"""
self.__player__ = player
def get_player(self):
""" Get the external Video player
Returns the external Video player that has been set
"""
return self.__player__
def set_channel(self, channel):
""" Set the channel of the resource
Set the channel of the resource for multichannel video files.
"""
self.__channel__ = channel
def get_channel(self):
""" Get the channel of the resource
Returns the channel of the resource for multichannel video files.
"""
return self.__channel__
class VideoResource(BaseAVResource):
""" A mumodo Video resource object
A VideoResource is a type of resource based on Video files. This module
uses moviepy.editor to handle video files and is thus limited to the
formats supported by that package. This is a rich set of formats, as
moviepy is based on ffmpeg.
"""
def __init__(self, **kwargs):
""" VideoResource Constructor
In addition to the kwargs defined in the base Resource class
constructor, the following kwargs can also be defined:
player -- an external player (e.g. VLC, mplayer) that can play
the video files. The value must be the FULL path to
the executable, or the executable has to be in the
PATH
channel -- In case of multi-channel videos, an identifier
(usually a number) of the channel
"""
super(VideoResource, self).__init__(**kwargs)
self.__rtype__ = 'VideoResource'
def __repr__(self):
return super(VideoResource, self).__repr__() + \
"player: {}\nchannel: {}\n".format(self.__player__, self.__channel__)
def __toyaml__(self):
return dict(super(VideoResource, self).__toyaml__().items() + \
{'player': self.__player__, 'channel': self.__channel__}.items())
def __load__(self):
if super(VideoResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = VideoFileClip(self.get_filepath())
return 0
def get_video(self):
""" Get the video object of this resource
Returns the data object itself (a moviepy VideoClip instance)
"""
if self.__load__() < 0:
print "No video can be returned."
return
return self.__cached_object__
class AudioResource(BaseAVResource):
""" A mumodo Audio resource object
An AudioResource is a type of resource based on Audio files. This module
uses moviepy.editor to handle audio files and is thus limited to the
formats supported by that package. This is a rich set of formats, as
moviepy is based on ffmpeg.
"""
def __init__(self, **kwargs):
""" AudioResource Constructor
In addition to the kwargs defined in the base Resource class
constructor, the following kwargs can also be defined:
player -- an external player (e.g. VLC, mplayer) that can play
the audio files. The value must be the FULL path to
the executable, or the executable has to be in the
PATH
channel -- In case of multi-channel audios, an identifier
(usually a number) of the channel
"""
super(AudioResource, self).__init__(**kwargs)
self.__rtype__ = 'AudioResource'
def __repr__(self):
return super(AudioResource, self).__repr__() + \
"player: {}\nchannel: {}\n".format(self.__player__, self.__channel__)
def __toyaml__(self):
return dict(super(AudioResource, self).__toyaml__().items() + \
{'player': self.__player__, 'channel': self.__channel__}.items())
def __load__(self):
if super(AudioResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = AudioFileClip(self.get_filepath())
return 0
def get_audio(self):
""" Get the audio object of this resource
Returns the data object itself (a moviepy AudioClip instance)
"""
if self.__load__() < 0:
print "No audio can be returned."
return
return self.__cached_object__
class BaseStreamResource(Resource):
""" An abstract StreamResource class
This base StreamResource class serves as the parent for
children with different methods of loading the StreamFrame
"""
def __init__(self, **kwargs):
super(BaseStreamResource, self).__init__(**kwargs)
self.__rtype__ = 'StreamResource'
def get_slice(self, t1, t2):
if self.__load__() < 0:
return
return self.__cached_object__.ix[t1:t2]
def show(self):
if self.__load__() < 0:
return
print self.__cached_object__
def get_streamframe(self):
if self.__load__() < 0:
print "No StreamFrame can be returned."
return
return self.__cached_object__
class XIOStreamResource(BaseStreamResource):
""" StreamFrame from XIO file
This class implements a StreamFrame resource that is
loaded from an XIO file.
In addition to the base Resource kwargs, the constructor
accepts additional kwargs:
sensorname -- the XIO sensorname, which is the only mandatory
argument in order to open a streamframe from
an XIO file
kwargs -- a dictionary of kwargs to be passed to the function
that parses the XIO file and returns the StreamFrame.
Notably, an offset can be one of these. See the
documentation of mumodo.mumodoIO for more information
"""
def __init__(self, **kwargs):
super(XIOStreamResource, self).__init__(**kwargs)
if 'sensorname' in kwargs:
self.__sensorname__ = kwargs['sensorname']
else:
self.__sensorname__ = None
if 'kwargs' in kwargs:
self.__kwargs__ = kwargs['kwargs']
else:
self.__kwargs__ = {}
def __repr__(self):
return super(XIOStreamResource, self).__repr__() + \
"sensorname: {}\nkwargs: {}\n".format(self.__sensorname__,
self.__kwargs__)
def __toyaml__(self):
return dict(super(XIOStreamResource, self).__toyaml__().items() + \
{'sensorname': self.__sensorname__,
'kwargs': self.__kwargs__}.items())
def __load__(self):
if super(XIOStreamResource, self).__load__() < 0:
return -1
if self.__sensorname__ is None:
print "No sensorname has been set."
print "No StreamFrame can be created."
return -1
if self.__cached_object__ is None:
print "Parsing XIO file (will be done only once)."
print "Please wait ..."
self.__cached_object__ = open_streamframe_from_xiofile\
(self.get_filepath(),
self.__sensorname__,
**self.__kwargs__)
return 0
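# A construction sketch for an XIO-backed stream resource (the file name,
# sensor name and units below are hypothetical; valid sensor names depend on
# how the .xio.gz file was recorded):
#
#   mocap = XIOStreamResource(name='mocap',
#                             filename='session01.xio.gz',
#                             sensorname='VeniceHubMocap',
#                             units='ms')
#   frame = mocap.get_streamframe()          # the XIO file is parsed once, then cached
#   window = mocap.get_slice(10000, 20000)   # rows with index between 10 s and 20 s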
class CSVStreamResource(BaseStreamResource):
""" StreamFrame from CSV file
This class implements a StreamFrame resource that is
loaded from a CSV file.
In addition to the base Resource kwargs, the constructor
accepts additional kwargs:
kwargs -- a dictionary of kwargs to be passed to the pandas
function that parses the CSV file and returns the
StreamFrame. See Pandas documentation for more
information (DataFrame.from_csv())
"""
def __init__(self, **kwargs):
super(CSVStreamResource, self).__init__(**kwargs)
if 'kwargs' in kwargs:
self.__kwargs__ = kwargs['kwargs']
else:
self.__kwargs__ = dict()
def __repr__(self):
return super(CSVStreamResource, self).__repr__() + \
"kwargs: {}\n".format(self.__kwargs__)
def __toyaml__(self):
return dict(super(CSVStreamResource, self).__toyaml__().items() + \
{'kwargs': self.__kwargs__}.items())
def __load__(self):
if super(CSVStreamResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = pd.DataFrame.from_csv(\
self.get_filepath(),
**self.__kwargs__)
return 0
class PickledStreamResource(BaseStreamResource):
""" Pickled StreamFrame Resource
This class implements a previously pickled StreamFrame
resource that is unpickled from a binary file.
In addition to the base Resource kwargs, the constructor
accepts no additional kwargs
"""
def __load__(self):
if super(PickledStreamResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = pickle.load(open(self.get_filepath(),
"rb"))
return 0
class BaseTierResource(Resource):
""" Base Tier Resource
The BaseTierResource represents Interval or Point Tier resources,
and serves as a parent for children that load tiers from different
file types. IntervalTiers and PointTiers are Pandas DataFrames with
specific column names and structure. See the documentation of mumodoIO
for more information.
"""
def __init__(self, **kwargs):
super(BaseTierResource, self).__init__(**kwargs)
self.__rtype__ = 'TierResource'
def get_slice(self, t1, t2):
if self.__load__() < 0:
return
tiertype = get_tier_type(self.__cached_object__)
if tiertype == 'interval':
return slice_intervalframe_by_time(self.__cached_object__, t1, t2)
elif tiertype == 'point':
return slice_pointframe_by_time(self.__cached_object__, t1, t2)
def show(self):
if self.__load__() < 0:
return
print self.__cached_object__
def get_tier(self):
if self.__load__() < 0:
return
return self.__cached_object__
class TextGridTierResource(BaseTierResource):
""" Intervalframe or Pointframe from TextGrid
The TextGridTierResource imports intervalframes or pointframes
from Praat TextGrids. In addition to the kwargs defined in the
base Resource class, the following kwargs are additionally defined:
tiername -- the name of the tier for this resource
"""
def __init__(self, **kwargs):
super(TextGridTierResource, self).__init__(**kwargs)
if 'tiername' in kwargs:
self.__tiername__ = kwargs['tiername']
else:
self.__tiername__ = None
def __repr__(self):
return super(TextGridTierResource, self).__repr__() + \
"tiername: {}\n".format(self.__tiername__)
def __toyaml__(self):
return dict(super(TextGridTierResource, self).__toyaml__().items() + \
{'tiername': self.__tiername__}.items())
def __load__(self):
if super(TextGridTierResource, self).__load__() < 0:
return -1
if self.__tiername__ is None:
print "No tiername has been set."
print "No TierFrame can be created."
return -1
if self.__cached_object__ is None:
self.__cached_object__ = open_intervalframe_from_textgrid\
(self.get_filepath())[self.__tiername__]
return 0
def set_tiername(self, tiername):
""" Set the tiername for this tier resource
Arguments:
tiername -- the name of the tier to be read from the TextGrid
"""
self.__tiername__ = tiername
def get_tiername(self):
""" Return the tiername for this tier resource
"""
return self.__tiername__
class CSVTierResource(BaseTierResource):
""" Intervalframe or Pointframe from TextGrid
The CSVTierResource imports intervalframes or pointframes
from CSV files. In addition to the kwargs defined in the
base Resource class, the following kwargs are additionally defined:
kwargs -- a dict of kwargs to be passed to the Pandas function that
imports dataframes from CSV. Check pandas documentation for
more information.
"""
def __init__(self, **kwargs):
super(CSVTierResource, self).__init__(**kwargs)
if 'kwargs' in kwargs:
self.__kwargs__ = kwargs['kwargs']
else:
self.__kwargs__ = dict()
def __repr__(self):
return super(CSVTierResource, self).__repr__() + \
"kwargs: {}\n".format(self.__kwargs__)
def __toyaml__(self):
return dict(super(CSVTierResource, self).__toyaml__().items() + \
{'kwargs': self.__kwargs__}.items())
def __load__(self):
if super(CSVTierResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = pd.DataFrame.from_csv(\
self.get_filepath(),
**self.__kwargs__)
return 0
class PickledTierResource(BaseTierResource):
""" Intervalframe or Pointframe from Pickled data
The PickledTierResource imports intervalframes or pointframes
from pickled files. In addition to the kwargs defined in the
base Resource class, no additional kwargs are defined.
"""
def __load__(self):
if super(PickledTierResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = pickle.load(open(self.get_filepath(),
"rb"))
return 0
class ImageResource(Resource):
""" An image resource coming from an image file
The Image resource represents simply an image. Mumodo uses the Python
Image Library to handle image files. Check the documentation of this
module for more details
In addition to the base Resource class constructor, this constructor
requires no additional kwargs.
The Image resource cannot be sliced by time
"""
def __init__(self, **kwargs):
super(ImageResource, self).__init__(**kwargs)
self.__rtype__ = 'ImageResource'
def __load__(self):
if super(ImageResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None:
self.__cached_object__ = Image.open(self.get_filepath())
return 0
def show(self):
""" Display the image
This method simply displays the image using the show() method of the
PIL library
"""
if self.__load__() < 0:
return -1
self.__cached_object__.show()
def get_image(self):
""" Get the image object
This method returns a PIL Image object that represents the image.
"""
if self.__load__() < 0:
return -1
return self.__cached_object__
class TextResource(Resource):
""" A text resource coming from a text file
The Text resource represents simply a text.
In addition to the base Resource class constructor, this constructor
has the following optional kwargs:
encoding -- the encoding of the text file. If not given the
default is utf-8
The Text resource cannot be sliced by time
"""
def __init__(self, **kwargs):
super(TextResource, self).__init__(**kwargs)
self.__rtype__ = 'TextResource'
if 'encoding' in kwargs:
self.__encoding__ = kwargs['encoding']
else:
self.__encoding__ = 'utf-8'
def __load__(self):
if super(TextResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None or self.__cached_object__.closed:
self.__cached_object__ = codecs.open(self.get_filepath(),
"r",
encoding=self.__encoding__)
return 0
def __repr__(self):
return super(TextResource, self).__repr__() + \
"encoding: {}\n".format(self.__encoding__)
def __toyaml__(self):
return dict(super(TextResource, self).__toyaml__().items() + \
{'encoding': self.__encoding__}.items())
def show(self):
""" Show text
Simply print out the entire text
"""
if self.__load__() < 0:
return -1
self.__cached_object__.seek(0)
for line in self.__cached_object__:
print line
def get_encoding(self):
""" Get the encoding of this text resource
Returns the encoding of the file that this text resource
is stored in
"""
return self.__encoding__
def get_text(self):
""" Get the entire text
Returns the text that is represented by this resource
"""
if self.__load__() < 0:
return -1
text = ''
self.__cached_object__.seek(0)
for line in self.__cached_object__:
text += line
return text
def get_file_object(self):
""" Get the file object
Returns the file object instead of the entire text. This is
useful for big text files.
"""
if self.__load__() < 0:
return -1
return self.__cached_object__
class BinaryResource(Resource):
""" A binary data resource coming from a binary file
The Binary resource represents binary data
In addition to the base Resource class constructor, this constructor
has no additional kwargs.
The binary resource cannot be sliced by time, and cannot be shown, but
the file object is returned for further operations
"""
def __init__(self, **kwargs):
super(BinaryResource, self).__init__(**kwargs)
self.__rtype__ = 'BinaryResource'
def __load__(self):
if super(BinaryResource, self).__load__() < 0:
return -1
if self.__cached_object__ is None or self.__cached_object__.closed:
self.__cached_object__ = open(self.get_filepath(), "rb")
return 0
def get_file_object(self):
""" Get the file object
Returns the raw file object, opened in binary mode, so that the
caller can read the binary data as needed.
"""
if self.__load__() < 0:
return -1
return self.__cached_object__
class Mumodo(object):
""" The mumodo corpus object """
def __init__(self, **kwargs):
""" The constructor of a Mumodo object
Possible kwargs
name -- The corpus name
description -- A brief description
url -- A list of remote urls where the corpus can be found.
localpath -- A local path where the corpus can be found. This will
be a path prefix to all the files of this mumodo on disk
files -- A list of the corpus files
ID -- an ID that is shorter than the name and can be used as a key more
reliably (e.g. has no spaces or special characters)
resources -- A dictionary of available resources in this Mumodo
"""
if 'name' in kwargs:
self.__name__ = kwargs['name']
else:
self.__name__ = None
if 'description' in kwargs:
self.__description__ = kwargs['description']
else:
self.__description__ = None
if 'url' in kwargs:
self.__url__ = kwargs['url']
else:
self.__url__ = None
if 'localpath' in kwargs:
self.__localpath__ = kwargs['localpath']
else:
self.__localpath__ = None
if 'files' in kwargs:
self.__files__ = kwargs['files']
else:
self.__files__ = []
if 'ID' in kwargs:
self.__ID__ = kwargs['ID']
else:
self.__ID__ = None
if 'resources' in kwargs:
self.__resources__ = kwargs['resources']
else:
self.__resources__ = dict()
self.__parent__ = None
def __repr__(self):
return ("{}\nname: {}\ndescription: {}\nurl: {}\nlocalpath: {}\n"
        "files: {}\nID: {}\nresources: {}").format(self.__class__.__name__,
self.__name__,
self.__description__,
self.__url__,
self.__localpath__,
self.__files__,
self.__ID__,
self.__resources__.keys())
def __toyaml__(self):
return {'name': self.__name__, 'description': self.__description__,
'url': self.__url__, 'localpath': self.__localpath__,
'files': self.__files__, 'parent': self.__parent__,
'ID': self.__ID__}
def __getitem__(self, item):
""" Access a resource by its name
For convenience, the following expression will store a
reference of the resource named 'myresource'
>>> r = mymodo['myresource']
Numerical access to resources is not supported, as it is
not really needed.
"""
if len(self.__resources__) <= 0:
print "No resources have been defined yet!"
return
if item in self.__resources__:
return self.__resources__[item]
def __iter__(self):
""" iterate over the resources of this Mumodo
Mumodos are also iterable, which is helpful for lookups of
resources with specific names or types
"""
return (r for r in self.__resources__.values())
def add_resource(self, resource):
""" Add a resource to this mumodo
The resource has to be an object of type mumodo.corpus.Resource
or a derived type. In addition the new resource cannot have the
same name as an existing resource with the same name.
"""
if isinstance(resource, Resource):
rname = resource.get_name()
if rname is None or rname in self.__resources__:
print "Resource must have a unique name!"
return -1
self.__resources__[rname] = resource
resource.set_path_prefix(self.__localpath__)
if resource.__filename__ not in self.__files__:
self.__files__.append(resource.__filename__)
def get_resource_names(self):
""" Return the names of the resources attached to this Mumodo
The function returns the names of the resources in alphabetical
order.
"""
return sorted(self.__resources__.keys())
def set_localpath(self, path):
""" Set the local path for this Mumodo
This function updates each resource with a local path in which
to look up the file associated with that resource.
"""
self.__localpath__ = path
for r in self.__resources__.values():
r.set_path_prefix(path)
def get_name(self):
""" Return the name of this mumodo
"""
return self.__name__
def serialize_mumodo(mumodo, default_flow_style=False):
""" Create a human-readable and editable yaml dump
Arguments:
mumodo -- a mumodo object, populated with resources
default_flow_style -- passed on to yaml.dump to control the YAML layout
"""
ser = ""
if isinstance(mumodo, Mumodo):
ser += "!Mumodo\n"
ser += yaml.dump(mumodo.__toyaml__(),
default_flow_style=default_flow_style)
ser += "\n"
for key in sorted(mumodo.__resources__.keys()):
ser += "!{}\n".format(mumodo[key].__class__.__name__)
ser += yaml.dump(mumodo[key].__toyaml__(),
default_flow_style=default_flow_style)
ser += "\n"
return ser
def build_mumodo(stream):
""" Build and populate Mumodo object(s) from a human-readable yaml dump
Returns a list of populated mumodo objects
Arguments
stream -- a character stream
"""
blockbuffer = ''
mumodolist = []
object_type = None
keys = {'Mumodo': Mumodo,
'VideoResource': VideoResource,
'AudioResource': AudioResource,
'XIOStreamResource': XIOStreamResource,
'CSVStreamResource': CSVStreamResource,
'PickledStreamResource': PickledStreamResource,
'TextGridTierResource': TextGridTierResource,
'CSVTierResource': CSVTierResource,
'PickledTierResource': PickledTierResource,
'ImageResource': ImageResource,
'TextResource': TextResource,
'Resource': Resource,
'BinaryResource': BinaryResource,
'CSVResource': CSVResource}
for line in stream.split('\n'):
#do we have a new block?
if len(line) > 0 and line[0] == '!' or len(line) == 0:
if len(blockbuffer) > 0:
kwargs = yaml.load(blockbuffer)
blockbuffer = ''
#Create the object here
if object_type in keys.keys():
obj = keys[object_type](**kwargs)
#build the object tree
if isinstance(obj, Mumodo):
mumodolist.append(obj)
elif isinstance(obj, Resource):
mumodolist[-1].add_resource(obj)
object_type = line[1:]
continue
blockbuffer += line
blockbuffer += '\n'
return mumodolist
def read_mumodo_from_file(filepath, encoding='utf-8'):
""" Read mumodos from a file on disk
Reads the file and returns a list of mumodos found
in the file
Arguments:
filepath -- a valid filepath
Kwargs:
encoding -- a valid encoding supported by the Python codecs
package
"""
if not os.path.isfile(filepath):
print "file does not exist!"
return
inser = ""
with codecs.open(filepath, "r", encoding=encoding) as f:
for line in f:
inser += line
return build_mumodo(inser)
def write_mumodo_to_file(mumodos, filepath, encoding='utf-8'):
""" Write mumodos to a file on disk
Write a mumodo or a list of mumodos to the file
Arguments:
mumodos -- A list of mumodo objects (may contain a single object)
filepath -- a valid filepath
kwargs:
encoding -- a valid encoding supported by the Python codecs
package
"""
ser = ''
for mumodo in mumodos:
ser += serialize_mumodo(mumodo)
with codecs.open(filepath, "w", encoding=encoding) as f:
f.write(ser)
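# A minimal end-to-end sketch of the intended workflow (all names, file names
# and paths below are hypothetical):
#
#   m = Mumodo(name='My session', ID='session01',
#              description='one recorded interaction')
#   m.add_resource(VideoResource(name='scenecam', filename='scenecam.mp4',
#                                player='vlc', units='seconds'))
#   m.add_resource(TextGridTierResource(name='words',
#                                       filename='session01.TextGrid',
#                                       tiername='words', units='seconds'))
#   m.set_localpath('/data/mycorpus/session01')
#
#   write_mumodo_to_file([m], 'session01.mumodo')
#   same_m = read_mumodo_from_file('session01.mumodo')[0]
#   same_m['words'].get_slice(10, 20)   # word intervals between 10 s and 20 s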
| mit |
dssg/givinggraph | givinggraph/analysis/forceatlas.py | 3 | 3347 | #! /usr/bin/python
import networkx as nx
import scipy as sp
import numpy as np
import random
import matplotlib.pyplot as plt
# Read a food web with > 100 nodes
# FW = nx.read_edgelist('web.edges', create_using= nx.DiGraph())
# Plotting using the FR layout
# nx.draw_spring(FW,iterations=500)
# plt.show()
# Utility functions
def eucl_dist(a, b):
"""
Euclidean distance
"""
Di = [(a[i] - b[i]) ** 2 for i in xrange(len(a))]
return np.sqrt(np.sum(Di))
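# Quick sanity check (plain Python lists work as inputs):
# eucl_dist([0, 0], [3, 4]) -> 5.0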
# Now the layout function
def forceatlas2_layout(G, iterations=10, linlog=False, pos=None, nohubs=False, kr=0.001, k=None, dim=2):
"""
Option values are
G            The graph to lay out
iterations   Number of iterations to run
linlog       Use LinLog (logarithmic) repulsion instead of linear repulsion
pos          Optional initial positions; if None, start from a random layout
nohubs       Scale repulsion down for high-degree nodes (hub avoidance)
kr           Repulsion constant (currently unused in this implementation)
k            Optimal distance constant; defaults to sqrt(1.0 / nnodes)
dim          Dimension of the layout (default 2)
"""
# We add attributes to store the current and previous convergence speed
for n in G:
G.node[n]['prevcs'] = 0
G.node[n]['currcs'] = 0
# To numpy matrix
# This comes from the spares FR layout in nx
A = nx.to_scipy_sparse_matrix(G, dtype='f')
nnodes, _ = A.shape
from scipy.sparse import coo_matrix
try:
A = A.tolil()
except:
A = (coo_matrix(A)).tolil()
if pos is None:
pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype)
else:
pos = pos.astype(A.dtype)
if k is None:
k = np.sqrt(1.0 / nnodes)
# Iterations
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
displacement = np.zeros((dim, nnodes))
for iteration in range(iterations):
print "layout percentage:\t" + str(iteration / float(iterations) * 100) + "%"
displacement *= 0
# loop over rows
for i in range(A.shape[0]):
# difference between this row's node position and all others
delta = (pos[i] - pos).T
# distance between points
distance = np.sqrt((delta ** 2).sum(axis=0))
# enforce minimum distance of 0.01
distance = np.where(distance < 0.01, 0.01, distance)
# the adjacency matrix row
Ai = np.asarray(A.getrowview(i).toarray())
# displacement "force"
Dist = k * k / distance ** 2
if nohubs:
Dist = Dist / float(Ai.sum(axis=1) + 1)
if linlog:
Dist = np.log(Dist + 1)
displacement[:, i] +=\
(delta * (Dist - Ai * distance / k)).sum(axis=1)
# update positions
# print np.average(displacement)
length = np.sqrt((displacement ** 2).sum(axis=0))
length = np.where(length < 0.01, 0.1, length)
pos += (displacement * t / length).T
# cool temperature
t -= dt
if t < 0:
break
        print(t)
# Return the layout
return dict(zip(G, pos))
# nx.draw(FW, forceatlas2_layout(FW,linlog=False,nohubs=False,iterations=100))
# plt.show()
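# Minimal usage sketch (illustrative only; the random graph below is made up
# and assumes networkx/matplotlib are importable as at the top of this module):
# G = nx.erdos_renyi_graph(50, 0.1, seed=1)
# pos = forceatlas2_layout(G, iterations=50, linlog=True)
# nx.draw(G, pos, node_size=20)
# plt.show()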
| mit |
vigilv/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
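# Illustrative sketch of the sparse path (mirrors the docstring examples
# above): with sparse_output=True the same call returns a scipy.sparse CSR
# matrix instead of a dense ndarray:
#
#     label_binarize([1, 6], classes=[1, 2, 4, 6], sparse_output=True)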
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
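# Illustrative round trip for MultiLabelBinarizer (a sketch mirroring the
# class docstring above; not part of the public API surface):
#
#     mlb = MultiLabelBinarizer()
#     yt = mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
#     mlb.inverse_transform(yt)   # [('sci-fi', 'thriller'), ('comedy',)]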
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/io/excel/_odfreader.py | 2 | 6017 | from typing import List
from pandas.compat._optional import import_optional_dependency
import pandas as pd
from pandas._typing import FilePathOrBuffer, Scalar
from pandas.io.excel._base import _BaseExcelReader
class _ODFReader(_BaseExcelReader):
"""Read tables out of OpenDocument formatted files
Parameters
----------
filepath_or_buffer: string, path to be parsed or
an open readable stream.
"""
def __init__(self, filepath_or_buffer: FilePathOrBuffer):
import_optional_dependency("odf")
super().__init__(filepath_or_buffer)
@property
def _workbook_class(self):
from odf.opendocument import OpenDocument
return OpenDocument
def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
from odf.opendocument import load
return load(filepath_or_buffer)
@property
def empty_value(self) -> str:
"""Property for compat with other readers."""
return ""
@property
def sheet_names(self) -> List[str]:
"""Return a list of sheet names present in the document"""
from odf.table import Table
tables = self.book.getElementsByType(Table)
return [t.getAttribute("name") for t in tables]
def get_sheet_by_index(self, index: int):
from odf.table import Table
tables = self.book.getElementsByType(Table)
return tables[index]
def get_sheet_by_name(self, name: str):
from odf.table import Table
tables = self.book.getElementsByType(Table)
for table in tables:
if table.getAttribute("name") == name:
return table
raise ValueError("sheet {name} not found".format(name))
def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
"""Parse an ODF Table into a list of lists
"""
from odf.table import CoveredTableCell, TableCell, TableRow
covered_cell_name = CoveredTableCell().qname
table_cell_name = TableCell().qname
cell_names = {covered_cell_name, table_cell_name}
sheet_rows = sheet.getElementsByType(TableRow)
empty_rows = 0
max_row_len = 0
table = [] # type: List[List[Scalar]]
for i, sheet_row in enumerate(sheet_rows):
sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names]
empty_cells = 0
table_row = [] # type: List[Scalar]
for j, sheet_cell in enumerate(sheet_cells):
if sheet_cell.qname == table_cell_name:
value = self._get_cell_value(sheet_cell, convert_float)
else:
value = self.empty_value
column_repeat = self._get_column_repeat(sheet_cell)
# Queue up empty values, writing only if content succeeds them
if value == self.empty_value:
empty_cells += column_repeat
else:
table_row.extend([self.empty_value] * empty_cells)
empty_cells = 0
table_row.extend([value] * column_repeat)
if max_row_len < len(table_row):
max_row_len = len(table_row)
row_repeat = self._get_row_repeat(sheet_row)
if self._is_empty_row(sheet_row):
empty_rows += row_repeat
else:
# add blank rows to our table
table.extend([[self.empty_value]] * empty_rows)
empty_rows = 0
for _ in range(row_repeat):
table.append(table_row)
# Make our table square
for row in table:
if len(row) < max_row_len:
row.extend([self.empty_value] * (max_row_len - len(row)))
return table
def _get_row_repeat(self, row) -> int:
"""Return number of times this row was repeated
Repeating an empty row appeared to be a common way
of representing sparse rows in the table.
"""
from odf.namespaces import TABLENS
return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))
def _get_column_repeat(self, cell) -> int:
from odf.namespaces import TABLENS
return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
def _is_empty_row(self, row) -> bool:
"""Helper function to find empty rows
"""
for column in row.childNodes:
if len(column.childNodes) > 0:
return False
return True
def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
from odf.namespaces import OFFICENS
cell_type = cell.attributes.get((OFFICENS, "value-type"))
if cell_type == "boolean":
if str(cell) == "TRUE":
return True
return False
if cell_type is None:
return self.empty_value
elif cell_type == "float":
# GH5394
cell_value = float(cell.attributes.get((OFFICENS, "value")))
if cell_value == 0.0 and str(cell) != cell_value: # NA handling
return str(cell)
if convert_float:
val = int(cell_value)
if val == cell_value:
return val
return cell_value
elif cell_type == "percentage":
cell_value = cell.attributes.get((OFFICENS, "value"))
return float(cell_value)
elif cell_type == "string":
return str(cell)
elif cell_type == "currency":
cell_value = cell.attributes.get((OFFICENS, "value"))
return float(cell_value)
elif cell_type == "date":
cell_value = cell.attributes.get((OFFICENS, "date-value"))
return pd.to_datetime(cell_value)
elif cell_type == "time":
return pd.to_datetime(str(cell)).time()
else:
raise ValueError("Unrecognized type {}".format(cell_type))
| apache-2.0 |
tcm129/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
            print(idx.row())
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
            self.dataModel.columnFormat[colName] = f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
        print('test function')
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
litaotao/mpld3 | mpld3/tests/test_elements.py | 16 | 5658 | """
Test creation of basic plot elements
"""
import numpy as np
import matplotlib.pyplot as plt
from .. import fig_to_dict, fig_to_html
from numpy.testing import assert_equal
def test_line():
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.random.random(10),
'--k', alpha=0.3, zorder=10, lw=2)
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
line = axrep['lines'][0]
assert_equal(list(sorted(line.keys())),
['alpha', 'color', 'coordinates', 'dasharray', 'data', 'id',
'linewidth', 'xindex', 'yindex', 'zorder'])
assert_equal(line['alpha'], 0.3)
assert_equal(line['color'], "#000000")
assert_equal(line['coordinates'], 'data')
assert_equal(line['dasharray'], '6,6')
assert_equal(line['zorder'], 10)
assert_equal(line['linewidth'], 2)
def test_markers():
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.random.random(10),
'^k', alpha=0.3, zorder=10, mec='r', mew=2, c='b')
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
markers = axrep['markers'][0]
assert_equal(list(sorted(markers.keys())),
['alpha', 'coordinates', 'data', 'edgecolor', 'edgewidth',
'facecolor', 'id', 'markerpath', 'xindex', 'yindex',
'zorder'])
assert_equal(markers['alpha'], 0.3)
assert_equal(markers['zorder'], 10)
assert_equal(markers['coordinates'], 'data')
assert_equal(markers['edgecolor'], '#FF0000')
assert_equal(markers['edgewidth'], 2)
assert_equal(markers['facecolor'], '#0000FF')
assert_equal(markers['markerpath'][0],
[[0.0, -3.0], [-3.0, 3.0], [3.0, 3.0]])
assert_equal(markers['markerpath'][1],
['M', 'L', 'L', 'Z'])
def test_scatter():
fig, ax = plt.subplots()
ax.scatter(np.arange(10), np.random.random(10), c='r', s=30,
marker='^', alpha=0.3, lw=2, edgecolors='b', zorder=10)
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
points = axrep['collections'][0]
assert_equal(list(sorted(points.keys())),
['alphas', 'edgecolors', 'edgewidths', 'facecolors', 'id',
'offsetcoordinates', 'offsets', 'pathcoordinates', 'paths',
'pathtransforms', 'xindex', 'yindex', 'zorder'])
assert_equal(points['alphas'], [0.3])
assert_equal(points['zorder'], 10)
assert_equal(points['edgecolors'], ['#0000FF'])
assert_equal(points['facecolors'], ['#FF0000'])
assert_equal(points['edgewidths'], (2.0,))
assert_equal(points['paths'][0][0],
[[0.0, 0.5], [-0.5, -0.5], [0.5, -0.5]])
assert_equal(points['paths'][0][1],
['M', 'L', 'L', 'Z'])
assert_equal(points['pathtransforms'],
[[6.085806194501846, 0.0, 0.0, 6.085806194501846, 0.0, 0.0]])
def test_patch():
fig, ax = plt.subplots()
ax.add_patch(plt.Rectangle((0, 0), 1, 2, alpha=0.2, linewidth=2,
edgecolor='green', facecolor='red', zorder=3))
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
path = axrep['paths'][0]
assert_equal(list(sorted(path.keys())),
['alpha', 'coordinates', 'dasharray', 'data', 'edgecolor',
'edgewidth', 'facecolor', 'id', 'pathcodes',
'xindex', 'yindex', 'zorder'])
assert_equal(path['alpha'], 0.2)
assert_equal(path['edgecolor'], "#008000")
assert_equal(path['facecolor'], "#FF0000")
assert_equal(path['edgewidth'], 2)
assert_equal(path['zorder'], 3)
def test_text():
fig, ax = plt.subplots()
ax.text(0.1, 0.1, "abcde", size=14, color='red', alpha=0.7,
rotation=15, ha='center', va='center')
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
text = axrep['texts'][0]
assert_equal(list(sorted(text.keys())),
['alpha', 'color', 'coordinates', 'fontsize', 'h_anchor',
'id', 'position', 'rotation', 'text', 'v_baseline',
'zorder'])
assert_equal(text['alpha'], 0.7)
assert_equal(text['color'], "#FF0000")
assert_equal(text['text'], "abcde")
assert_equal(text['rotation'], -15)
assert_equal(text['fontsize'], 14)
assert_equal(text['position'], [0.1, 0.1])
assert_equal(text['h_anchor'], 'middle')
assert_equal(text['v_baseline'], 'central')
assert_equal(text['zorder'], 3)
assert_equal(text['coordinates'], "data")
def test_image():
fig, ax = plt.subplots()
ax.imshow(np.random.random((20, 20)), cmap=plt.cm.binary,
alpha=0.2, zorder=4, extent=(2, 4, 3, 5))
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
image = axrep['images'][0]
# TODO: how to test data?
assert_equal(list(sorted(image.keys())),
['alpha', 'coordinates', 'data', 'extent', 'id', 'zorder'])
assert_equal(image['alpha'], 0.2)
assert_equal(image['extent'], (2, 4, 3, 5))
assert_equal(image['zorder'], 4)
assert_equal(image['coordinates'], "data")
def test_ticks():
plt.xticks([1,2,3])
rep = fig_to_html(plt.gcf())
# TODO: use casperjs here if available to confirm that the xticks
    # are rendered as expected
# pandas tslib generates ticks with unusual dtypes
# test that they are converted to html successfully
plt.xticks(np.array([1,2,3], dtype=np.int32))
rep = fig_to_html(plt.gcf())
# custom ticks should appear in the correct place, with the
# correct text
positions, labels = [0, 1, 10], ['A','B','C']
rep = fig_to_html(plt.gcf())
# TODO: use casperjs here if available to confirm that the xticks
    # are rendered as expected
| bsd-3-clause |
arvinsahni/ml4 | news_sentiment_files.py | 2 | 3086 | import pandas as pd
import psycopg2
##connect to db
conn = psycopg2.connect(database="", user="", password="",
host="", port="")
## Query to get data where relevancy score is higher than 0.6
cur = conn.cursor()
cur.execute("select person, url, website, title, substr(pubdate,1,8) as pubdate, sentiment, trump, hillary from data where trump > 0.6 or hillary > 0.6;")
data = cur.fetchall()
conn.commit()
##List of sources to be used
top_30_list = ['wsj.com', 'ap.org', 'bloomberg.com', 'breitbart.com', 'cadillacnews.com', 'cbsnews.com', 'chicagotribune.com', 'cnn.com', 'democraticunderground.com',
'foxnews.com', 'freerepublic.com', 'gazette.com', 'huffingtonpost.com', 'msn.com', 'msnbc.com', 'mysanantonio.com',
'newsmax.com', 'patch.com', 'reuters.com', 'srnnews.com', 'tbo.com', 'thehill.com', 'theweek.com', 'time.com',
'townhall.com', 'washingtonpost.com', 'washingtontimes.com', 'yahoo.com']
## Put data in pandas dataframe and filter
data = pd.DataFrame(data, columns=['person','url', 'website', 'title','date','score', 'relevancy_trump', 'relevancy_hillary'])
data = data[data['website'].isin(top_30_list)]
data['date']=pd.to_datetime(data['date']) ## convert to date times from string
temp = data[data['person'].isin(['hillary', 'trump'])] ## filter out gary and jill
temp = temp[['person', 'website','score','date','url']] ## leave only needed columns
temp.loc[temp['person'] == 'hillary','person'] = 'Clinton' ## rename to Clinton
temp.loc[temp['person'] == 'trump','person'] = 'Trump' ## rename to Trump
temp.columns=['Candidate','Source','Score','Date','url'] ## rename columns
## Aggregate all news sentiment score by Candidate and Date
avg_sentiment = temp[['Candidate','Date','Score']].groupby(['Candidate','Date']).mean().reset_index()
## Get article counts by date to filter out dates where there are not a lot of articles
temp_count = temp.groupby('Date').count()
temp = temp[temp['Date'].isin(temp_count[temp_count['Candidate'] > 75].index.tolist())]
avg_sentiment = avg_sentiment[avg_sentiment['Date'].isin(temp_count[temp_count['Candidate'] > 75].index.tolist())]
## Aggregate score and count by Candidate, Source and Date
top25_sentiment = temp.groupby(['Candidate', 'Source', 'Date']).agg(['mean','count']).reset_index()
top25_sentiment.columns =['Candidate', 'Source', 'Date', 'Score', 'Count'] ## Collapse to regular index from multi-index
## Get full index to fill in dates by Candidate and source where mean/count does not exist
full_index = pd.MultiIndex.from_product([('Clinton', 'Trump'), top_30_list, pd.date_range(top25_sentiment['Date'].min(), top25_sentiment['Date'].max())], names=['Candidate', 'Source', 'Date'])
top25_sentiment = top25_sentiment.set_index(['Candidate','Source','Date']) ## Collapse to regular index from multi-index
top25_sentiment = top25_sentiment.reindex(full_index).reset_index().fillna(0) ## fill NA with 0
## Write data to csv files
top25_sentiment.to_csv('sentiment.csv', index=False)
avg_sentiment.to_csv('avg_sentiment.csv', index=False) | mit |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/base.py | 2 | 31253 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict, Iterator
from functools import partial
from hashlib import md5
from operator import getitem
import inspect
import pickle
import os
import threading
import uuid
import warnings
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import long, unicode
from .context import thread_state
from .core import flatten, quote, get as simple_get
from .hashing import hash_buffer_hex
from .utils import Dispatch, ensure_dict
from . import config, local, threaded
__all__ = ("DaskMethodsMixin",
"is_dask_collection",
"compute", "persist", "optimize", "visualize",
"tokenize", "normalize_token")
def is_dask_collection(x):
"""Returns ``True`` if ``x`` is a dask collection"""
try:
return x.__dask_graph__() is not None
except (AttributeError, TypeError):
return False
class DaskMethodsMixin(object):
"""A mixin adding standard dask collection methods"""
__slots__ = ()
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def persist(self, **kwargs):
"""Persist this dask collection into memory
This turns a lazy Dask collection into a Dask collection with the same
metadata, but now with the results fully computed or actively computing
in the background.
The action of function differs significantly depending on the active
task scheduler. If the task scheduler supports asynchronous computing,
such as is the case of the dask.distributed scheduler, then persist
will return *immediately* and the return value's task graph will
contain Dask Future objects. However if the task scheduler only
supports blocking computation then the call to persist will *block*
and the return value's task graph will contain concrete Python results.
This function is particularly useful when using distributed systems,
because the results will be kept in distributed memory, rather than
returned to the local process as with compute.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
See Also
--------
dask.base.persist
"""
(result,) = persist(self, traverse=False, **kwargs)
return result
def compute(self, **kwargs):
"""Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask.array turns into a :func:`numpy.array` and a Dask.dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
See Also
--------
dask.base.compute
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
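# Hedged sketch of a minimal custom collection built on this mixin. ``Tuple``
# is a toy example (not part of this module); the dunder methods below are the
# protocol hooks that compute/persist/visualize in this file rely on:
#
#     class Tuple(DaskMethodsMixin):
#         def __init__(self, dsk, keys):
#             self._dsk, self._keys = dsk, keys
#
#         def __dask_graph__(self):
#             return self._dsk
#
#         def __dask_keys__(self):
#             return self._keys
#
#         __dask_scheduler__ = staticmethod(threaded.get)
#
#         def __dask_postcompute__(self):
#             return tuple, ()              # called as tuple(results)
#
#         def __dask_postpersist__(self):
#             return Tuple, (self._keys,)   # called as Tuple(dsk, keys)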
def compute_as_if_collection(cls, dsk, keys, get=None, scheduler=None, **kwargs):
"""Compute a graph as if it were of type cls.
Allows for applying the same optimizations and default scheduler."""
schedule = get_scheduler(get=get, scheduler=scheduler, cls=cls)
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return schedule(dsk2, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
return dsk
def optimization_function(x):
return getattr(x, '__dask_optimize__', dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
optimizations = (kwargs.pop('optimizations', None) or
config.get('optimizations', []))
if optimize_graph:
groups = groupby(optimization_function, collections)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: (opt(dsk, keys), keys)
for k, (dsk, keys) in groups.items()}
dsk = merge(*(opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()))
else:
dsk, _ = _extract_graph_and_keys(collections)
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
that ``get(dsk, keys)`` is equivalent to ``[v.compute() v in vals]``."""
dsk = {}
keys = []
for v in vals:
d = v.__dask_graph__()
if hasattr(d, 'dicts'):
for dd in d.dicts.values():
dsk.update(dd)
else:
dsk.update(d)
keys.append(v.__dask_keys__())
return dsk, keys
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop('traverse', True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ is dict:
tsk = (dict, [[_unpack(k), _unpack(v)]
for k, v in expr.items()])
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
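# Worked sketch of the unpack/repack contract (the delayed objects below are
# for illustration only):
#
#     import dask
#     a = dask.delayed(1) + 1
#     b = dask.delayed(2) * 2
#     collections, repack = unpack_collections({'x': a, 'pair': (b, 10)})
#     # collections == [a, b]; repacking their computed values restores the
#     # original nesting:
#     repack([2, 4])   # ({'x': 2, 'pair': (4, 10)},)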
def optimize(*args, **kwargs):
"""Optimize several dask collections at once.
Returns equivalent dask collections that all share the same merged and
optimized underlying graph. This can be useful if converting multiple
collections to delayed objects, or to manually apply the optimizations at
strategic points.
Note that in most cases you shouldn't need to call this method directly.
Parameters
----------
*args : objects
Any number of objects. If a dask object, its graph is optimized and
merged with all those of all other dask objects before returning an
equivalent dask collection. Non-dask arguments are passed through
unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``optimize``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimizations : list of callables, optional
Additional optimization passes to perform.
**kwargs
Extra keyword arguments to forward to the optimization passes.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> a2, b2 = optimize(a, b)
>>> a2.compute() == a.compute()
True
>>> b2.compute() == b.compute()
True
"""
collections, repack = unpack_collections(*args, **kwargs)
if not collections:
return args
dsk = collections_to_dsk(collections, **kwargs)
postpersists = [a.__dask_postpersist__() if is_dask_collection(a)
else (None, a) for a in args]
keys, postpersists = [], []
for a in collections:
keys.extend(flatten(a.__dask_keys__()))
postpersists.append(a.__dask_postpersist__())
return repack([r(dsk, *s) for r, s in postpersists])
# TODO: remove after deprecation cycle of `dask.optimize` module completes
from . import optimize as _deprecated_optimize
for _m in _deprecated_optimize.__all__:
setattr(optimize, _m, getattr(_deprecated_optimize, _m))
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> compute({'a': a, 'b': b, 'c': 1}) # doctest: +SKIP
({'a': 45, 'b': 4.5, 'c': 1},)
"""
traverse = kwargs.pop('traverse', True)
optimize_graph = kwargs.pop('optimize_graph', True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(get=kwargs.pop('get', None),
scheduler=kwargs.pop('scheduler', None),
collections=collections)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys = [x.__dask_keys__() for x in collections]
postcomputes = [x.__dask_postcompute__() for x in collections]
results = schedule(dsk, keys, **kwargs)
return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
result : IPython.diplay.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
from dask.dot import dot_graph
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if is_dask_collection(arg)]
dsk = collections_to_dsk(args, optimize_graph=optimize_graph)
for d in dsks:
dsk.update(d)
color = kwargs.get('color')
if color == 'order':
from .order import order
import matplotlib.pyplot as plt
o = order(dsk)
try:
cmap = kwargs.pop('cmap')
except KeyError:
cmap = plt.cm.RdBu
if isinstance(cmap, str):
import matplotlib.pyplot as plt
cmap = getattr(plt.cm, cmap)
mx = max(o.values()) + 1
colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}
kwargs['function_attributes'] = {k: {'color': v, 'label': str(o[k])}
for k, v in colors.items()}
kwargs['data_attributes'] = {k: {'color': v} for k, v in colors.items()}
elif color:
raise NotImplementedError("Unknown value color=%s" % color)
return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, **kwargs):
""" Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small :class:`numpy.array`
(in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``persist``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
"""
traverse = kwargs.pop('traverse', True)
optimize_graph = kwargs.pop('optimize_graph', True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(get=kwargs.pop('get', None),
scheduler=kwargs.pop('scheduler', None),
collections=collections)
if inspect.ismethod(schedule):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == schedule:
results = client.persist(collections,
optimize_graph=optimize_graph,
**kwargs)
return repack(results)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postpersists = [], []
for a in collections:
a_keys = list(flatten(a.__dask_keys__()))
rebuild, state = a.__dask_postpersist__()
keys.extend(a_keys)
postpersists.append((rebuild, a_keys, state))
results = schedule(dsk, keys, **kwargs)
d = dict(zip(keys, results))
results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]
return repack(results2)
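# Illustrative sketch (not part of the original module): the return value mirrors
# the argument structure, so collections come back in the same shape they were
# passed in; `a` and `b` stand for any dask collections.
# >>> a2, b2 = persist(a, b)          # doctest: +SKIP
# >>> (lst,) = persist([a, b])        # lst == [a2, b2]  # doctest: +SKIP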
############
# Tokenize #
############
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
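# Illustrative sketch (not part of the original module): keyword arguments are
# folded into the token via a sorted dict, so their order does not matter,
# while changing any value changes the token.
# >>> tokenize(1, x=2, y=3) == tokenize(1, y=3, x=2)  # doctest: +SKIP
# True
# >>> tokenize(1, x=2) == tokenize(1, x=3)            # doctest: +SKIP
# False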
normalize_token = Dispatch()
normalize_token.register((int, long, float, str, unicode, bytes, type(None),
type, slice, complex, type(Ellipsis)),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, '__dask_tokenize__', None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
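# Illustrative sketch (not part of the original module): a user-defined type can
# opt in to deterministic tokens by defining __dask_tokenize__; without it,
# callables go through normalize_function and everything else gets a fresh uuid.
#
# class Point:
#     def __init__(self, x, y):
#         self.x, self.y = x, y
#     def __dask_tokenize__(self):
#         return (type(self).__name__, self.x, self.y)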
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple((k, normalize_token(v))
for k, v in sorted(func.keywords.items()))
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
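# Illustrative sketch (not part of the original module): a partial reduces to
# (normalized inner function, arg tokens, sorted keyword tokens), so two
# independently built partials with the same target, args and keywords
# normalize identically.
# >>> def f(a, x=None): return (a, x)                         # doctest: +SKIP
# >>> (_normalize_function(partial(f, 1, x=2)) ==
# ...  _normalize_function(partial(f, 1, x=2)))               # doctest: +SKIP
# True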
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
offset = 0 # root memmap's have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = hash_buffer_hex('-'.join(x.flat).encode('utf-8'))
except TypeError:
data = hash_buffer_hex(b'-'.join([unicode(item).encode('utf-8') for item in
x.flat]))
else:
try:
data = hash_buffer_hex(x.ravel(order='K').view('i1'))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order='K').view('i1'))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return 'np.' + name
except AttributeError:
return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return type(x).__name__, normalize_seq((normalize_token(getattr(x, key))
for key in attrs))
for cls, attrs in [(sp.dia_matrix, ('data', 'offsets', 'shape')),
(sp.bsr_matrix, ('data', 'indices', 'indptr',
'blocksize', 'shape')),
(sp.coo_matrix, ('data', 'row', 'col', 'shape')),
(sp.csr_matrix, ('data', 'indices', 'indptr', 'shape')),
(sp.csc_matrix, ('data', 'indices', 'indptr', 'shape')),
(sp.lil_matrix, ('data', 'rows', 'shape'))]:
normalize_token.register(cls,
partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
""" Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = '0' * (6 - len(h)) + h
return "#" + h
named_schedulers = {
'sync': local.get_sync,
'synchronous': local.get_sync,
'single-threaded': local.get_sync,
'threads': threaded.get,
'threading': threaded.get,
}
try:
from dask import multiprocessing as dask_multiprocessing
except ImportError:
pass
else:
named_schedulers.update({
'processes': dask_multiprocessing.get,
'multiprocessing': dask_multiprocessing.get,
})
_warnned_on_get = [False]
def warn_on_get(get):
if _warnned_on_get[0]:
return
else:
if get in named_schedulers.values():
_warnned_on_get[0] = True
warnings.warn("The get= keyword has been deprecated. "
"Please use the scheduler= keyword instead with the "
"name of the desired scheduler "
"like 'threads' or 'processes'")
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
""" Get scheduler function
There are various ways to specify the scheduler to use:
1. Passing in get= parameters (deprecated)
2. Passing in scheduler= parameters
    3. Passing these into global configuration
4. Using defaults of a dask collection
This function centralizes the logic to determine the right scheduler to use
from those many options
"""
if get is not None:
if scheduler is not None:
raise ValueError("Both get= and scheduler= provided. Choose one")
warn_on_get(get)
return get
if scheduler is not None:
if scheduler.lower() in named_schedulers:
return named_schedulers[scheduler.lower()]
elif scheduler.lower() in ('dask.distributed', 'distributed'):
from distributed.worker import get_client
return get_client().get
else:
raise ValueError("Expected one of [distributed, %s]" % ', '.join(sorted(named_schedulers)))
# else: # try to connect to remote scheduler with this name
# return get_client(scheduler).get
if config.get('scheduler', None):
return get_scheduler(scheduler=config.get('scheduler', None))
if config.get('get', None):
warn_on_get(config.get('get', None))
return config.get('get', None)
if getattr(thread_state, 'key', False):
from distributed.worker import get_worker
return get_worker().client.get
if cls is not None:
return cls.__dask_scheduler__
if collections:
collections = [c for c in collections if c is not None]
if collections:
get = collections[0].__dask_scheduler__
if not all(c.__dask_scheduler__ == get for c in collections):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler=` parameter explicitly in compute or "
"globally with `set_options`.")
return get
return None
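# Illustrative sketch (not part of the original module) of how the precedence
# plays out:
# get_scheduler(scheduler='threads')      -> threaded.get (named scheduler wins)
# get_scheduler(get=..., scheduler=...)   -> ValueError (ambiguous request)
# get_scheduler()                         -> global config, else collection default, else None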
| gpl-3.0 |
jat255/hyperspy | hyperspy/_signals/lazy.py | 1 | 40452 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
from dask import threaded
from dask.diagnostics import ProgressBar
from itertools import product
from hyperspy.signal import BaseSignal
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.array_tools import _requires_linear_rebin
from hyperspy.misc.hist_tools import histogram_dask
from hyperspy.misc.machine_learning import import_sklearn
from hyperspy.misc.utils import multiply, dummy_context_manager
_logger = logging.getLogger(__name__)
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
"""Accepts BaseSignal, dask or numpy arrays and always produces either
numpy or dask array.
Parameters
----------
thing : {BaseSignal, dask.array.Array, numpy.ndarray}
the thing to be converted
chunks : {None, tuple of tuples}
If None, the returned value is a numpy array. Otherwise returns dask
array with the chunks as specified.
Returns
-------
res : {numpy.ndarray, dask.array.Array}
"""
if thing is None:
return None
if isinstance(thing, BaseSignal):
thing = thing.data
if chunks is None:
if isinstance(thing, da.Array):
thing = thing.compute()
if isinstance(thing, np.ndarray):
return thing
else:
raise ValueError
else:
if isinstance(thing, np.ndarray):
thing = da.from_array(thing, chunks=chunks)
if isinstance(thing, da.Array):
if thing.chunks != chunks:
thing = thing.rechunk(chunks)
return thing
else:
raise ValueError
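# Illustrative sketch (not from the original file): chunks=None always yields a
# numpy array, while any chunk specification yields a (re)chunked dask array.
# >>> import numpy as np                                            # doctest: +SKIP
# >>> isinstance(to_array(np.zeros((4, 4))), np.ndarray)            # doctest: +SKIP
# True
# >>> to_array(np.zeros((4, 4)), chunks=((2, 2), (2, 2))).chunks    # doctest: +SKIP
# ((2, 2), (2, 2))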
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, progressbar=True, close_file=False):
"""Attempt to store the full signal in memory.
close_file: bool
            If True, attempt to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
"""
if progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError:
_logger.exception("Failed to close lazy Signal file")
def _get_dask_chunks(self, axis=None, dtype=None):
"""Returns dask chunks.
Aims:
- Have at least one signal (or specified axis) in a single chunk,
or as many as fit in memory
Parameters
----------
axis : {int, string, None, axis, tuple}
If axis is None (default), returns chunks for current data shape so
that at least one signal is in the chunk. If an axis is specified,
only that particular axis is guaranteed to be "not sliced".
dtype : {string, np.dtype}
The dtype of target chunks.
Returns
-------
Tuple of tuples, dask chunks
"""
dc = self.data
dcshape = dc.shape
for _axis in self.axes_manager._axes:
if _axis.index_in_array < len(dcshape):
_axis.size = int(dcshape[_axis.index_in_array])
if axis is not None:
need_axes = self.axes_manager[axis]
if not np.iterable(need_axes):
need_axes = [need_axes, ]
else:
need_axes = self.axes_manager.signal_axes
if dtype is None:
dtype = dc.dtype
elif not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
typesize = max(dtype.itemsize, dc.dtype.itemsize)
want_to_keep = multiply([ax.size for ax in need_axes]) * typesize
        # @mrocklin recommends having around 100MB chunks, so we do that:
num_that_fit = int(100. * 2.**20 / want_to_keep)
# want to have at least one "signal" per chunk
if num_that_fit < 2:
chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
for ax in need_axes:
chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
return tuple(chunks)
sizes = [
ax.size for ax in self.axes_manager._axes if ax not in need_axes
]
indices = [
ax.index_in_array for ax in self.axes_manager._axes
if ax not in need_axes
]
while True:
if multiply(sizes) <= num_that_fit:
break
i = np.argmax(sizes)
sizes[i] = np.floor(sizes[i] / 2)
chunks = []
ndim = len(dc.shape)
for i in range(ndim):
if i in indices:
size = float(dc.shape[i])
split_array = np.array_split(
np.arange(size), np.ceil(size / sizes[indices.index(i)]))
chunks.append(tuple(len(sp) for sp in split_array))
else:
chunks.append((dc.shape[i], ))
return tuple(chunks)
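    # Worked example (illustrative, not from the original file): for float64 data
    # with a (512, 512) signal, want_to_keep = 512 * 512 * 8 bytes = 2 MiB, so
    # num_that_fit = int(100 MiB / 2 MiB) = 50 signals per chunk; the navigation
    # sizes are then halved until their product drops below that figure.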
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
super().change_dtype(dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
"""Return the data as a dask array, rechunked if necessary.
Parameters
----------
axis: None, DataAxis or tuple of data axes
The data axis that must not be broken into chunks when `rechunk`
is `True`. If None, it defaults to the current signal axes.
rechunk: bool, "dask_auto"
If `True`, it rechunks the data if necessary making sure that the
            axes in ``axis`` are not split into chunks. If `False`, it does
            not rechunk unless the data is not yet a dask array, in which
            case it chunks as if `rechunk` were `True`. If "dask_auto", rechunk if
necessary using dask's automatic chunk guessing.
"""
if rechunk == "dask_auto":
new_chunks = "auto"
else:
new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
if isinstance(self.data, da.Array):
res = self.data
if self.data.chunks != new_chunks and rechunk:
_logger.info(
"Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
res = self.data.rechunk(new_chunks)
_logger.info(
"Final chunks: %s " % str(res.chunks))
else:
if isinstance(self.data, np.ma.masked_array):
data = np.where(self.data.mask, np.nan, self.data)
else:
data = self.data
res = da.from_array(data, chunks=new_chunks)
assert isinstance(res, da.Array)
return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def rebin(self, new_shape=None, scale=None,
crop=False, out=None, rechunk=True):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale)
if _requires_linear_rebin(arr=self.data, scale=factors):
if new_shape:
raise NotImplementedError(
"Lazy rebin requires that the new shape is a divisor "
"of the original signal shape e.g. if original shape "
"(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
else:
raise NotImplementedError(
"Lazy rebin requires scale to be integer and divisor of the "
"original signal shape")
axis = {ax.index_in_array: ax
for ax in self.axes_manager._axes}[factors.argmax()]
self._make_lazy(axis=axis, rechunk=rechunk)
return super().rebin(new_shape=new_shape,
scale=scale, crop=crop, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
arr_axis = self.axes_manager[axis].index_in_array
def dask_diff(arr, n, axis):
# assume arr is da.Array already
n = int(n)
if n == 0:
return arr
if n < 0:
raise ValueError("order must be positive")
nd = len(arr.shape)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
else:
return arr[slice1] - arr[slice2]
current_data = self._lazy_data(axis=axis, rechunk=rechunk)
new_data = dask_diff(current_data, order, arr_axis)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
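    # Worked example (illustrative, not from the original file): along an axis
    # holding [1, 4, 9, 16], order=1 yields [3, 5, 7] and order=2 yields [2, 2];
    # the axis offset is shifted by order * scale / 2 so the differences sit
    # between the original sample positions.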
def integrate_simpson(self, axis, out=None):
        axis = self.axes_manager[axis]
        from scipy import integrate
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='fd', out=None, rechunk=True, **kwargs):
if 'range_bins' in kwargs:
_logger.warning("'range_bins' argument not supported for lazy "
"signals")
del kwargs['range_bins']
from hyperspy.signals import Signal1D
data = self._lazy_data(rechunk=rechunk).flatten()
hist, bin_edges = histogram_dask(data, bins=bins, **kwargs)
if out is None:
hist_spec = Signal1D(hist)
hist_spec._lazy = True
hist_spec._assign_subclass()
else:
hist_spec = out
# we always overwrite the data because the computation is lazy ->
# the result signal is lazy. Assume that the `out` is already lazy
hist_spec.data = hist
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.metadata.General.title = (
self.metadata.General.title + " histogram")
hist_spec.metadata.Signal.binned = True
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
# The lower bound of the variance is the gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
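    # Worked example (illustrative, not from the original file): with
    # gain_factor=1, gain_offset=0.5 and correlation_factor=1, a count of 10
    # gives a variance of 10.5, and any estimate below the gaussian floor of
    # gain_offset * correlation_factor = 0.5 is clipped up to 0.5.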
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
            # Use dask auto rechunk instead of HyperSpy's one, which should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
def _map_all(self, function, inplace=True, **kwargs):
calc_result = dd(function)(self.data, **kwargs)
if inplace:
self.data = da.from_delayed(calc_result, shape=self.data.shape,
dtype=self.data.dtype)
return None
return self._deepcopy_with_new_data(calc_result)
def _map_iterate(self,
function,
iterating_kwargs=(),
show_progressbar=None,
parallel=None,
max_workers=None,
ragged=None,
inplace=True,
**kwargs):
if ragged not in (True, False):
raise ValueError('"ragged" kwarg has to be bool for lazy signals')
_logger.debug("Entering '_map_iterate'")
size = max(1, self.axes_manager.navigation_size)
from hyperspy.misc.utils import (create_map_objects,
map_result_construction)
func, iterators = create_map_objects(function, size, iterating_kwargs,
**kwargs)
iterators = (self._iterate_signal(), ) + iterators
res_shape = self.axes_manager._navigation_shape_in_array
# no navigation
if not len(res_shape) and ragged:
res_shape = (1,)
all_delayed = [dd(func)(data) for data in zip(*iterators)]
if ragged:
if inplace:
raise ValueError("In place computation is not compatible with "
"ragged array for lazy signal.")
# Shape of the signal dimension will change for the each nav.
# index, which means we can't predict the shape and the dtype needs
# to be python object to support numpy ragged array
sig_shape = ()
sig_dtype = np.dtype('O')
else:
one_compute = all_delayed[0].compute()
# No signal dimension for scalar
if np.isscalar(one_compute):
sig_shape = ()
sig_dtype = type(one_compute)
else:
sig_shape = one_compute.shape
sig_dtype = one_compute.dtype
pixels = [
da.from_delayed(
res, shape=sig_shape, dtype=sig_dtype) for res in all_delayed
]
if ragged:
if show_progressbar is None:
from hyperspy.defaults_parser import preferences
show_progressbar = preferences.General.show_progressbar
            # We compute here because it is not certain whether it is possible
# to make a ragged dask array: we need to provide a chunk size...
res_data = np.empty(res_shape, dtype=sig_dtype)
_logger.info("Lazy signal is computed to make the ragged array.")
if show_progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
try:
for i, pixel in enumerate(pixels):
res_data.flat[i] = pixel.compute()
except MemoryError:
raise MemoryError("The use of 'ragged' array requires the "
"computation of the lazy signal.")
else:
if len(pixels) > 0:
for step in reversed(res_shape):
_len = len(pixels)
starts = range(0, _len, step)
ends = range(step, _len + step, step)
pixels = [
da.stack(
pixels[s:e], axis=0) for s, e in zip(starts, ends)
]
res_data = pixels[0]
res = map_result_construction(
self, inplace, res_data, ragged, sig_shape, lazy=not ragged)
return res
def _iterate_signal(self):
if self.axes_manager.navigation_size < 2:
yield self()
return
nav_dim = self.axes_manager.navigation_dimension
sig_dim = self.axes_manager.signal_dimension
nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
nav_lengths = np.atleast_1d(
np.array(self.data.shape)[list(nav_indices)])
getitem = [slice(None)] * (nav_dim + sig_dim)
data = self._lazy_data()
for indices in product(*[range(l) for l in nav_lengths]):
for res, ind in zip(indices, nav_indices):
getitem[ind] = res
yield data[tuple(getitem)]
def _block_iterator(self,
flat_signal=True,
get=threaded.get,
navigation_mask=None,
signal_mask=None):
"""A function that allows iterating lazy signal data by blocks,
defining the dask.Array.
Parameters
----------
flat_signal: bool
returns each block flattened, such that the shape (for the
particular block) is (navigation_size, signal_size), with
optionally masked elements missing. If false, returns
the equivalent of s.inav[{blocks}].data, where masked elements are
set to np.nan or 0.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not returned (flat) or
set to NaN or 0.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not returned (flat) or set
to NaN or 0.
"""
self._make_lazy()
data = self._data_aligned_with_axes
nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
indices = product(*[range(len(c)) for c in nav_chunks])
signalsize = self.axes_manager.signal_size
sig_reshape = (signalsize,) if signalsize else ()
data = data.reshape((self.axes_manager.navigation_shape[::-1] +
sig_reshape))
if signal_mask is None:
signal_mask = slice(None) if flat_signal else \
np.zeros(self.axes_manager.signal_size, dtype='bool')
else:
try:
signal_mask = to_array(signal_mask).ravel()
except ValueError:
# re-raise with a message
raise ValueError("signal_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(signal_mask)))
if flat_signal:
signal_mask = ~signal_mask
if navigation_mask is None:
nav_mask = da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks,
dtype='bool')
else:
try:
nav_mask = to_array(navigation_mask, chunks=nav_chunks)
except ValueError:
# re-raise with a message
raise ValueError("navigation_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(navigation_mask)))
if flat_signal:
nav_mask = ~nav_mask
for ind in indices:
chunk = get(data.dask,
(data.name, ) + ind + (0,) * bool(signalsize))
n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
if flat_signal:
yield chunk[n_mask, ...][..., signal_mask]
else:
chunk = chunk.copy()
value = np.nan if np.can_cast('float', chunk.dtype) else 0
chunk[n_mask, ...] = value
chunk[..., signal_mask] = value
yield chunk.reshape(chunk.shape[:-1] +
self.axes_manager.signal_shape[::-1])
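    # Illustrative sketch (not from the original file): accumulate a statistic
    # over flattened, unmasked blocks without loading the full dataset at once.
    # >>> total = 0                                               # doctest: +SKIP
    # >>> for block in s._block_iterator(flat_signal=True):       # doctest: +SKIP
    # ...     total += block.sum()                                 # doctest: +SKIP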
def decomposition(
self,
normalize_poissonian_noise=False,
algorithm="SVD",
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
print_info=True,
**kwargs
):
"""Perform Incremental (Batch) decomposition on the data.
The results are stored in ``self.learning_results``.
Read more in the :ref:`User Guide <big_data.decomposition>`.
Parameters
----------
normalize_poissonian_noise : bool, default False
If True, scale the signal to normalize Poissonian noise using
the approach described in [KeenanKotula2004]_.
algorithm : {'SVD', 'PCA', 'ORPCA', 'ORNMF'}, default 'SVD'
The decomposition algorithm to use.
output_dimension : int or None, default None
Number of components to keep/calculate. If None, keep all
(only valid for 'SVD' algorithm)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int or None, default None
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
increased to contain at least ``output_dimension`` signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool, default True
Reproject data on the learnt components (factors) after learning.
print_info : bool, default True
If True, print information about the decomposition being performed.
In the case of sklearn.decomposition objects, this includes the
values of all arguments of the chosen sklearn algorithm.
**kwargs
passed to the partial_fit/fit functions.
References
----------
.. [KeenanKotula2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
in the multivariate analysis of ToF-SIMS spectrum images", Surf.
Interface Anal 36(3) (2004): 203-212.
See Also
--------
* :py:meth:`~.learn.mva.MVA.decomposition` for non-lazy signals
* :py:func:`dask.array.linalg.svd`
* :py:class:`sklearn.decomposition.IncrementalPCA`
* :py:class:`~.learn.rpca.ORPCA`
* :py:class:`~.learn.ornmf.ORNMF`
"""
if kwargs.get("bounds", False):
warnings.warn(
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.",
VisibleDeprecationWarning,
)
kwargs.pop("bounds", None)
# Deprecate 'ONMF' for 'ORNMF'
if algorithm == "ONMF":
warnings.warn(
"The argument `algorithm='ONMF'` has been deprecated and will "
"be removed in future. Please use `algorithm='ORNMF'` instead.",
VisibleDeprecationWarning,
)
algorithm = "ORNMF"
# Check algorithms requiring output_dimension
algorithms_require_dimension = ["PCA", "ORPCA", "ORNMF"]
if algorithm in algorithms_require_dimension and output_dimension is None:
raise ValueError(
"`output_dimension` must be specified for '{}'".format(algorithm)
)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[: self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension :]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# Initialize return_info and print_info
to_return = None
to_print = [
"Decomposition info:",
" normalize_poissonian_noise={}".format(normalize_poissonian_noise),
" algorithm={}".format(algorithm),
" output_dimension={}".format(output_dimension)
]
# LEARN
if algorithm == "PCA":
if not import_sklearn.sklearn_installed:
raise ImportError("algorithm='PCA' requires scikit-learn")
obj = import_sklearn.sklearn.decomposition.IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
to_print.extend(["scikit-learn estimator:", obj])
elif algorithm == "ORPCA":
from hyperspy.learn.rpca import ORPCA
batch_size = kwargs.pop("batch_size", None)
obj = ORPCA(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm == "ORNMF":
from hyperspy.learn.ornmf import ORNMF
batch_size = kwargs.pop("batch_size", None)
obj = ORNMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "SVD":
raise ValueError("'algorithm' not recognised")
original_data = self.data
try:
_logger.info("Performing decomposition analysis")
if normalize_poissonian_noise:
_logger.info("Scaling the data to normalize Poissonian noise")
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(self.axes_manager.navigation_shape[::-1], chunks=nav_chunks)
if navigation_mask is None
else to_array(navigation_mask, chunks=nav_chunks)
)
sm = da.logical_not(
da.zeros(self.axes_manager.signal_shape[::-1], chunks=sig_chunks)
if signal_mask is None
else to_array(signal_mask, chunks=sig_chunks)
)
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=tuple(range(ndim))),
data.sum(axis=tuple(range(ndim, ndim + sdim))),
)
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(...,) + (None,) * rbH.ndim] * rbH[(None,) * raG.ndim + (...,)]
                coeff = coeff.map_blocks(np.nan_to_num)  # reassign: map_blocks returns a new array
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "SVD":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask or signal_mask:
raise NotImplementedError("Masking is not yet implemented for lazy SVD")
U, S, V = svd(self.data)
if output_dimension is None:
min_shape = min(min(U.shape), min(V.shape))
else:
min_shape = output_dimension
U = U[:, :min_shape]
S = S[:min_shape]
V = V[:min_shape]
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
                        self._unfolded4decomposition = False
else:
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
total=nblocks,
leave=True,
desc="Learn",
):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt: # pragma: no cover
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == "PCA":
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == "ORPCA":
factors, loadings = obj.finish()
loadings = loadings.T
elif algorithm == "ORNMF":
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == "PCA":
method = obj.transform
def post(a):
return np.concatenate(a, axis=0)
elif algorithm == "ORPCA":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
elif algorithm == "ORNMF":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
_map = map(
lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
)
H = []
try:
for thing in progressbar(_map, total=nblocks, desc="Project"):
H.append(thing)
except KeyboardInterrupt: # pragma: no cover
pass
loadings = post(H)
if explained_variance is not None and explained_variance_ratio is None:
explained_variance_ratio = explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "SVD": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings, ndim, (output_dimension,), nav_chunks
).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "SVD":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
# Print details about the decomposition we just performed
if print_info:
print("\n".join([str(pr) for pr in to_print]))
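# Illustrative usage sketch (not from the original file); "stack.hspy" is a
# hypothetical dataset name.
# >>> import hyperspy.api as hs                                    # doctest: +SKIP
# >>> s = hs.load("stack.hspy", lazy=True)                         # doctest: +SKIP
# >>> s.decomposition(algorithm="PCA", output_dimension=10,        # doctest: +SKIP
# ...                 normalize_poissonian_noise=True)             # doctest: +SKIP
# >>> s.learning_results.explained_variance_ratio[:3]              # doctest: +SKIP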
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
"""Reshuffles dask block-shuffled array
Parameters
----------
array : np.ndarray
the array to reshuffle
ndim : int
the number of navigation (shuffled) dimensions
    sshape : tuple of ints
        The signal shape appended to each navigation chunk when reshaping
    nav_chunks : tuple of tuples of ints
        The dask chunk structure of the navigation dimensions
"""
splits = np.cumsum([multiply(ar)
for ar in product(*nav_chunks)][:-1]).tolist()
if splits:
all_chunks = [
ar.reshape(shape + sshape)
for shape, ar in zip(
product(*nav_chunks), np.split(array, splits))
]
def split_stack_list(what, step, axis):
total = len(what)
if total != step:
return [
np.concatenate(
what[i:i + step], axis=axis)
for i in range(0, total, step)
]
else:
return np.concatenate(what, axis=axis)
for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
step = len(chunks)
all_chunks = split_stack_list(all_chunks, step, axis)
return all_chunks
else:
return array
| gpl-3.0 |
merenlab/anvio | anvio/terminal.py | 1 | 36347 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""Relations with the console output, Progress and Run classes"""
import os
import re
import sys
import time
import fcntl
import numpy as np
import struct
import pandas as pd
import termios
import datetime
import textwrap
from colored import fore, back, style
from collections import OrderedDict
import anvio
import anvio.dictio as dictio
import anvio.constants as constants
from anvio.errors import TerminalError
from anvio.ttycolors import color_text as c
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
__status__ = "Development"
# clean garbage characters:
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
non_ascii_escape = re.compile(r'[^\x00-\x7F]+')
CLEAR = lambda line: ansi_escape.sub('', non_ascii_escape.sub('', line.strip()))
class SuppressAllOutput(object):
def __enter__(self):
sys.stderr.flush()
self.old_stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
sys.stdout.flush()
self.old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_value, traceback):
sys.stderr.flush()
sys.stderr = self.old_stderr
sys.stdout.flush()
sys.stdout = self.old_stdout
def remove_spaces(text):
while True:
if text.find(" ") > -1:
text = text.replace(" ", " ")
else:
break
return text
def pluralize(word, number, sfp="s", sfs=None, pfs=None, alt=None):
"""Pluralize a given word mindfully.
We often run into a situation where the word of choice depends on the number of items
    it describes and it can take a lot of extra space in the code. For instance, take this:
>>> f"You have {num_sequences_in_fasta_file} sequences in your FASTA file."
    This will print "You have 1 sequences in your FASTA file" like an idiot when there is only
one sequence. An alternative is to do something more elaborate:
    >>> f"You have {num_sequences_in_fasta_file} {'sequence' if num_sequences_in_fasta_file == 1 else 'sequences'}"
Even though this will work beautifully, it works at the expense of the readability of the code
for a minor inconvenience.
THE PURPOSE of this function is to fix this problem in a more elegant fashion. The following call is
equivalent to the second example:
>>> f"You have {pluralize('sequence', num_sequences_in_fasta_file)} in your FASTA file."
Alternatively, you can provide this function an `alt`, in which case it would return `word` for singular
and `alt` for plural cases:
>>> f"{pluralize('these do not', number, alt='this does not')}."
Voila.
Parameters
==========
word: str
        The word to conditionally pluralize
number: int
The number of items the word intends to describe
sfp: str, 's'
Suffix for plural. The character that needs to be added to the end of the
`word` if plural.
sfs: str, None
Suffix for singular. The same for `sfp` for singular case.
pfs: str, None
Prefix for singular. `pfs` will replace `1` in the final output (common
parameters could be `pfs="a single" or pfs="only one"`).
    alt: str, None
If you provide an alternative, pluralize will discard every other parameter
and will simply return `word` for singular, and `alt` for plural case.
"""
plural = number != 1
if plural:
if alt:
return alt
else:
return f"{pretty_print(number)} {word}{sfp}"
else:
if alt:
return word
else:
if sfs:
return f"{pretty_print(number)} {word}{sfs}"
else:
if pfs:
return f"{pfs} {word}"
else:
return f"{number} {word}"
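# Illustrative sketch (not from the original file): the suffix/prefix knobs in
# action (assuming pretty_print leaves small integers unchanged).
# >>> pluralize("batch", 2, sfp="es")          # doctest: +SKIP
# '2 batches'
# >>> pluralize("batch", 1, pfs="a single")    # doctest: +SKIP
# 'a single batch'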
class Progress:
def __init__(self, verbose=True):
self.pid = None
self.verbose = verbose
self.terminal_width = None
self.is_tty = sys.stdout.isatty()
self.get_terminal_width()
self.current = None
self.progress_total_items = None
self.progress_current_item = 0
self.t = Timer(self.progress_total_items)
self.LEN = lambda s: len(s.encode('utf-16-le')) // 2
if anvio.NO_PROGRESS or anvio.QUIET:
self.verbose = False
def get_terminal_width(self):
try:
self.terminal_width = max(get_terminal_size()[0], 60)
except:
# Getting the terminal size failed. It could be for many reasons: they may not have a
# screen, they may be running TempleOS, etc. We respond by giving a generous terminal
# width so that if they can see it at all, it truncates only the longest update messages.
self.terminal_width = 120
def new(self, pid, discard_previous_if_exists=False, progress_total_items=None):
if self.pid:
if discard_previous_if_exists:
self.end()
else:
raise TerminalError("Progress.new() can't be called before ending the previous one (Existing: '%s', Competing: '%s')." % (self.pid, pid))
if not self.verbose:
return
self.pid = '%s %s' % (get_date(), pid)
self.get_terminal_width()
self.current = None
self.step = None
self.progress_total_items = progress_total_items
self.progress_current_item = 0
self.t = Timer(self.progress_total_items)
def update_pid(self, pid):
self.pid = '%s %s' % (get_date(), pid)
def increment(self, increment_to=None):
if increment_to:
self.progress_current_item = increment_to
else:
self.progress_current_item += 1
self.t.make_checkpoint(increment_to = increment_to)
def write(self, c, dont_update_current=False):
eta_c = ' ETA: %s' % str(self.t.eta()) if self.progress_total_items else ''
surpass = self.terminal_width - self.LEN(c) - self.LEN(eta_c)
if surpass < 0:
c = c[0:-(-surpass + 6)] + ' (...)'
else:
if not dont_update_current:
self.current = c
c += ' ' * surpass
c += eta_c
if self.verbose:
if self.progress_total_items and self.is_tty:
p_text = ''
p_length = self.LEN(p_text)
end_point = self.LEN(c) - self.LEN(eta_c)
break_point = round(end_point * self.progress_current_item / self.progress_total_items)
# see a full list of color codes: https://gitlab.com/dslackw/colored
if p_length >= break_point:
sys.stderr.write(back.CYAN + fore.BLACK + c[:break_point] + \
back.GREY_30 + fore.WHITE + c[break_point:end_point] + \
back.CYAN + fore.CYAN + c[end_point] + \
back.GREY_50 + fore.LIGHT_CYAN + c[end_point:] + \
style.RESET)
else:
sys.stderr.write(back.CYAN + fore.BLACK + c[:break_point - p_length] + \
back.SALMON_1 + fore.BLACK + p_text + \
back.GREY_30 + fore.WHITE + c[break_point:end_point] + \
back.GREY_50 + fore.LIGHT_CYAN + c[end_point:] + \
style.RESET)
sys.stderr.flush()
else:
sys.stderr.write(back.CYAN + fore.BLACK + c + style.RESET)
sys.stderr.flush()
def reset(self):
self.clear()
def clear(self):
if not self.verbose:
return
null = '\r' + ' ' * (self.terminal_width)
sys.stderr.write(null)
sys.stderr.write('\r')
sys.stderr.flush()
self.current = None
self.step = None
def append(self, msg):
if not self.verbose:
return
self.write('%s%s' % (self.current, msg))
def step_start(self, step, symbol="⚙ "):
if not self.pid:
raise TerminalError("You don't have an active progress to do it :/")
if not self.current:
            raise TerminalError("You don't have a current progress bar :(")
if self.step:
raise TerminalError("You already have an unfinished step :( Here it is: '%s'." % self.step)
if not self.verbose:
return
self.step = " / %s " % (step)
self.write(self.current + self.step + symbol, dont_update_current=True)
def step_end(self, symbol="👍"):
if not self.step:
raise TerminalError("You don't have an ongoing step :(")
if not self.verbose:
return
self.write(self.current + self.step + symbol)
self.step = None
def update(self, msg, increment=False):
self.msg = msg
if not self.verbose:
return
if not self.pid:
raise TerminalError('Progress with null pid will not update for msg "%s"' % msg)
if increment:
self.increment()
self.clear()
self.write('\r[%s] %s' % (self.pid, msg))
def end(self, timing_filepath=None):
"""End the current progress
Parameters
==========
timing_filepath : str, None
Store the timings of this progress to the filepath `timing_filepath`. File will only be
made if a progress_total_items parameter was made during self.new()
"""
if timing_filepath and self.progress_total_items is not None:
self.t.gen_file_report(timing_filepath)
self.pid = None
if not self.verbose:
return
self.clear()
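# Illustrative usage sketch (not from the original file):
# progress = Progress()
# progress.new("Profiling", progress_total_items=1000)
# for i in range(1000):
#     progress.update("item %d of 1,000" % (i + 1), increment=True)
# progress.end()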
class Run:
def __init__(self, log_file_path=None, verbose=True, width=45):
self.log_file_path = log_file_path
self.info_dict = {}
self.verbose = verbose
self.width = width
self.single_line_prefixes = {0: '',
1: '* ',
2: ' - ',
3: ' > '}
if anvio.QUIET:
self.verbose = False
def log(self, line):
if not self.log_file_path:
self.warning("The run object got a logging request, but it was not inherited with "
"a log file path :(")
return
with open(self.log_file_path, "a") as log_file: log_file.write('[%s] %s\n' % (get_date(), CLEAR(line)))
def write(self, line, quiet=False, overwrite_verbose=False):
if self.log_file_path:
self.log(line)
if (self.verbose and not quiet) or (overwrite_verbose and not anvio.QUIET):
try:
sys.stderr.write(line)
except:
sys.stderr.write(line.encode('utf-8'))
def info(self, key, value, quiet=False, display_only=False, overwrite_verbose=False, nl_before=0, nl_after=0, lc='cyan',
mc='yellow', progress=None, align_long_values=True):
"""
This function prints information nicely to the terminal in the form:
key ........: value
PARAMETERS
==========
key : str
what to print before the dots
value : str
what to print after the dots
quiet : boolean
the standard anvi'o quiet parameter which, if True, suppresses some output
display_only : boolean
if False, the key value pair is also stored in the info dictionary
overwrite_verbose : boolean
if True, downstream quiet parameters (though not the global --quiet) are ignored to produce more verbose output
nl_before : int
number of lines to print before the key-value line
nl_after : int
number of lines to print after the key-value line
lc : color str
the color of the label (key)
mc : color str
the color of the value
progress : Progress instance
provides the Progress bar to use
align_long_values : boolean
if True, values that are longer than the terminal width will be broken up into different lines that
align nicely
"""
if not display_only:
self.info_dict[key] = value
if value is None:
value = "None"
elif isinstance(value, bool) or isinstance(value, float) or isinstance(value, list):
value = "%s" % value
elif isinstance(value, str):
value = remove_spaces(value)
elif isinstance(value, int):
value = pretty_print(value)
label = constants.get_pretty_name(key)
info_line = "%s%s %s: %s\n%s" % ('\n' * nl_before, c(label, lc),
'.' * (self.width - len(label)),
c(str(value), mc), '\n' * nl_after)
if align_long_values:
terminal_width = get_terminal_size()[0]
wrap_width = terminal_width - self.width - 3
wrapped_value_lines = textwrap.wrap(value, width=wrap_width, break_long_words=False, break_on_hyphens=False)
if len(wrapped_value_lines) == 0:
aligned_value_str = value
else:
aligned_value_str = wrapped_value_lines[0]
for line in wrapped_value_lines[1:]:
aligned_value_str += "\n %s %s" % (' ' * self.width, line)
info_line = "%s%s %s: %s\n%s" % ('\n' * nl_before, c(label, lc),
'.' * (self.width - len(label)),
c(str(aligned_value_str), mc), '\n' * nl_after)
if progress:
progress.clear()
self.write(info_line, overwrite_verbose=False, quiet=quiet)
progress.update(progress.msg)
else:
self.write(info_line, quiet=quiet, overwrite_verbose=overwrite_verbose)
def info_single(self, message, overwrite_verbose=False, mc='yellow', nl_before=0, nl_after=0, cut_after=80, level=1, progress=None):
if isinstance(message, str):
message = remove_spaces(message)
if level not in self.single_line_prefixes:
raise TerminalError("the `info_single` function does not know how to deal with a level of %d :/" % level)
if cut_after:
message_line = c("%s%s\n" % (self.single_line_prefixes[level], textwrap.fill(str(message), cut_after)), mc)
else:
message_line = c("%s%s\n" % (self.single_line_prefixes[level], str(message)), mc)
message_line = ('\n' * nl_before) + message_line + ('\n' * nl_after)
if progress:
progress.clear()
self.write(message_line, overwrite_verbose=False)
progress.update(progress.msg)
else:
self.write(message_line, overwrite_verbose=False)
def warning(self, message, header='WARNING', lc='red', raw=False, overwrite_verbose=False, nl_before=0, nl_after=0):
if isinstance(message, str):
message = remove_spaces(message)
message_line = ''
header_line = c("%s\n%s\n%s\n" % (('\n' * nl_before), header,
'=' * (self.width + 2)), lc)
if raw:
message_line = c("%s\n\n%s" % ((message), '\n' * nl_after), lc)
else:
message_line = c("%s\n\n%s" % (textwrap.fill(str(message), 80), '\n' * nl_after), lc)
self.write((header_line + message_line) if message else header_line, overwrite_verbose=overwrite_verbose)
def store_info_dict(self, destination, strip_prefix=None):
if strip_prefix:
# mostly to get rid of output_dir prefix in output file names.
# surprisingly enough, this is the best place to do it. live
# and learn :/
self.info_dict = dictio.strip_prefix_from_dict_values(self.info_dict, strip_prefix)
dictio.write_serialized_object(self.info_dict, destination)
def quit(self):
if self.log_file_path:
self.log('Bye.')
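# Illustrative usage sketch (not from the original file); the file names are
# hypothetical.
# run = Run()
# run.info("Input FASTA", "contigs.fa")
# run.info_single("All contig names look clean", level=2)
# run.warning("Short contigs were dropped from the analysis", header="FYI")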
class Timer:
"""Manages an ordered dictionary, where each key is a checkpoint name and value is a timestamp.
Examples
========
>>> from anvio.terminal import Timer
>>> import time
>>> t = Timer(); time.sleep(1)
>>> t.make_checkpoint('checkpoint_name'); time.sleep(1)
>>> timedelta = t.timedelta_to_checkpoint(timestamp=t.timestamp(), checkpoint_key='checkpoint_name')
>>> print(t.format_time(timedelta, fmt = '{days} days, {hours} hours, {seconds} seconds', zero_padding=0))
>>> print(t.time_elapsed())
0 days, 0 hours, 1 seconds
00:00:02
>>> t = Timer(3) # 3 checkpoints expected until completion
>>> for _ in range(3):
>>> time.sleep(1); t.make_checkpoint()
>>> print('complete: %s' % t.complete)
>>> print(t.eta(fmt='ETA: {seconds} seconds'))
complete: False
ETA: 02 seconds
complete: False
ETA: 01 seconds
complete: True
ETA: 00 seconds
"""
def __init__(self, required_completion_score=None, initial_checkpoint_key=0, score=0):
self.timer_start = self.timestamp()
self.initial_checkpoint_key = initial_checkpoint_key
self.last_checkpoint_key = self.initial_checkpoint_key
self.checkpoints = OrderedDict([(initial_checkpoint_key, self.timer_start)])
self.num_checkpoints = 0
self.required_completion_score = required_completion_score
self.score = score
self.complete = False
self.last_eta = None
self.last_eta_timestamp = self.timer_start
self.scores = {self.initial_checkpoint_key: self.score}
def timestamp(self):
return datetime.datetime.fromtimestamp(time.time())
def timedelta_to_checkpoint(self, timestamp, checkpoint_key=None):
if not checkpoint_key: checkpoint_key = self.initial_checkpoint_key
timedelta = timestamp - self.checkpoints[checkpoint_key]
return timedelta
def make_checkpoint(self, checkpoint_key = None, increment_to = None):
if not checkpoint_key:
checkpoint_key = self.num_checkpoints + 1
if checkpoint_key in self.checkpoints:
raise TerminalError('Timer.make_checkpoint :: %s already exists as a checkpoint key. '
'All keys must be unique' % (str(checkpoint_key)))
checkpoint = self.timestamp()
self.checkpoints[checkpoint_key] = checkpoint
self.last_checkpoint_key = checkpoint_key
self.num_checkpoints += 1
if increment_to:
self.score = increment_to
else:
self.score += 1
self.scores[checkpoint_key] = self.score
if self.required_completion_score and self.score >= self.required_completion_score:
self.complete = True
return checkpoint
def gen_report(self, title='Time Report', run=Run()):
checkpoint_last = self.initial_checkpoint_key
run.warning('', header=title, lc='yellow', nl_before=1, nl_after=0)
for checkpoint_key, checkpoint in self.checkpoints.items():
if checkpoint_key == self.initial_checkpoint_key:
continue
run.info(str(checkpoint_key), '+%s' % self.timedelta_to_checkpoint(checkpoint, checkpoint_key=checkpoint_last))
checkpoint_last = checkpoint_key
run.info('Total elapsed', '=%s' % self.timedelta_to_checkpoint(checkpoint, checkpoint_key=self.initial_checkpoint_key))
def gen_dataframe_report(self):
"""Returns a dataframe"""
d = {'key': [], 'time': [], 'score': []}
for checkpoint_key, checkpoint in self.checkpoints.items():
d['key'].append(checkpoint_key)
d['time'].append(checkpoint)
d['score'].append(self.scores[checkpoint_key])
return pd.DataFrame(d)
def gen_file_report(self, filepath):
"""Writes to filepath, will overwrite"""
self.gen_dataframe_report().to_csv(filepath, sep='\t', index=False)
def calculate_time_remaining(self, infinite_default = '∞:∞:∞'):
if self.complete:
return datetime.timedelta(seconds = 0)
if not self.required_completion_score:
return None
if not self.score:
return infinite_default
time_elapsed = self.checkpoints[self.last_checkpoint_key] - self.checkpoints[0]
fraction_completed = self.score / self.required_completion_score
time_remaining_estimate = time_elapsed / fraction_completed - time_elapsed
return time_remaining_estimate
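    # Worked example (illustrative, not from the original file): with 3 of 12
    # items done after 30 seconds, fraction_completed = 0.25, so the estimate is
    # 30 / 0.25 - 30 = 90 seconds remaining.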
def eta(self, fmt=None, zero_padding=0):
# Calling format_time hundreds or thousands of times per second is expensive. Therefore if
# eta was called within the last half second, the previous ETA is returned without further
# calculation.
eta_timestamp = self.timestamp()
if eta_timestamp - self.last_eta_timestamp < datetime.timedelta(seconds = 0.5) and self.num_checkpoints > 0:
return self.last_eta
eta = self.calculate_time_remaining()
eta = self.format_time(eta, fmt, zero_padding) if isinstance(eta, datetime.timedelta) else str(eta)
self.last_eta = eta
self.last_eta_timestamp = eta_timestamp
return eta
def time_elapsed(self, fmt=None):
return self.format_time(self.timedelta_to_checkpoint(self.timestamp(), checkpoint_key = 0), fmt=fmt)
def format_time(self, timedelta, fmt = '{hours}:{minutes}:{seconds}', zero_padding = 2):
"""Formats time
Examples of `fmt`. Suppose the timedelta is seconds = 1, minutes = 1, hours = 1.
{hours}h {minutes}m {seconds}s --> 01h 01m 01s
{seconds} seconds --> 3661 seconds
{weeks} weeks {minutes} minutes --> 0 weeks 61 minutes
{hours}h {seconds}s --> 1h 61s
"""
unit_hierarchy = ['seconds', 'minutes', 'hours', 'days', 'weeks']
unit_denominations = {'weeks': 7, 'days': 24, 'hours': 60, 'minutes': 60, 'seconds': 1}
if not fmt:
# use the highest two non-zero units, e.g. if it is 7200s, use {hours}h{minutes}m
seconds = int(timedelta.total_seconds())
if seconds < 60:
fmt = '{seconds}s'
else:
m = 1
for i, unit in enumerate(unit_hierarchy):
if not seconds // (m * unit_denominations[unit]) >= 1:
fmt = '{%s}%s{%s}%s' % (unit_hierarchy[i-1],
unit_hierarchy[i-1][0],
unit_hierarchy[i-2],
unit_hierarchy[i-2][0])
break
elif unit == unit_hierarchy[-1]:
fmt = '{%s}%s{%s}%s' % (unit_hierarchy[i],
unit_hierarchy[i][0],
unit_hierarchy[i-1],
unit_hierarchy[i-1][0])
break
else:
m *= unit_denominations[unit]
# parse units present in fmt
format_order = []
for i, x in enumerate(fmt):
if x == '{':
for j, k in enumerate(fmt[i:]):
if k == '}':
unit = fmt[i+1:i+j]
format_order.append(unit)
break
if not format_order:
raise TerminalError('Timer.format_time :: fmt = \'%s\' contains no time units.' % (fmt))
for unit in format_order:
if unit not in unit_hierarchy:
raise TerminalError('Timer.format_time :: \'%s\' is not a valid unit. Use any of %s.'\
% (unit, ', '.join(unit_hierarchy)))
# calculate the value for each unit (e.g. 'seconds', 'days', etc) found in fmt
format_values_dict = {}
smallest_unit = unit_hierarchy[[unit in format_order for unit in unit_hierarchy].index(True)]
units_less_than_or_equal_to_smallest_unit = unit_hierarchy[::-1][unit_hierarchy[::-1].index(smallest_unit):]
seconds_in_base_unit = 1
for a in [v for k, v in unit_denominations.items() if k in units_less_than_or_equal_to_smallest_unit]:
seconds_in_base_unit *= a
r = int(timedelta.total_seconds()) // seconds_in_base_unit
for i, lower_unit in enumerate(unit_hierarchy):
if lower_unit in format_order:
m = 1
for upper_unit in unit_hierarchy[i+1:]:
m *= unit_denominations[upper_unit]
if upper_unit in format_order:
format_values_dict[upper_unit], format_values_dict[lower_unit] = divmod(r, m)
break
else:
format_values_dict[lower_unit] = r
break
r = format_values_dict[upper_unit]
format_values = [format_values_dict[unit] for unit in format_order]
style_str = '0' + str(zero_padding) if zero_padding else ''
for unit in format_order:
fmt = fmt.replace('{%s}' % unit, '%' + '%s' % (style_str) + 'd')
formatted_time = fmt % (*[format_value for format_value in format_values],)
return formatted_time
def _test_format_time(self):
"""Run this and visually inspect its working"""
run = Run()
for exponent in range(1, 7):
seconds = 10 ** exponent
td = datetime.timedelta(seconds = seconds)
run.warning('', header='TESTING %s' % td, lc='yellow')
fmts = [
None,
"SECONDS {seconds}",
"MINUTES {minutes}",
"HOURS {hours}",
"DAYS {days}",
"WEEKS {weeks}",
"MINUTES {minutes} SECONDS {seconds}",
"SECONDS {seconds} MINUTES {minutes}",
"HOURS {hours} MINUTES {minutes}",
"DAYS {days} HOURS {hours}",
"WEEKS {weeks} DAYS {days}",
"WEEKS {weeks} HOURS {hours}",
"WEEKS {weeks} MINUTES {minutes}",
"DAYS {days} MINUTES {minutes}",
"HOURS {hours} SECONDS {seconds}",
"DAYS {days} MINUTES {minutes} SECONDS {seconds}",
"WEEKS {weeks} HOURS {hours} DAYS {days} SECONDS {seconds} MINUTES {minutes}",
]
for fmt in fmts:
run.info(str(fmt), self.format_time(td, fmt=fmt))
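# --- Illustrative usage sketch; not part of the original anvio module. It shows one
# way to drive the Timer class above: checkpoint after each unit of work, then ask
# for an ETA and a per-checkpoint dataframe report. The loop body, sleep durations,
# and checkpoint keys are made-up placeholders, and `required_completion_score` is
# assumed to be the constructor keyword assigned in __init__ above.
def _example_timer_usage(num_items=3):
    """Sketch: time a small loop with Timer and report progress."""
    timer = Timer(required_completion_score=num_items)
    for i in range(num_items):
        time.sleep(0.6)  # stand-in for real work; long enough for eta() to recompute
        timer.make_checkpoint(checkpoint_key='item_%d' % i)  # also increments the score by 1
        print('ETA after item %d: %s' % (i, timer.eta()))
    print('Total elapsed: %s' % timer.timedelta_to_checkpoint(timer.timestamp()))
    return timer.gen_dataframe_report()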
class TimeCode(object):
"""Time a block of code.
This context manager times blocks of code, and calls run.info afterwards to report
the time (unless quiet = True). See also time_program()
Parameters
==========
sc: 'green'
run info color with no runtime error
success_msg: None
If None, it is set to 'Code finished after '
fc: 'red'
run info color with runtime error
failure_msg: None
If None, it is set to 'Code encountered error after '
run: Run()
Provide a pre-existing Run instance if you want
quiet: False,
If True, run.info is not called and datetime object is stored
as `time` (see examples)
suppress_first: 0,
Suppress output if the code finishes within this many seconds.
Examples
========
>>> import time
>>> import anvio.terminal as terminal
>>> # EXAMPLE 1
>>> with terminal.TimeCode() as t:
>>> time.sleep(5)
✓ Code finished after 05s
>>> # EXAMPLE 2
>>> with terminal.TimeCode() as t:
>>> time.sleep(5)
>>> print(asdf) # undefined variable
✖ Code encountered error after 05s
>>> # EXAMPLE 3
>>> with terminal.TimeCode(quiet=True) as t:
>>> time.sleep(5)
>>> print(t.time)
0:00:05.000477
"""
def __init__(self, success_msg=None, sc='green', fc='red', failure_msg=None, run=Run(), quiet=False, suppress_first=0):
self.run = run
self.run.single_line_prefixes = {0: '✓ ', 1: '✖ '}
self.quiet = quiet
self.suppress_first = suppress_first
self.sc, self.fc = sc, fc
self.s_msg, self.f_msg = success_msg, failure_msg
self.s_msg = self.s_msg if self.s_msg else 'Code finished after '
self.f_msg = self.f_msg if self.f_msg else 'Code encountered error after '
def __enter__(self):
self.timer = Timer()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.time = self.timer.timedelta_to_checkpoint(self.timer.timestamp())
if self.quiet or self.time <= datetime.timedelta(seconds=self.suppress_first):
return
return_code = 0 if exception_type is None else 1
msg, color = (self.s_msg, self.sc) if not return_code else (self.f_msg, self.fc)
self.run.info_single(msg + str(self.time), nl_before=1, mc=color, level=return_code)
def time_program(program_method):
"""A decorator used to time anvio programs.
For a concrete example, see `bin/anvi-profile`.
Examples
========
>>> import anvio.terminal as terminal
>>> @terminal.time_program
>>> def main(args):
>>> <do stuff>
>>> if __name__ == '__main__':
>>> <do stuff>
>>> main(args)
"""
import inspect
program_name = os.path.basename(inspect.getfile(program_method))
TimeCode_params = {
'success_msg': '%s took ' % program_name,
'failure_msg': '%s encountered an error after ' % program_name,
'suppress_first': 3, # avoid clutter when program finishes or fails within 3 seconds
}
def wrapper(*args, **kwargs):
with TimeCode(**TimeCode_params):
program_method(*args, **kwargs)
return wrapper
class TrackMemory(object):
"""Track the total memory over time
Parameters
==========
at_most_every : int or float, 5
Memory is only calculated at most every 5 seconds, despite how many times self.measure is
called
"""
def __init__(self, at_most_every=5):
self.t = None
self.at_most_every = at_most_every
def start(self):
self.t = Timer(score=self._get_mem())
return self.get_last(), self.get_last_diff()
def measure(self):
if self.t is None:
raise TerminalError("TrackMemory :: You must start the tracker with self.start()")
if self.t.timedelta_to_checkpoint(self.t.timestamp(), self.t.last_checkpoint_key) < datetime.timedelta(seconds = self.at_most_every):
return False
self.t.make_checkpoint(increment_to=self._get_mem())
return True
def gen_report(self):
df = self.t.gen_dataframe_report().rename(columns={'score': 'bytes'}).set_index('key', drop=True)
df['memory'] = df['bytes'].apply(self._format)
return df
def get_last(self):
"""Get the memory of the last measurement"""
return self._format(self.t.scores[self.t.last_checkpoint_key])
def get_last_diff(self):
"""Get the memory difference between the two latest measurements"""
last_key = self.t.last_checkpoint_key
if last_key == 0:
return '+??'
return self._format_diff(self._diff(last_key, last_key - 1))
def _diff(self, key2, key1):
return self.t.scores[key2] - self.t.scores[key1]
def _format(self, mem):
if np.isnan(mem):
return '??'
formatted = anvio.utils.human_readable_file_size(abs(mem))
return ('-' if mem < 0 else '') + formatted
def _format_diff(self, mem):
if np.isnan(mem):
return '+??'
formatted = anvio.utils.human_readable_file_size(abs(mem))
return ('-' if mem < 0 else '+') + formatted
def _get_mem(self):
mem = anvio.utils.get_total_memory_usage(keep_raw=True)
if mem is None:
return np.nan
return mem
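# --- Illustrative usage sketch; not part of the original anvio module. It shows the
# intended call pattern for TrackMemory: start(), periodic measure() calls while work
# happens, then gen_report(). The allocations and sleep durations are placeholders.
def _example_track_memory_usage():
    """Sketch: sample this process's memory while doing some (fake) work."""
    mem_tracker = TrackMemory(at_most_every=1)
    mem, mem_diff = mem_tracker.start()
    print('Starting memory: %s (%s)' % (mem, mem_diff))
    data = []
    for _ in range(10):
        data.append(bytearray(10 ** 6))  # stand-in for real allocations
        time.sleep(0.2)
        if mem_tracker.measure():  # returns False when called more often than `at_most_every`
            print('Now: %s (%s)' % (mem_tracker.get_last(), mem_tracker.get_last_diff()))
    return mem_tracker.gen_report()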
def pretty_print(n):
"""Pretty print function for very big integers"""
if not isinstance(n, int):
return n
ret = []
n = str(n)
for i in range(len(n) - 1, -1, -1):
ret.append(n[i])
if (len(n) - i) % 3 == 0:
ret.append(',')
ret.reverse()
return ''.join(ret[1:]) if ret[0] == ',' else ''.join(ret)
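# Illustrative examples for pretty_print (not part of the original module):
#   >>> pretty_print(1234567)
#   '1,234,567'
#   >>> pretty_print('not an int')   # non-integers are returned unchanged
#   'not an int'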
def tabulate(*args, **kwargs):
"""
Uses the function `tabulate` in the `tabulate` module to tabulate data. This function behaves
almost identically, but exists because currently multiline cells that have ANSI colors break the
formatting of the table grid. These issues can be tracked to assess the status of this bug and
whether or not it has been fixed:
https://bitbucket.org/astanin/python-tabulate/issues/170/ansi-color-code-doesnt-work-with-linebreak
https://bitbucket.org/astanin/python-tabulate/issues/176/ansi-color-codes-create-issues-with
Until then, this overwrites a function in the module to preserve formatting when using multiline
cells with ANSI color codes.
"""
import tabulate
def _align_column(strings, alignment, minwidth=0, has_invisible=True, enable_widechars=False, is_multiline=False):
strings, padfn = tabulate._align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = tabulate._choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
lines = [line.splitlines() for line in strings]
lines_pad = [[(s, maxwidth + len(s) - width_fn(s)) for s in group]
for group in lines]
padded_strings = ["\n".join([padfn(w, s) for s, w in group])
for group in lines_pad]
else:
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
tabulate._align_column = _align_column
return tabulate.tabulate(*args, **kwargs)
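# --- Illustrative usage sketch; not part of the original anvio module. The wrapper is
# called exactly like tabulate.tabulate(); the table contents below are placeholders.
def _example_tabulate_usage():
    """Sketch: render a small table through the patched multiline/ANSI-safe alignment."""
    table = [['contig_1', 15000], ['contig_2', 4200]]
    return tabulate(table, headers=['name', 'length'], tablefmt='fancy_grid')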
def get_date():
return time.strftime("%d %b %y %H:%M:%S", time.localtime())
def get_terminal_size():
"""function was taken from http://stackoverflow.com/a/566752"""
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
class Logger:
"""Utility class that makes it easier to use Anvio's nice logging in command runners."""
def __init__(self, run=Run(), progress=Progress()):
self.run = run
self.progress = progress
| gpl-3.0 |
expectocode/telegram-analysis | venn_chatlog.py | 2 | 3984 | #!/usr/bin/env python3
"""
A program to plot the overlap of chats
"""
import argparse
from json import loads
from os import path
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn3
from collections import defaultdict
def get_active_users(filepath):
minimum = 3
counter = defaultdict(int) #store events from each user
#names = {} #dict
active_users = set()
with open(filepath, 'r') as jsonfile:
events = (loads(line) for line in jsonfile)
for event in events:
if "from" in event:
if "id" in event["from"] and "print_name" in event["from"]:
user = event['from']['id']
counter[user] += 1
for person, frequency in counter.items():
if frequency > minimum:
active_users.add(person)
return active_users
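# Illustrative note (not part of the original script): get_active_users() expects a
# Telegram chat log with one JSON event per line; 'chatlog.jsonl' is a placeholder path.
#   >>> active = get_active_users('chatlog.jsonl')
#   >>> len(active)   # number of users with more than `minimum` (3) events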
def main():
"""
main function
"""
parser = argparse.ArgumentParser(
description="Visualise the overlap between 2 or 3 chats \n but note that the program is not truly accurate as it counts users who have left to be part of a chat. Also note that for 3 chats, perfect geometry may be impossible.")
parser.add_argument(
'-f','--files',
help='paths to the json file(s) (chat logs) to analyse. Note these must be at the end of the arguments.',
nargs='+',
required = True)
parser.add_argument('-a','--active-users',
help='Only look at active users (users who have sent more than 3 messages)',
action='store_true')
parser.add_argument('-o', '--output-folder',
help='the folder to save the graph image in')
args = parser.parse_args()
filepaths = args.files
savefolder = args.output_folder
filenames = []
users = [set() for filepath in filepaths]
#create a list of users for each chat
for index,filepath in enumerate(filepaths):
_, temp = path.split(filepath)
filenames.append(temp)
filenames[filepaths.index(filepath)] , _ = path.splitext(
filenames[filepaths.index(filepath)] )
print(filenames[index], "users:")
with open(filepath, 'r') as jsonfile:
events = (loads(line) for line in jsonfile)
#generator, so whole file is not put in mem
#collect the ids of users who were added to the chat
for event in events:
if "action" in event and event["action"]["type"] == "chat_add_user":
#print(event['action']['user']['id'], ":", event['action']['user']['print_name'])
users[index].add(event['from']['id'])
elif "action" in event and event['action']['type'] == 'chat_add_user_link':
#print(event['from']['id'], ":", event['from']['print_name'])
users[index].add(event['from']['id'])
#print("index:",index)
#print("len(users):",len(users))
if args.active_users:
active = get_active_users(filepath)
users[index] = users[index] & active
print(len(users[index]),"users")
if len(users) == 2:
venn2([users[0], users[1]],(filenames[0], filenames[1]))
elif len(users) == 3:
venn3([users[0], users[1], users[2]],(filenames[0], filenames[1], filenames[2]))
#print(users)
if savefolder is not None:
#if there is a given folder to save the figure in, save it there
names_string = '_'.join(filenames)
if len(names_string) > 200:
#file name likely to be so long as to cause issues
figname = input(
"This diagram is going to have a very long file name. Please enter a custom name(no need to add an extension): ")
else:
figname = 'User overlap in {}'.format(names_string).replace('/','_')
plt.savefig("{}/{}.png".format(savefolder, figname))
else:
plt.show()
if __name__ == "__main__":
main()
| mit |
DSLituiev/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 34 | 25693 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/test_common.py | 2 | 7579 | # -*- coding: utf-8 -*-
import pytest
import os
import collections
from functools import partial
import numpy as np
from pandas import Series, DataFrame, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
from pandas.core import ops
from pandas.io.common import _get_handle
import pandas.util.testing as tm
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assert_raises_regex(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
assert com._mut_exclusive(a=None, b=2) == 2
def test_get_callable_name():
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com._random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com._random_state('test')
with pytest.raises(ValueError):
com._random_state(5.5)
@pytest.mark.parametrize('left, right, expected', [
(Series([1], name='x'), Series([2], name='x'), 'x'),
(Series([1], name='x'), Series([2], name='y'), None),
(Series([1]), Series([2], name='x'), None),
(Series([1], name='x'), Series([2]), None),
(Series([1], name='x'), [2], 'x'),
([1], Series([2], name='y'), 'y')])
def test_maybe_match_name(left, right, expected):
assert ops._maybe_match_name(left, right) == expected
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
def test_standardize_mapping():
# No uninitialized defaultdicts
with pytest.raises(TypeError):
com.standardize_mapping(collections.defaultdict)
# No non-mapping subtypes, instance
with pytest.raises(TypeError):
com.standardize_mapping([])
# No non-mapping subtypes, class
with pytest.raises(TypeError):
com.standardize_mapping(list)
fill = {'bad': 'data'}
assert (com.standardize_mapping(fill) == dict)
# Convert instance to type
assert (com.standardize_mapping({}) == dict)
dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)
@pytest.mark.parametrize('obj', [
DataFrame(100 * [[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
columns=['X', 'Y', 'Z']),
Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as filename:
getattr(obj, method)(filename, compression=compression_only)
compressed = os.path.getsize(filename)
getattr(obj, method)(filename, compression=None)
uncompressed = os.path.getsize(filename)
assert uncompressed > compressed
@pytest.mark.parametrize('obj', [
DataFrame(100 * [[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
columns=['X', 'Y', 'Z']),
Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
@pytest.mark.parametrize('method', ['to_csv', 'to_json'])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as filename:
f, _handles = _get_handle(filename, 'w', compression=compression_only)
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
compressed = os.path.getsize(filename)
with tm.ensure_clean() as filename:
f, _handles = _get_handle(filename, 'w', compression=None)
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
uncompressed = os.path.getsize(filename)
assert uncompressed > compressed
# GH 21227
def test_compression_warning(compression_only):
df = DataFrame(100 * [[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as filename:
f, _handles = _get_handle(filename, 'w', compression=compression_only)
with tm.assert_produces_warning(RuntimeWarning,
check_stacklevel=False):
with f:
df.to_csv(f, compression=compression_only)
| bsd-3-clause |
YeoLab/anchor | anchor/bayesian.py | 1 | 10289 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.misc import logsumexp
from .names import NEAR_ZERO, NEAR_HALF, NEAR_ONE, BIMODAL, NULL_MODEL
from .model import ModalityModel
from .visualize import MODALITY_TO_CMAP, _ModelLoglikPlotter, MODALITY_ORDER
CHANGING_PARAMETERS = np.arange(2, 21, step=1)
TWO_PARAMETER_MODELS = {
BIMODAL: {'alphas': 1. / (CHANGING_PARAMETERS + 10),
'betas': 1./(CHANGING_PARAMETERS+10)},
NEAR_HALF: {'alphas': CHANGING_PARAMETERS,
'betas': CHANGING_PARAMETERS}}
ONE_PARAMETER_MODELS = {
NEAR_ZERO: {'alphas': 1, 'betas': CHANGING_PARAMETERS},
NEAR_ONE: {'alphas': CHANGING_PARAMETERS, 'betas': 1}
}
class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
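# Illustrative usage sketch (not part of the original anchor source). The PSI values
# below are made up; any (n_samples, n_events) frame of values in [0, 1] works.
#   >>> import numpy as np, pandas as pd
#   >>> psi = pd.DataFrame({'event_1': np.random.beta(0.2, 0.2, size=50),
#   ...                     'event_2': np.random.beta(10, 10, size=50)})
#   >>> bm = BayesianModalities()
#   >>> log2_bf = bm.fit(psi)              # (n_modalities, n_events) log2 Bayes factors
#   >>> assignments = bm.predict(log2_bf)  # or equivalently bm.fit_predict(psi)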
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e417.py | 2 | 6371 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
max_diff = 100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
# standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-6,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': BidirectionalRecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.9),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
net.load_params(5000)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
Kamp9/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd=rvs
f,l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
anisridhar/Raga-Identification-Program | ragas.py | 1 | 2814 | import matplotlib.pyplot as plt
from scipy.io import wavfile # get the api
from scipy.fftpack import fft
from pylab import *
import os
import math
import contextlib # for urllib.urlopen()
import urllib
import os
################################
#### Ragas.py functions ########
################################
def readFile(filename, mode="rt"):
# rt = "read text"
with open(filename, mode) as fin:
return fin.read()
def addRagasToDict(textfile):
#returns dictionary
#maps raga name to list containing its constituent notes (only Arohanam)
ragaDict = dict()
text = readFile(textfile)
ragaList = text.splitlines()
for raga in ragaList:
nameStartIndex = raga.index("|")
nameEndIndex = raga.index("|",nameStartIndex+1)
name = raga[nameStartIndex+1:nameEndIndex].strip()
notes = raga[nameEndIndex+1:].strip()
notesList = notes.split()
#G1 should become R2
#N1 should become D2
for i in xrange(len(notesList)):
if notesList[i] == 'G1':
notesList[i] = 'R2'
elif notesList[i] == 'N1':
notesList[i] = 'D2'
ragaDict[name] = notesList
return ragaDict
def isRagam2(notesList, thresholdPercentage, ragam):
#takes in a list of notes, thresholdPercentage, ragam name (string)
#to determine whether a list of notes is a particular ragam
ragaDict = addRagasToDict("RagaDatabase.txt")
ragaNotes = ragaDict[ragam] #raga arohanam
numRagam = 0
for note in notesList:
if note in ragaNotes:
numRagam += 1
percentageRagam = numRagam*1.0/len(notesList)
return percentageRagam >= thresholdPercentage
def findPosRagams(notesList, thresholdPercentage):
ragaDict = addRagasToDict("RagaDatabase.txt")
posRagas = []
for ragam in ragaDict:
ragaNotes = ragaDict[ragam]
numRagam = 0
for note in notesList:
if note in ragaNotes:
numRagam += 1
percentageRagam = numRagam*1.0/len(notesList)
if percentageRagam >= thresholdPercentage:
posRagas += [ragam]
return posRagas
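# Illustrative usage sketch (not part of the original script). The swara list and the
# 0.9 threshold are made up, and 'Mohanam' is assumed to be a name in RagaDatabase.txt.
#   >>> notes = ['S', 'R2', 'G3', 'P', 'D2', 'S', 'G3', 'P']
#   >>> isRagam2(notes, 0.9, 'Mohanam')   # True if >= 90% of the notes belong to the ragam
#   >>> findPosRagams(notes, 0.9)         # all ragas whose arohanam covers >= 90% of the notes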
def readFile(filename, mode="rt"):
# rt = "read text"
with open(filename, mode) as fin:
return fin.read()
def frequencyToNote(freq):
lowSa = 636
a = 1.057994353 #factor to get to new notes
k = math.log(freq*1.0/lowSa, a)
k = int(round(k))
notesList = (["S", "R1", "R2", "G2", "G3",
"M1", "M2", "P", "D1", "D2", "N2", "N3"])
return notesList[k%12]
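# Illustrative examples (not part of the original script), given lowSa = 636 Hz:
#   >>> frequencyToNote(636)   # k rounds to 0
#   'S'
#   >>> frequencyToNote(954)   # 3/2 * lowSa, the perfect fifth; k rounds to 7
#   'P'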
def windowFunction(n):
timeInterval = 250 #0.5 s = 500 mS
endTime = n*timeInterval
return endTime
#mohanam = ["S", "R2", "G3", "P", "D2"]
#madhyamavathi = ["S", "R2", "M1", "P", "N2"]
#hindolam = ["S", "G1", "M1", "D1", "N1"]
| mit |
davande/runipy | runipy/main.py | 2 | 4898 |
from __future__ import print_function
import argparse
from sys import stderr, stdout, stdin, exit
import os.path
import logging
import codecs
import runipy
from runipy.notebook_runner import NotebookRunner, NotebookError
from IPython.nbformat.current import read, write
from IPython.config import Config
from IPython.nbconvert.exporters.html import HTMLExporter
def main():
log_format = '%(asctime)s %(levelname)s: %(message)s'
log_datefmt = '%m/%d/%Y %I:%M:%S %p'
parser = argparse.ArgumentParser()
parser.add_argument('--version', '-v', action='version', version=runipy.__version__,
help='print version information')
parser.add_argument('input_file', nargs='?',
help='.ipynb file to run (or stdin)')
parser.add_argument('output_file', nargs='?',
help='.ipynb file to save cell output to')
parser.add_argument('--quiet', '-q', action='store_true',
help='don\'t print anything unless things go wrong')
parser.add_argument('--overwrite', '-o', action='store_true',
help='write notebook output back to original notebook')
parser.add_argument('--html', nargs='?', default=False,
help='output an HTML snapshot of the notebook')
parser.add_argument('--template', nargs='?', default=False,
help='template to use for HTML output')
parser.add_argument('--pylab', action='store_true',
help='start notebook with pylab enabled')
parser.add_argument('--matplotlib', action='store_true',
help='start notebook with matplotlib inlined')
parser.add_argument('--skip-exceptions', '-s', action='store_true',
help='if an exception occurs in a cell, continue running the subsequent cells')
parser.add_argument('--stdout', action='store_true',
help='print notebook to stdout (or use - as output_file)')
parser.add_argument('--stdin', action='store_true',
help='read notebook from stdin (or use - as input_file)')
parser.add_argument('--no-chdir', action='store_true',
help="do not change directory to notebook's at kernel startup")
parser.add_argument('--profile-dir',
help="set the profile location directly")
args = parser.parse_args()
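# Typical invocations (illustrative; assumes this module is exposed as the
# `runipy` console script):
#   runipy MyNotebook.ipynb                     # run, report errors
#   runipy MyNotebook.ipynb Output.ipynb        # run and save cell output
#   runipy -o MyNotebook.ipynb                  # run and overwrite in place
#   runipy MyNotebook.ipynb --html report.html  # also save an HTML snapshot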
if args.overwrite:
if args.output_file is not None:
print('Error: output_filename must not be provided if '
'--overwrite (-o) given', file=stderr)
exit(1)
else:
args.output_file = args.input_file
if not args.quiet:
logging.basicConfig(level=logging.INFO, format=log_format, datefmt=log_datefmt)
working_dir = None
if args.input_file == '-' or args.stdin: # force stdin
payload = stdin
elif not args.input_file and stdin.isatty(): # no force, empty stdin
parser.print_help()
exit()
elif not args.input_file: # no file -> default stdin
payload = stdin
else: # must have specified normal input_file
payload = open(args.input_file)
working_dir = os.path.dirname(args.input_file)
if args.no_chdir:
working_dir = None
if args.profile_dir:
profile_dir = os.path.expanduser(args.profile_dir)
else:
profile_dir = None
logging.info('Reading notebook %s', payload.name)
nb = read(payload, 'json')
nb_runner = NotebookRunner(nb, args.pylab, args.matplotlib, profile_dir, working_dir)
exit_status = 0
try:
nb_runner.run_notebook(skip_exceptions=args.skip_exceptions)
except NotebookError:
exit_status = 1
if args.output_file and args.output_file != '-':
logging.info('Saving to %s', args.output_file)
write(nb_runner.nb, open(args.output_file, 'w'), 'json')
if args.stdout or args.output_file == '-':
write(nb_runner.nb, stdout, 'json')
print()
if args.html is not False:
if args.html is None:
# if --html is given but no filename is provided,
# come up with a sane output name based on the
# input filename
if args.input_file.endswith('.ipynb'):
args.html = args.input_file[:-6] + '.html'
else:
args.html = args.input_file + '.html'
if args.template is False:
exporter = HTMLExporter()
else:
exporter = HTMLExporter(
config=Config({'HTMLExporter':{'template_file':args.template, 'template_path': ['.', '/']}}))
logging.info('Saving HTML snapshot to %s' % args.html)
output, resources = exporter.from_notebook_node(nb_runner.nb)
codecs.open(args.html, 'w', encoding='utf-8').write(output)
nb_runner.shutdown_kernel()
if exit_status != 0:
logging.warning('Exiting with nonzero exit status')
exit(exit_status)
if __name__ == '__main__':
main()
| bsd-2-clause |
griffincalme/PhosphositeOrthology | pairwise_to_uniprot.py | 1 | 6595 | # requires oma-uniprot.txt
import pandas as pd
import time
import os
from collections import defaultdict
import ast
start_secs = time.time()
def make_dir(path):
try:
os.makedirs(path)
except:
pass
def remove_header_lines(file_in, file_out, junk_string):
with open(file_in) as oldfile, open(file_out, 'w') as newfile:
for line in oldfile:
if junk_string not in line:
newfile.write(line)
def pairwise_to_uniprot():
uniprot_conversion_table = 'oma-uniprot.txt'
conversion_table_clean = 'oma-uniprot_clean.txt' # remove junk header lines
# remove header lines from oma to uniprot conversion table
remove_header_lines(uniprot_conversion_table, conversion_table_clean, '#')
# import oma-uniprot conversion table
conversion_df = pd.read_table(conversion_table_clean, names=['OMA_ID', 'uniprot_ID'])
#print('\nConversion table\n')
#print(conversion_df.head())
# keep only rows without an underscore in the UniProt ID (e.g. 'CDON_HUMAN'); dbPAF does not use this type of UniProt ID anyway
conversion_df = conversion_df[conversion_df.uniprot_ID.str.contains('_') == False]
########
# old dict method overwrites values for duplicate keys
# conversion_dict = list(conversion_df.set_index('OMA_ID').to_dict().values()).pop()
# new method, although slower, preserves all values inside lists
# it works!
# print(def_dict['HUMAN20719']) returns ['P35222', 'A0A024R2Q3'] don't forget underscored items were cleaned out
# make intermediate list groupings of analogous IDs
# {"HUMAN02..." : ['A0A0...', 'Q9H6...', etc...]
# replace OMA id in main df cell with uniprot list
# need default dict object to append values of identical keys to growing value lists
def_dict = defaultdict(list)
for index, row in conversion_df.iterrows():
oma_id = row['OMA_ID']
uniprot_id = row['uniprot_ID']
def_dict[oma_id].append(uniprot_id)
# default dict to regular dict
conversion_dict = dict(def_dict)
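# Illustrative example of the grouping above (IDs taken from the comment
# higher up; actual values depend on oma-uniprot.txt): two rows
# ('HUMAN20719', 'P35222') and ('HUMAN20719', 'A0A024R2Q3') give
# conversion_dict['HUMAN20719'] == ['P35222', 'A0A024R2Q3']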
########
#print('\n\nConversion dictionary built, running ID translation...\n')
#print('\n')
orthologs_output_dir = 'uniprot_PairwiseOrthologs/'
make_dir(orthologs_output_dir)
orthologs_input_dir = 'OMAPairwiseOrthologs/'
for ortho_filename in os.listdir(orthologs_input_dir):
#print(orthologs_input_dir + ortho_filename)
formatted_ortholog = orthologs_output_dir + ortho_filename
# remove junk header lines
remove_header_lines((orthologs_input_dir + ortho_filename), formatted_ortholog, '#')
df = pd.read_table(formatted_ortholog,
names=['protein1', 'protein2', 'organism1', 'organism2', 'orthology_type', 'oma_group'])
df['organism1'] = [i.split(' |')[0] for i in df['organism1']] # keep only OMA identifier
df['organism2'] = [i.split(' |')[0] for i in df['organism2']]
new_name1 = df['organism1'][0][:5]
new_name2 = df['organism2'][0][:5] # new col name is 5-letter initial e.g. 'HUMAN', 'MOUSE, 'CAEEL'
df.columns = df.columns.str.replace('organism1', new_name1)
df.columns = df.columns.str.replace('organism2', new_name2)
# Translate organism OMA ID to UniProt ID in df using conversion dict
df[new_name1].update(df[new_name1].map(conversion_dict))
# Translate human OMA ID to UniProt ID
df[new_name2].update(df[new_name2].map(conversion_dict))
########
# Make new row in df for each possible ID translation in conversion_df
# so "HUMAN02..." --> A0A0....
# --> Q9H6....
# --> etc... up to 8 uniprot ID translations per OMA
# expand lists of uniprot IDs into own rows
########
'''
df_expanded = pd.DataFrame()
for indexA, rowA in df.iterrows():
animal_index = 2
row_2_str = rowA[animal_index] # CAEEL row, etc
if row_2_str[:5] == df.columns.values.tolist()[animal_index]: # if column header 'CAEEL' matches row cell
df_expanded = df_expanded.append(rowA) # then don't interpret as list
else:
literal_row_2 = ast.literal_eval(row_2_str) # interprets ['blah', 'foo', 'bar'] as list rather than string
for i in literal_row_2: # for each element in cell list, make new row
temp_rowA = rowA # make temporary copy to overwrite, need to preserve
temp_rowA[animal_index] = i # overwrite animal column with individual uniprot id
df_expanded = df_expanded.append(temp_rowA) # save new row
df_out = pd.DataFrame()
for indexB, rowB in df_expanded.iterrows():
human_index = 'HUMAN'
row_3_str = rowB[human_index] # HUMAN row
if row_3_str[:5] == 'HUMAN': # maybe should do try/except rather than hard code if/else to detect
df_out = df_out.append(rowB) # leftover OMA IDs that are true strings not lists
else:
literal_row_3 = ast.literal_eval(row_3_str) # interprets ['blah'] as a list rather than a string, [3]rd col
for i in literal_row_3: # for each element in cell list, make new row
temp_rowB = rowB # make temporary copy to overwrite, need to preserve
temp_rowB[human_index] = i # overwrite animal column with individual uniprot id
df_out = df_out.append(temp_rowB) # save new row
########
#df_out.to_csv(orthologs_output_dir + ortho_filename[:-4] + '_UniprotIDs.txt', sep='\t', index=False)
'''
df.to_csv(orthologs_output_dir + ortho_filename[:-4] + '_UniprotIDs.txt', sep='\t', index=False)
# delete temporary 'cleaned pairwise ortholog' files
os.remove(formatted_ortholog)
# delete temporary 'cleaned oma-uniprot conversion' file
os.remove(conversion_table_clean)
if __name__ == '__main__':
pairwise_to_uniprot()
#end_secs = time.time()
#runsecs = end_secs - start_secs
#print(' ')
#print(str(round(runsecs, 2)) + ' seconds')
| mit |
ky822/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
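# Minimal sketch of the out-of-core pattern used in this example
# (illustrative only; the full pipeline below adds parsing, timing and plots):
#
#     vectorizer = HashingVectorizer(n_features=2 ** 18)
#     clf = SGDClassifier()
#     for X_text, y in minibatches:          # each mini-batch fits in memory
#         clf.partial_fit(vectorizer.transform(X_text), y,
#                         classes=all_classes)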
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
matthew-brett/pymc | pymc/examples/gp/more_examples/Geostats/mcmc.py | 2 | 3372 | # ============================================================
# = WARNING: This example is very computationally demanding! =
# ============================================================
# I have set the map resolutions to give nice-looking results, but I am using
# an 8-core, 3.0GHz Apple Mac Pro with 8GB of RAM and with environment variable
# OMP_NUM_THREADS set to 8. If you are using a less powerful machine, you may
# want to change the 'm' parameters
# below.
# The MCMC takes several hours on my machine. To make it run faster, thin the
# dataset in getdata.py
from model import *
# from mpl_toolkits.basemap import Basemap
# from matplotlib import *
from pylab import *
import model
# ====================
# = Do the inference =
# ====================
# Use the HDF5 database backend, because the trace will take up a lot of memory.
# You can use the 'ram' backend instead if you don't have PyTables installed, but
# you should thin the trace more.
WalkerSampler = MCMC(model, db='hdf5')
WalkerSampler.use_step_method(GPEvaluationGibbs, walker_v, V, d)
# WalkerSampler.isample(50000,10000,100)
WalkerSampler.isample(500,100,10)
n = len(WalkerSampler.trace('V')[:])
# ==========================
# = Mean and variance maps =
# ==========================
# This computation is O(m^2)
m = 201
xplot = linspace(x.min(),x.max(),m)
yplot = linspace(y.min(),y.max(),m)
dplot = dstack(meshgrid(xplot,yplot))
Msurf = zeros(dplot.shape[:2])
E2surf = zeros(dplot.shape[:2])
# Get E[v] and E[v**2] over the entire posterior
for i in xrange(n):
# Reset all variables to their values at frame i of the trace
WalkerSampler.remember(0,i)
# Evaluate the observed mean
Msurf_i, Vsurf_i = point_eval(WalkerSampler.walker_v.M_obs.value, WalkerSampler.walker_v.C_obs.value, dplot)
Msurf += Msurf_i/n
# Evaluate the observed covariance with one argument
E2surf += (Vsurf_i + Msurf_i**2)/n
# Get the posterior variance and standard deviation
Vsurf = E2surf - Msurf**2
SDsurf = sqrt(Vsurf)
# Plot mean and standard deviation surfaces
close('all')
imshow(Msurf, extent=[x.min(),x.max(),y.min(),y.max()],interpolation='nearest')
plot(x,y,'r.',markersize=4)
axis([x.min(),x.max(),y.min(),y.max()])
title('Posterior predictive mean surface')
colorbar()
savefig('elevmean.pdf')
figure()
imshow(SDsurf, extent=[x.min(),x.max(),y.min(),y.max()],interpolation='nearest')
plot(x,y,'r.',markersize=4)
axis([x.min(),x.max(),y.min(),y.max()])
title('Posterior predictive standard deviation surface')
colorbar()
savefig('elevvar.pdf')
# ====================
# = Realization maps =
# ====================
# Use thinner input arrays, this computation is O(m^6)!!
m = 101
xplot = linspace(x.min(),x.max(),m)
yplot = linspace(y.min(),y.max(),m)
dplot = dstack(meshgrid(yplot,xplot))
indices = random_integers(n,size=2)
for j,i in enumerate(indices):
# Reset all variables to their values at frame i of the trace
WalkerSampler.remember(0,i)
# Evaluate the Gaussian process realisation
R = WalkerSampler.walker_v.f.value(dplot)
# Plot the realization
figure()
imshow(R,extent=[x.min(),x.max(),y.min(),y.max()],interpolation='nearest')
plot(x,y,'r.',markersize=4)
axis([x.min(),x.max(),y.min(),y.max()])
title('Realization from the posterior predictive distribution')
colorbar()
savefig('elevdraw%i.pdf'%j) | mit |
wlamond/scikit-learn | sklearn/grid_search.py | 5 | 40816 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
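# Illustrative behaviour (not part of the original module):
#   list(ParameterGrid({'a': [1, 2]})) == [{'a': 1}, {'a': 2}]
#   ParameterGrid({'a': [1, 2]})[1] == {'a': 2}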
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. Telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes avoids that. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs: int, default: 1 :
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs: int, default: 1 :
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
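    # Illustrative usage sketch (a hedged example, not a definitive recipe):
    # it assumes scipy.stats and SGDClassifier are available; any estimator
    # exposing ``fit``/``score`` and any distribution object with an ``rvs``
    # method can be substituted.
    #
    #   >>> import scipy.stats as stats
    #   >>> from sklearn.datasets import load_iris
    #   >>> from sklearn.linear_model import SGDClassifier
    #   >>> iris = load_iris()
    #   >>> param_dist = {'alpha': stats.uniform(1e-4, 1e-2)}
    #   >>> search = RandomizedSearchCV(SGDClassifier(), param_dist,
    #   ...                             n_iter=5, random_state=0)
    #   >>> search.fit(iris.data, iris.target)    # doctest: +SKIP
    #   >>> search.best_params_                   # doctest: +SKIP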
| bsd-3-clause |
glennq/scikit-learn | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big, to avoid overly long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/core/categorical.py | 1 | 80425 | # pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_platform_int,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_list_like, is_sequence,
is_scalar,
is_dict_like)
from pandas.core.common import is_null_slice, _maybe_box_datetimelike
from pandas.core.algorithms import factorize, take_1d, unique1d
from pandas.core.accessor import PandasDelegate
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg
from pandas.core.config import get_option
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of values.
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If not given, the resulting categorical will not be ordered.
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype()
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
ordered = dtype.ordered
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = dtype
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = dtype
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
            If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _constructor(self):
return Categorical
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
categories=self.categories,
ordered=self.ordered,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype._update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
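    # Illustrative sketch (assumes pandas imported as pd): casting to a
    # non-categorical dtype falls through to ``np.array``:
    #
    #   >>> pd.Categorical(['a', 'b']).astype(object)
    #   array(['a', 'b'], dtype=object)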
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
if is_datetimelike(self.categories):
return [_maybe_box_datetimelike(x) for x in self]
return np.array(self).tolist()
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
            # recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
        This constructor is useful if you already have codes and categories
        and so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
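    # Illustrative sketch (assumes pandas imported as pd); a code of -1
    # becomes NaN:
    #
    #   >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
    #   [a, b, NaN, a]
    #   Categories (2, object): [a, b]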
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _get_labels(self):
"""
Get the category labels (deprecated).
Deprecated, use .codes!
"""
warn("'labels' is deprecated. Use 'codes' instead", FutureWarning,
stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _codes_for_groupby(self, sort):
"""
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any categories not appearing in
the data. If sort=True, return self.
This method is needed solely to ensure the categorical index of the
GroupBy result has categories in the order of appearance in the data
(GH-8868).
Parameters
----------
sort : boolean
The value of the sort parameter groupby was called with.
Returns
-------
Categorical
If sort=False, the new categories are set to the order of
appearance in codes (unless ordered=True, in which case the
original order is preserved), followed by any unrepresented
categories in the original order.
"""
# Already sorted according to self.categories; all is fine
if sort:
return self
# sort=False should order groups in as-encountered order (GH-8868)
cat = self.unique()
# But for groupby to work, all categories should be present,
# including those missing from the data (GH-13179), which .unique()
# above dropped
cat.add_categories(
self.categories[~self.categories.isin(cat.categories)],
inplace=True)
return self.reorder_categories(cat.categories)
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
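    # Illustrative sketch (assumes pandas imported as pd): values outside the
    # new categories become NaN:
    #
    #   >>> c = pd.Categorical(['a', 'b', 'a'])
    #   >>> c.set_categories(['b', 'c'])
    #   [NaN, b, NaN]
    #   Categories (2, object): [b, c]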
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
            If the new categories do not contain all old category items or if
            they contain any new items
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
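    # Illustrative sketch (assumes pandas imported as pd): the values are
    # unchanged; only the category order (and optionally orderedness) changes:
    #
    #   >>> c = pd.Categorical(['a', 'b'])
    #   >>> c.reorder_categories(['b', 'a'], ordered=True)
    #   [a, b]
    #   Categories (2, object): [b < a]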
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
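    # Illustrative sketch (assumes pandas imported as pd): the new category is
    # appended but initially unused:
    #
    #   >>> pd.Categorical(['a', 'b']).add_categories(['c'])
    #   [a, b]
    #   Categories (3, object): [a, b, c]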
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
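    # Illustrative sketch (assumes pandas imported as pd): values in removed
    # categories turn into NaN:
    #
    #   >>> pd.Categorical(['a', 'b', 'a']).remove_categories(['a'])
    #   [NaN, b, NaN]
    #   Categories (1, object): [b]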
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
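    # Illustrative sketch (assumes pandas imported as pd):
    #
    #   >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    #   >>> c.remove_unused_categories()
    #   [a, b]
    #   Categories (2, object): [a, b]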
def map(self, mapper):
"""Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be Categorical which has
the same order property as the original. Otherwise, the result will
be np.ndarray.
Returns
-------
applied : Categorical or Index.
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
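    # Illustrative sketch (assumes pandas imported as pd): the mapper is
    # applied to the categories, not to every element:
    #
    #   >>> pd.Categorical(['a', 'b']).map(lambda x: x.upper())
    #   [A, B]
    #   Categories (2, object): [A, B]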
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, _ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
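    # Illustrative sketch (assumes pandas imported as pd): shifted-in
    # positions are filled with NaN:
    #
    #   >>> pd.Categorical(['a', 'b', 'c']).shift(1)
    #   [NaN, a, b]
    #   Categories (3, object): [a, b, c]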
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype._validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='v', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Both missing values (-1 in .codes) and NA as a category are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
# String/object and float categories can hold np.nan
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
nan_pos = np.where(isna(self.categories))[0]
# we only have one NA in categories
ret = np.logical_or(ret, self._codes == nan_pos)
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Both missing values (-1 in .codes) and NA as a category are detected.
NA is removed from the categories if present.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
if isna(result.categories).any():
result = result.remove_categories([np.nan])
return result
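    # Illustrative sketch (assumes pandas imported as pd):
    #
    #   >>> pd.Categorical(['a', None, 'b']).dropna()
    #   [a, b]
    #   Categories (2, object): [a, b]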
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is a category.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import isna, Series, CategoricalIndex
obj = (self.remove_categories([np.nan]) if dropna and
isna(self.categories).any() else self)
code, cat = obj._codes, obj.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
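    # Illustrative sketch (assumes pandas imported as pd): every category gets
    # an entry, even with a count of zero:
    #
    #   >>> pd.Categorical(['a', 'b', 'a'],
    #   ...                categories=['a', 'b', 'c']).value_counts()
    #   a    2
    #   b    1
    #   c    0
    #   dtype: int64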
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Returns the indices that would sort the Categorical instance if
'sort_values' was called. This function is implemented to provide
compatibility with numpy ndarray objects.
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
result = np.argsort(self._codes.copy(), kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isna(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
values[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_scalar(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values == -1
if mask.any():
values = values.copy()
if isna(value):
values[mask] = -1
else:
values[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(values, categories=self.categories,
ordered=self.ordered, fastpath=True)
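    # Illustrative sketch (assumes pandas imported as pd): the fill value must
    # already be one of the categories:
    #
    #   >>> pd.Categorical(['a', None, 'b']).fillna('a')
    #   [a, a, b]
    #   Categories (2, object): [a, b]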
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
For internal compatibility with numpy arrays.
"""
# filling must always be None/nan here
# but is passed thru internally
assert isna(fill_value)
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = self._constructor(codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
return self._constructor(values=_codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if com.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
categories=self.categories,
ordered=self.ordered, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ....)
if len(key) == 2:
if not is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after GH7820 is fixed:
# https://github.com/pandas-dev/pandas/issues/7820
# float categories do currently return -1 for np.nan, even if np.nan is
# included in the index -> "repair" this here
if isna(rvalue).any() and isna(self.categories).any():
nan_pos = np.where(isna(self.categories))[0]
lindexer[lindexer == -1] = nan_pos
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
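# Illustrative sketch of the rule above (hypothetical values, not taken from
# the original source):
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c[0] = 'b'   # ok: 'b' is an existing category
#   >>> c[0] = 'z'   # ValueError: set the categories first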
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation """
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
good = self._codes != -1
values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
result = self._constructor(values=values, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = sorted(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
return (self.is_dtype_equal(other) and
np.array_equal(self._codes, other._codes))
def is_dtype_equal(self, other):
"""
Returns True if the categoricals have the same dtype, i.e. the
same categories and the same ordered attribute
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
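# A minimal illustration (hypothetical input; the layout follows the columns
# and index name set above, alignment is approximate):
#   >>> pd.Categorical(['a', 'b', 'a']).describe()
#               counts     freqs
#   categories
#   a                2  0.666667
#   b                1  0.333333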
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
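# For example (hypothetical values):
#   >>> pd.Categorical(['a', 'b']).repeat(2)
#   [a, a, b, b]
#   Categories (2, object): [a, b]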
# The Series.cat accessor
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, values, index, name):
self.categorical = values
self.index = index
self.name = name
self._freeze()
def _delegate_property_get(self, name):
return getattr(self.categorical, name)
def _delegate_property_set(self, name, new_values):
return setattr(self.categorical, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self.categorical.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.categorical, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
@classmethod
def _make_accessor(cls, data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
return CategoricalAccessor(data.values, data.index,
getattr(data, 'name', None),)
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["categories",
"ordered"],
typ='property')
CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
"rename_categories", "reorder_categories", "add_categories",
"remove_categories", "remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"], typ='method')
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if not is_dtype_equal(values.dtype, categories.dtype):
values = _ensure_object(values)
categories = _ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
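# Sketch of the mapping (hypothetical values): with categories Index(['a', 'b']),
# the values ['b', 'a', 'x'] become codes [1, 0, -1], where -1 marks a value
# that is not among the categories.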
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes for `old_categories` to codes for `new_categories`
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
cat = Categorical(values, ordered=True)
categories = cat.categories
codes = cat.codes
return codes, categories
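# e.g. (hypothetical input): _factorize_from_iterable(['b', 'a', 'b']) returns
# the codes array([1, 0, 1], dtype=int8) and the categories Index(['a', 'b']).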
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
| bsd-3-clause |
jreback/pandas | pandas/tests/indexes/categorical/test_equals.py | 2 | 3033 | import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, Index
class TestEquals:
def test_equals_categorical(self):
ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)
assert ci1.equals(ci1)
assert not ci1.equals(ci2)
assert ci1.equals(ci1.astype(object))
assert ci1.astype(object).equals(ci1)
assert (ci1 == ci1).all()
assert not (ci1 != ci1).all()
assert not (ci1 > ci1).all()
assert not (ci1 < ci1).all()
assert (ci1 <= ci1).all()
assert (ci1 >= ci1).all()
assert not (ci1 == 1).all()
assert (ci1 == Index(["a", "b"])).all()
assert (ci1 == ci1.values).all()
# invalid comparisons
with pytest.raises(ValueError, match="Lengths must match"):
ci1 == Index(["a", "b", "c"])
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
ci1 == ci2
with pytest.raises(TypeError, match=msg):
ci1 == Categorical(ci1.values, ordered=False)
with pytest.raises(TypeError, match=msg):
ci1 == Categorical(ci1.values, categories=list("abc"))
# tests
# make sure that we are testing for category inclusion properly
ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"])
assert not ci.equals(list("aabca"))
# Same categories, but different order
# Unordered
assert ci.equals(CategoricalIndex(list("aabca")))
# Ordered
assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
assert not ci.equals(list("aabca"))
assert not ci.equals(CategoricalIndex(list("aabca")))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
assert not ci.equals(list("aabca") + [np.nan])
assert ci.equals(CategoricalIndex(list("aabca") + [np.nan]))
assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True))
assert ci.equals(ci.copy())
def test_equals_categorical_unordered(self):
# https://github.com/pandas-dev/pandas/issues/16603
a = CategoricalIndex(["A"], categories=["A", "B"])
b = CategoricalIndex(["A"], categories=["B", "A"])
c = CategoricalIndex(["C"], categories=["B", "A"])
assert a.equals(b)
assert not a.equals(c)
assert not b.equals(c)
def test_equals_non_category(self):
# GH#37667 Case where other contains a value not among ci's
# categories ("D") and also contains np.nan
ci = CategoricalIndex(["A", "B", np.nan, np.nan])
other = Index(["A", "B", "D", np.nan])
assert not ci.equals(other)
| bsd-3-clause |
jeremyosborne/python | ml/hello.py | 1 | 1430 | # http://scikit-learn.org/stable/tutorial/basic/tutorial.html
import matplotlib.pyplot as plt
# MacOS rendering problem fix: https://stackoverflow.com/questions/29433824/unable-to-import-matplotlib-pyplot-as-plt-in-virtualenv#comment64137123_35107136
import numpy as np
# import pickle
from sklearn import datasets
from sklearn import svm
from sklearn.externals import joblib
print('\n\n\n*** numpy ***\n\n\n')
a = np.arange(15).reshape(3, 5)
print('np.arange(15).reshape(3, 5):\n', a)
print('')
zeros = np.zeros((10, 2))
print('np.zeros((10, 2))\n', zeros)
print('\n\n\n*** sklearn ***\n\n\n')
iris = datasets.load_iris()
digits = datasets.load_digits()
print('datasets must be in (n_samples, n_features) shape.')
print('Shape of original digit images (cannot be consumed by scikit learn):', digits.images.shape)
# Need to flatten each 8x8 image into a vector of length 64.
print('Shape of digits dataset that can be consumed by scikitlearn:', digits.data.shape)
# print('digits descriptor:', digits.DESCR)
clf = svm.SVC(gamma=0.001, C=100.)
# fit == learn
clf.fit(digits.data[:-1], digits.target[:-1])
# predict values based of previous training
x = clf.predict(digits.data[-1:])
print('Predict the value of the last digit:', x)
# Can save (serialize) model data via pickle.
# s = pickle.dumps(clf)
# but this seems not to be recommended;
# instead use their to-file serializer...
joblib.dump(clf, 'saved-classifier.digits.pkl')
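# Round-trip sketch: reload the persisted classifier and confirm it still
# predicts (the filename matches the joblib.dump call above).
clf_restored = joblib.load('saved-classifier.digits.pkl')
print('Restored classifier predicts:', clf_restored.predict(digits.data[-1:]))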
| mit |
kadrlica/pointing | setup.py | 1 | 1215 | from setuptools import setup, find_packages
#import versioneer
import glob
NAME = 'pointing'
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
Programming Language :: Python
Natural Language :: English
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Astronomy
Operating System :: MacOS
Operating System :: POSIX
License :: OSI Approved :: MIT License
"""
URL = 'https://github.com/kadrlica/%s'%NAME
DESC = "Plot telescope pointings"
LONG_DESC = "See %s"%URL
setup(
name=NAME,
#version=versioneer.get_version(),
#cmdclass=versioneer.get_cmdclass(),
version="2.0.0",
url=URL,
author='Alex Drlica-Wagner',
author_email='[email protected]',
scripts = ['bin/pointing','bin/pointing.py'],
install_requires=[
'numpy >= 1.7',
'matplotlib >= 1.2.0',
'basemap >= 1.0.6',
'setuptools',
],
packages=['pointing'],
package_data={'pointing':['data/*.txt','data/*.dat']},
description=DESC,
long_description=LONG_DESC,
platforms='any',
classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f]
)
| mit |
jeffschulte/protein | compare-convergence.py | 2 | 1579 | from __future__ import division
import sys
import numpy as np
import matplotlib
if "show" not in sys.argv:
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pylab
datafile = 'data/shape-p/box-plot--p-0.30-0.30-0.00-0.00-15.00'
def ignoreme(value):
return 0.0
def readbox(name):
data = np.loadtxt(name, converters = {0: ignoreme, 1: ignoreme})
data = data[:,2:]
good = np.zeros((7, len(data[0,:])))
nsections = len(data[:,0])//7
for i in range(7):
# add up over all sections!
good[i,:] = np.sum(data[i*nsections:(i+1)*nsections,:], axis=0)
return good[:,0:]
boxfull = readbox(datafile + '-full_array.dat')
boxexact = readbox(datafile + '-exact.dat')
proteins = ['nATP', 'nADP', 'nE', 'ND', 'NDE', 'NflD', 'NflE']
def print_analysis(which):
pylab.figure()
pylab.title(proteins[which])
pylab.plot(boxfull[which,:])
pylab.plot(boxexact[which,:])
print 'working on', proteins[which]
print '=================='
#print 'full mean', boxfull[which,:].mean()
#print 'full stdev', boxfull[which,:].std()
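# Rough standard error of the mean: treat each block of ~200 samples as one
# effectively independent measurement (an assumed correlation length), so the
# uncertainty scales as std / sqrt(num_uncorrelated).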
num_uncorrelated = len(boxfull[which,:])/200.0
error = boxfull[which,:].std()/np.sqrt(num_uncorrelated)
#print 'full error assuming uncorrelated', error
#print 'exact mean', boxexact[which,:].mean()
#print ''
print 'num_uncorrelated', num_uncorrelated, 'fractional uncertainty', error/boxfull[which,:].mean()
print 'off by', (boxfull[which,:].mean() - boxexact[which,:].mean())/error, 'standard errors'
for i in range(5):
print_analysis(i)
pylab.show()
| mit |
cdegroc/scikit-learn | sklearn/svm/tests/test_sparse.py | 1 | 7784 | import numpy as np
from scipy import linalg
from scipy import sparse
from sklearn import datasets, svm, linear_model
from numpy.testing import assert_array_almost_equal, \
assert_array_equal, assert_equal
from nose.tools import assert_raises, assert_true
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm.tests import test_svm
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
perm = np.random.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_SVC():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear').fit(X, Y)
sp_clf = svm.SVC(kernel='linear').fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
def test_SVC_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
def test_error():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_LinearSVC():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC().fit(X, Y)
sp_clf = svm.LinearSVC().fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
def test_LinearSVC_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC().fit(iris.data, iris.target)
clf = svm.LinearSVC().fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.label_, sp_clf.label_)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.todense()))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(C=180),
svm.LinearSVC(C=len(X)),
svm.SVC(C=len(X))):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
This catches some bugs if the input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.todense(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
def test_sparse_scale_C():
"""Check that sparse LibSVM/LibLinear works ok with scaling of C"""
params = dict(kernel='linear', C=0.1)
classes = [(svm.SVC, params),
(svm.SVR, params),
(svm.NuSVR, params),
(svm.LinearSVC, {}),
(linear_model.LogisticRegression, {})
]
for cls, params in classes:
clf = cls(scale_C=True, **params).fit(X, Y)
clf_no_scale = cls(scale_C=False, **params).fit(X, Y)
sp_clf = cls(scale_C=True, **params).fit(X_sp, Y)
sp_clf_coef_ = sp_clf.coef_
if sparse.issparse(sp_clf_coef_):
sp_clf_coef_ = sp_clf_coef_.todense()
assert_array_almost_equal(clf.coef_, sp_clf_coef_, 5)
error_with_scale = linalg.norm(clf_no_scale.coef_
- sp_clf_coef_) / linalg.norm(clf_no_scale.coef_)
assert_true(error_with_scale > 1e-3)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/backends/backend_pdf.py | 1 | 91548 | # -*- coding: iso-8859-1 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <[email protected]>
"""
import codecs
import os
import re
import sys
import time
import warnings
import zlib
import numpy as np
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from io import StringIO as BytesIO
from datetime import datetime
from math import ceil, cos, floor, pi, sin
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import __version__, rcParams, get_data_path
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, reverse_dict, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, Bbox, BboxBase, TransformedPath
from matplotlib.path import Path
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
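# For instance (illustrative): fill([b'one', b'two', b'three'], linelen=9)
# returns b'one two\nthree'.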
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()': return b'\\' + m
elif m == b'\n': return br'\n'
elif m == b'\r': return br'\r'
assert False
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (int, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, str):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in obj.items()])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight: z = time.altzone
else: z = time.timezone
if z == 0: r += 'Z'
elif z < 0: r += "+%02d'%02d'" % ((-z)//3600, (-z)%3600)
else: r += "-%02d'%02d'" % (z//3600, z%3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
raise TypeError("Don't know a PDF representation for %s objects." \
% type(obj))
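# A few illustrative mappings, derived from the branches above:
#   pdfRepr(True)                      -> b'true'
#   pdfRepr(3.5)                       -> b'3.5'
#   pdfRepr({'Type': Name('Catalog')}) -> b'<< /Type /Catalog >>'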
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + str(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
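# e.g. Name('a b').pdfRepr() == b'/a#20b': characters outside the printable
# '!'..'~' range are hex-escaped by the regex above.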
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
# PDF operators (not an exhaustive list)
_pdfops = dict(close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f',
closepath=b'h', close_stroke=b's', stroke=b'S', endpath=b'n',
begin_text=b'BT', end_text=b'ET',
curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm',
use_xobject=b'Do',
setgray_stroke=b'G', setgray_nonstroke=b'g',
setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn',
setdash=b'd', setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs',
gsave=b'q', grestore=b'Q',
textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ',
setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in _pdfops.items()]))
def _paint_path(closep, fillp, strokep):
"""Return the PDF operator to paint a path in the following way:
closep: close the path before painting
fillp: fill the path with the fill color
strokep: stroke the outline of the path with the line color"""
if strokep:
if closep:
if fillp:
return Op.close_fill_stroke
else:
return Op.close_stroke
else:
if fillp:
return Op.fill_stroke
else:
return Op.stroke
else:
if fillp:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
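# e.g. Op.paint_path(False, True, True) is Op.fill_stroke, while
# Op.paint_path(False, False, False) is Op.endpath (no painting at all).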
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header """
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None: self.extra = dict()
else: self.extra = extra
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression']:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
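# Typical use, mirroring PdfFile.beginStream/endStream below (sketch only):
#   self.beginStream(ob.id, None, extra_dict)
#   self.currentstream.write(data)
#   self.endStream()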
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [ [0, 65535, 'the zero object'] ]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = { 'Type': Name('Catalog'),
'Pages': self.pagesObject }
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
self.type1Descriptors = {} # differently encoded Type-1 fonts may
# share the same descriptor
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
# The PDF spec recommends to include every procset
procsets = [ Name(x)
for x in "PDF Text ImageB ImageC ImageI".split() ]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = { 'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets }
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = { 'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [ 0, 0, 72*width, 72*height ],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')}
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in self.alphaStates.values()]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(iter(self.images.values()))
for tup in self.markers.values():
xobjects[tup[0]] = tup[1]
for name, value in self.multi_byte_charprocs.items():
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{ 'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList) })
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill(list(map(pdfRepr, data))))
self.write(b'\n')
def beginStream(self, id, len, extra=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %s' % (Fx, filename),
'debug')
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in self.fontNames.items():
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file; the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename, self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = { 'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding') }
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
matplotlib.verbose.report(
'Embedding TeX font ' + texname + ' - fontinfo=' + repr(fontinfo.__dict__),
'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [ Name(ch) for ch in enc ]
differencesArray = [ 0 ] + differencesArray
fontdict['Encoding'] = \
{ 'Type': Name('Encoding'),
'Differences': differencesArray }
# If no file is specified, stop short
if fontinfo.fontfile is None:
warnings.warn(
'Because of TeX configuration (pdftex.map, see updmap ' +
'option pdftexDownloadBase14) the font %s ' % fontinfo.basefont +
'is not embedded. This is deprecated as of PDF 1.5 ' +
'and it may cause the consumer application to show something ' +
'that was not intended.')
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0), fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
if fixed_pitch: flags |= 1 << 0 # fixed width
if 0: flags |= 1 << 1 # TODO: serif
if 1: flags |= 1 << 2 # TODO: symbolic (most TeX fonts are)
else: flags |= 1 << 5 # non-symbolic
if italic_angle: flags |= 1 << 6 # italic
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
ft2font = FT2Font(str(fontfile))
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{ 'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0 })
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(str(filename))
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest: return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0: return floor(value)
else: return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type' : Name('Font'),
'BaseFont' : ps_name,
'FirstChar' : firstchar,
'LastChar' : lastchar,
'FontDescriptor' : fontdescObject,
'Subtype' : Name('Type3'),
'Name' : descriptor['FontName'],
'FontBBox' : bbox,
'FontMatrix' : [ .001, 0, 0, .001, 0, 0 ],
'CharProcs' : charprocsObject,
'Encoding' : {
'Type' : Name('Encoding'),
'Differences' : differencesArray},
'Widths' : widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
str = decode_char(charcode)
width = font.load_char(str, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+1) ]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)
charprocs = {}
charprocsRef = {}
for charname, stream in rawcharprocs.items():
charprocDict = { 'Length': len(stream) }
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type' : Name('Font'),
'Subtype' : Name('CIDFontType2'),
'BaseFont' : ps_name,
'CIDSystemInfo' : {
'Registry' : 'Adobe',
'Ordering' : 'Identity',
'Supplement' : 0 },
'FontDescriptor' : fontdescObject,
'W' : wObject,
'CIDToGIDMap' : cidToGidMapObject
}
type0FontDict = {
'Type' : Name('Font'),
'Subtype' : Name('Type0'),
'BaseFont' : ps_name,
'Encoding' : Name('Identity-H'),
'DescendantFonts' : [cidFontDictObject],
'ToUnicode' : toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data: break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
cmap = font.get_charmap()
unicode_mapping = []
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = chr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
{'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1,0,0,6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3,1,0x0409,6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') \
or { 'capHeight': 0, 'xHeight': 0 }
post = font.get_sfnt_table('post') \
or { 'italicAngle': (0,0) }
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False #ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH: flags |= 1 << 0
if 0: flags |= 1 << 1 # TODO: serif
if symbolic: flags |= 1 << 2
else: flags |= 1 << 5
if sf & ITALIC: flags |= 1 << 6
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type' : Name('FontDescriptor'),
'FontName' : ps_name,
'Flags' : flags,
'FontBBox' : [ cvt(x, nearest=False) for x in font.bbox ],
'Ascent' : cvt(font.ascender, nearest=False),
'Descent' : cvt(font.descender, nearest=False),
'CapHeight' : cvt(pclt['capHeight'], nearest=False),
'XHeight' : cvt(pclt['xHeight']),
'ItalicAngle' : post['italicAngle'][1], # ???
'StemV' : 0 # ???
}
# Subsetting to a Type 3 font does not work for OpenType (.otf)
# fonts that embed a PostScript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
warnings.warn(("'%s' can not be subsetted into a Type 3 font. " +
"The entire font will be embedded in the output.") %
os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, { 'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1] })
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
face, edge, hatch = hatch_style
if face is not None:
face = tuple(face)
if edge is not None:
edge = tuple(edge)
hatch_style = (face, edge, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in self.hatchPatterns.items():
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = { 'Procsets':
[ Name(x) for x in "PDF Text ImageB ImageC ImageI".split() ] }
self.beginStream(
ob.id, None,
{ 'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res })
# lst is a tuple of stroke color, fill color,
# number of - lines, number of / lines,
# number of | lines, number of \ lines
rgb = hatch_style[0]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
if hatch_style[1] is not None:
rgb = hatch_style[1]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(0.1, Op.setlinewidth)
# TODO: We could make this dpi-dependent, but that would be
# an API change
self.output(*self.pathOperations(
Path.hatch(hatch_style[2]),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
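            # Coordinates are quantized to 32-bit unsigned fixed point over a
            # slightly padded bounding box; the Decode array below maps the
            # encoded values back to user-space coordinates.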
self.beginStream(
ob.id, None,
{ 'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
## These two from backend_ps.py
## TODO: alpha (SMask, p. 518 of pdf spec)
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
a = rgba[:,:,3:]
return h, w, rgb.tostring(), a.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
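        # The default rc/gc/bc weights are approximately the Rec. 601 luma
        # coefficients, so the gray value is a perceptual luminance estimate.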
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def writeImages(self):
for img, pair in self.images.items():
img.flipud_out()
if img.is_grayscale:
height, width, data = self._gray(img)
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
else:
height, width, data, adata = self._rgb(img)
smaskObject = self.reserveObject("smask")
stream = self.beginStream(
smaskObject.id,
self.reserveObject('length of smask stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(adata) # TODO: predictors (i.e., output png)
self.endStream()
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
'SMask': smaskObject})
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
img.flipud_out()
def markerObject(self, path, trans, fillp, strokep, lw, joinstyle, capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
# mapping from (path operations, fill?, stroke?) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
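        # Illustrative example of an entry (values are made up):
        #   self.markers[key] == [Name('M0'), <object reference>, <Bbox>, 1.5]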
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fillp), bool(strokep), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fillp, strokep, joinstyle, capstyle),
(name, ob, bbox, lw)) in self.markers.items():
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents) })
self.output(GraphicsContextPdf.joinstyles[joinstyle], Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(False, fillp, strokep))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(), padding,
filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle], Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(False, filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
cmds = []
last_points = None
for points, code in path.iter_segments(transform, clip=clip,
simplify=simplify,
sketch=sketch):
if code == Path.MOVETO:
# This is allowed anywhere in the path
cmds.extend(points)
cmds.append(Op.moveto)
elif code == Path.CLOSEPOLY:
cmds.append(Op.closepath)
elif last_points is None:
# The other operations require a previous point
raise ValueError('Path lacks initial MOVETO')
elif code == Path.LINETO:
cmds.extend(points)
cmds.append(Op.lineto)
elif code == Path.CURVE3:
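                # PDF paths only provide cubic Bezier operators, so a
                # quadratic (CURVE3) segment is promoted to an equivalent
                # cubic before being emitted.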
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
cmds.extend(points[2:])
cmds.append(Op.curveto)
elif code == Path.CURVE4:
cmds.extend(points)
cmds.append(Op.curveto)
last_points = points
return cmds
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name), file=sys.stderr)
borken = True
else:
if name == 'the zero object':
self.write(("%010d %05d f \n" % (offset, generation)).encode('ascii'))
else:
self.write(("%010d %05d n \n" % (offset, generation)).encode('ascii'))
i += 1
if borken:
raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
is_date = lambda x: isinstance(x, datetime)
check_trapped = lambda x: isinstance(x, Name) and x.name in \
('True', 'False', 'Unknown')
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in self.infoDict.keys():
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject }))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" % self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = gc._fillcolor
gc._fillcolor = fillcolor
orig_alphas = gc._effective_alphas
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta: self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, str):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
        The pdf backend supports arbitrary scaling of images.
"""
return True
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
self.check_gc(gc)
h, w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.to_values()
self.file.output(Op.gsave,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not can_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id, Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# For simple paths or small numbers of markers, don't bother
# making an XObject
if len(path) * len(marker_path) <= 10:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fillp = gc.fillp(rgbFace)
strokep = gc.strokep()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fillp, strokep, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output( cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(chr(num), fonttype), Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
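        # Illustrative example (values made up): seq might end up as
        #   [['font', Name('F1'), 9.96], ['text', 72.0, 700.0, ['a'], 77.5], ...]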
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
seq += [['text', x1, y1, [chr(glyph)], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0,0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
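        # Illustrative example: with a Type 3 font the string 'a\u03a9b' is
        # chunked as [(1, ['a']), (2, ['\u03a9']), (1, ['b'])]; the 1-byte runs
        # are shown with Tj and the 2-byte character with a Do XObject.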
self.check_gc(gc, gc._rgb)
if ismath: return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
            to output this text, and chunk the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = str(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1
and chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype), Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(str(filename))
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / 72.0, self.file.height / 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def strokep(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fillp(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def close_and_paint(self):
"""
Return the appropriate pdf operator to close the path and
cause it to be stroked, filled, or both.
"""
return Op.paint_path(True, self.fillp(), self.strokep())
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(False, self.fillp(), self.strokep())
capstyles = { 'butt': 0, 'round': 1, 'projecting': 2 }
joinstyles = { 'miter': 0, 'round': 1, 'bevel': 2 }
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._rgb, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
(('_cliprect', '_clippath'), clip_cmd), # must come first since may pop
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = ours.shape != theirs.shape or np.any(ours != theirs)
if different:
break
if different:
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
self._fillcolor = other._fillcolor
self._effective_alphas = other._effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Use like this::
# Initialize:
with PdfPages('foo.pdf') as pdf:
# As many times as you like, create a figure fig and save it:
# When no figure is specified the current figure is saved
pdf.savefig(fig)
pdf.savefig()
(In reality PdfPages is a thin wrapper around PdfFile, in order to
avoid confusion when using savefig and forgetting the format
argument.)
"""
__slots__ = ('_file',)
def __init__(self, filename):
"""
Create a new PdfPages object that will be written to the file
named *filename*. The file is opened at once and any older
file with the same name is overwritten.
"""
self._file = PdfFile(filename)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Save the Figure instance *figure* to this file as a new page.
If *figure* is a number, the figure instance is looked up by
number, and if *figure* is None, the active figure is saved.
Any other keyword arguments are passed to Figure.savefig.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf', **kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure,
width, height, image_dpi, RendererPdf(file, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureManager = FigureManagerPdf
| gpl-3.0 |
russel1237/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
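        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1, so the
        # colormap midpoint is pinned to the chosen score value.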
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
weidel-p/nest-simulator | pynest/examples/spatial/grid_iaf.py | 20 | 1437 | # -*- coding: utf-8 -*-
#
# grid_iaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create a population of iaf_psc_alpha neurons on a 4x3 grid
-----------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
l1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5]))
nest.PrintNodes()
nest.PlotLayer(l1, nodesize=50)
# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('4 Columns, Extent: 2.0')
plt.ylabel('3 Rows, Extent: 1.5')
plt.show()
# plt.savefig('grid_iaf.png')
| gpl-2.0 |
RayMick/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
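            # Weighted average of the neighbors' targets, computed per output
            # dimension: sum_i(w_i * y_i) / sum_i(w_i).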
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
bravo-t/NN | C/helper_scripts/visualize.py | 1 | 16743 | #!env python3
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Size our weights arrays
n = 256
W = []
for i in range(2):
W.append(np.linspace(-5,5,n))
# Define our data points
Input_Data = [(2,), (0,), (2,), (2.1,)]
Output_Data = [(0.95,), (0.5,), (0.10,), (0.099,)]
# Define our supporting functions
def sgm(x):
return 1.0 / (1.0 + np.exp(-x))
def E(x,y):
err = 0
for i,In in enumerate(Input_Data):
output = sgm(x*In[0] + y)
err += 0.5*(output - Output_Data[i][0])**2
return err
def sign(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
# Define the derivative functions
def b_deriv(Weight, Bias, Input, Target):
O = sgm(Weight * Input + Bias)
D = (O-Target)*O*(1-O)
return D
def w_deriv(Weight, Bias, Input, Target):
return b_deriv(Weight, Bias, Input, Target) * Input
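# For a single sigmoid unit O = sgm(Weight*Input + Bias) with squared error
# E = 0.5*(O - Target)**2, the chain rule gives
#   dE/dBias   = (O - Target) * O * (1 - O)
#   dE/dWeight = dE/dBias * Input
# which is exactly what b_deriv and w_deriv return.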
# Initial Conditions for each algorithm
Base = [(2,4,0,0)]
Base_m = Base[:]
Base_m2 = Base[:]
Base_r = Base[:]
Base_rp = Base[:]
Base_rm = Base[:]
Base_irp = Base[:]
Base_irm = Base[:]
Base_rms = Base[:]
Base_am = Base[:]
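# Each history entry is a tuple (w, b, step_w, step_b); the point actually
# evaluated on the next iteration is (w + step_w, b + step_b).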
# Meta-parameters for the algorithms
N_Max = 5000
Error_Max = 0.25
Eta = 0.2 # Learning Rate
coeff_m = 0.9 # Momentum Coefficient
Eta_plus = 1.2 # RProp Factors
Eta_minus = 0.5
lr_min = 1e-6
lr_max = 50
lnRMS = 0.9 # RMSProp Factor
# Which learning methods are we running?
llGradient = True
llRMSProp = False
llMomentum1 = True
llMomentum2 = False
llAdaptiveMomentum = False
llRProp = True
llRProp_p = False
llRProp_m = False
llRProp_ip = True
llRProp_im = False
# Iterate and build the solution steps (Gradient descent)
if llGradient:
print("Gradient Descent...")
for index in range(N_Max):
# Break early if error is small
if E(Base[-1][0] + Base[-1][2], Base[-1][1] + Base[-1][3]) < Error_Max:
print(("\tIter: {0}".format(index+1)))
break
if index == N_Max - 1:
print(("\tIter: {0}".format(index+1)))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base[-1][0] + Base[-1][2], Base[-1][1] + Base[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
# Mark the offset
Base.append( (w, b, Eta*dw, Eta*db) )
# Iterate and build the solution steps (Gradient + Momentum)
if llMomentum1:
print("Gradient Descent + Momentum...")
for index in range(N_Max):
# Break early if error is small
if E(Base_m[-1][0] + Base_m[-1][2], Base_m[-1][1] + Base_m[-1][3]) < Error_Max:
print(("\tIter: {0}".format(index+1)))
break
if index == N_Max - 1:
print(("\tIter: {0}".format(index+1)))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_m[-1][0] + Base_m[-1][2], Base_m[-1][1] + Base_m[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
# Mark the offset
Base_m.append( (w, b, coeff_m * Base_m[-1][2] + Eta*dw, coeff_m * Base_m[-1][3] + Eta*db) )
# Iterate and build the solution steps (Nesterov / Sutskever)
if llMomentum2:
print("Gradient Descent + (Nesterov / Sutskever) Momentum...")
for index in range(N_Max):
# Break early if error is small
if E(Base_m2[-1][0] + Base_m2[-1][2], Base_m2[-1][1] + Base_m2[-1][3]) < Error_Max:
print(("\tIter: {0}".format(index+1)))
break
if index == N_Max - 1:
print(("\tIter: {0}".format(index+1)))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_m2[-1][0] + Base_m2[-1][2], Base_m2[-1][1] + Base_m2[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w + coeff_m * Base_m2[-1][2], b + coeff_m * Base_m2[-1][3], Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w + coeff_m * Base_m2[-1][2], b + coeff_m * Base_m2[-1][3], Input_Data[i][0], Output_Data[i][0])
# Mark the offset
Base_m2.append( (w, b, coeff_m * Base_m2[-1][2] + Eta*dw, coeff_m * Base_m2[-1][3] + Eta*db) )
# Iterate and build the solution steps (Gradient + Adaptive Momentum)
if llAdaptiveMomentum:
print("Gradient Descent + Adaptive Momentum...")
for index in range(N_Max):
# Break early if error is small
if E(Base_am[-1][0] + Base_am[-1][2], Base_am[-1][1] + Base_am[-1][3]) < Error_Max:
print(("\tIter: {0}".format(index+1)))
break
if index == N_Max - 1:
print(("\tIter: {0}".format(index+1)))
break
# Compute the derivatives
dw, db = 0, 0
pw, pb = Base_am[-1][2], Base_am[-1][3]
w, b = Base_am[-1][0] + pw, Base_am[-1][1] + pb
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
P = np.array([pw, pb], dtype = 'float32')
D = np.array([dw, db], dtype = 'float32')
lP, lD = np.sqrt(np.dot(P,P)), np.sqrt(np.dot(D,D))
if lP != 0 and lD != 0:
c_m = (1 + (np.dot(P,D) / np.sqrt(np.dot(P,P)*np.dot(D,D))))**2 / 4
else:
c_m = 0.05
# Mark the offset
Base_am.append( (w, b, c_m * Base_am[-1][2] + Eta*dw, c_m * Base_am[-1][3] + Eta*db) )
# Iterate and build the solution steps (RProp)
if llRProp:
print("RProp...")
lr = [0.1, 0.1]
prev = [0, 0]
for index in range(N_Max):
# Break early if error is small
if E(Base_r[-1][0] + Base_r[-1][2], Base_r[-1][1] + Base_r[-1][3]) < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_r[-1][0] + Base_r[-1][2], Base_r[-1][1] + Base_r[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
curr = [dw, db]
delta = [0, 0]
# Compute the rprop algorithm
for i in range(len(curr)):
if curr[i] * prev[i] > 0:
lr[i] = min([lr[i] * Eta_plus, lr_max])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
elif curr[i] * prev[i] < 0:
lr[i] = max([lr[i] * Eta_minus, lr_min])
prev[i] = 0
#delta[i] = 0
else:
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
# Mark the offset
Base_r.append( (w, b, delta[0], delta[1]) )
# Iterate and build the solution steps (RProp+)
if llRProp_p:
print("RProp+...")
lr = [0.1, 0.1]
prev = [0, 0]
for index in range(N_Max):
# Break early if error is small
if E(Base_rp[-1][0] + Base_rp[-1][2], Base_rp[-1][1] + Base_rp[-1][3]) < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_rp[-1][0] + Base_rp[-1][2], Base_rp[-1][1] + Base_rp[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
curr = [dw, db]
delta = [0, 0]
# Compute the rprop+ algorithm
for i in range(len(curr)):
if curr[i] * prev[i] > 0:
lr[i] = min([lr[i] * Eta_plus, lr_max])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
elif curr[i] * prev[i] < 0:
lr[i] = max([lr[i] * Eta_minus, lr_min])
delta[i] = -Base_rp[-1][2+i]
prev[i] = 0
else:
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
# Mark the offset
Base_rp.append( (w, b, delta[0], delta[1]) )
# Iterate and build the solution steps (RProp-)
if llRProp_m:
print("RProp-...")
lr = [0.1, 0.1]
prev = [0, 0]
for index in range(N_Max):
# Break early if error is small
if E(Base_rm[-1][0] + Base_rm[-1][2], Base_rm[-1][1] + Base_rm[-1][3]) < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_rm[-1][0] + Base_rm[-1][2], Base_rm[-1][1] + Base_rm[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
curr = [dw, db]
delta = [0, 0]
# Compute the rprop- algorithm
for i in range(len(curr)):
if curr[i] * prev[i] > 0:
lr[i] = min([lr[i] * Eta_plus, lr_max])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
elif curr[i] * prev[i] < 0:
lr[i] = max([lr[i] * Eta_minus, lr_min])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
else:
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
# Mark the offset
Base_rm.append( (w, b, delta[0], delta[1]) )
# Iterate and build the solution steps (iRProp+)
if llRProp_ip:
print("iRProp+...")
lr = [0.1, 0.1]
prev = [0, 0]
for index in range(N_Max):
# Break early if error is small
if E(Base_irp[-1][0] + Base_irp[-1][2], Base_irp[-1][1] + Base_irp[-1][3]) < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_irp[-1][0] + Base_irp[-1][2], Base_irp[-1][1] + Base_irp[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
curr = [dw, db]
delta = [0, 0]
# Compute the irprop+ algorithm
for i in range(len(curr)):
if curr[i] * prev[i] > 0:
lr[i] = min([lr[i] * Eta_plus, lr_max])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
elif curr[i] * prev[i] < 0:
lr[i] = max([lr[i] * Eta_minus, lr_min])
if E(w, b) > E(Base_irp[-1][0], Base_irp[-1][1]):
delta[i] = -Base_irp[-1][2+i]
prev[i] = 0
else:
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
# Mark the offset
Base_irp.append( (w, b, delta[0], delta[1]) )
# Iterate and build the solution steps (iRProp-)
if llRProp_im:
print("iRProp-...")
lr = [0.1, 0.1]
prev = [0, 0]
for index in range(N_Max):
# Break early if error is small
if E(Base_irm[-1][0] + Base_irm[-1][2], Base_irm[-1][1] + Base_irm[-1][3]) < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
w, b = Base_irm[-1][0] + Base_irm[-1][2], Base_irm[-1][1] + Base_irm[-1][3]
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
curr = [dw, db]
delta = [0, 0]
# Compute the irprop- algorithm
for i in range(len(curr)):
if curr[i] * prev[i] > 0:
lr[i] = min([lr[i] * Eta_plus, lr_max])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
elif curr[i] * prev[i] < 0:
lr[i] = max([lr[i] * Eta_minus, lr_min])
delta[i] = sign(curr[i]) * lr[i]
prev[i] = 0
else:
delta[i] = sign(curr[i]) * lr[i]
prev[i] = curr[i]
# Mark the offset
Base_irm.append( (w, b, delta[0], delta[1]) )
# Iterate and build the solution steps (RMSProp)
MS_History = []
if llRMSProp:
print("RMSProp...")
MS = np.array([0, 0],dtype='float32')
for index in range(N_Max):
# Break early if error is small
w, b = Base_rms[-1][0] + Base_rms[-1][2], Base_rms[-1][1] + Base_rms[-1][3]
Err = E(w, b)
if Err < Error_Max:
print("\tIter: {0}".format(index+1))
break
if index == N_Max - 1:
print("\tIter: {0}".format(index+1))
break
# Compute the derivatives
dw, db = 0, 0
for i in range(len(Input_Data)):
dw -= w_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
db -= b_deriv(w, b, Input_Data[i][0], Output_Data[i][0])
# Mark the offset
D = np.array([dw, db])
MS = lnRMS * MS + (1 - lnRMS) * D**2
MS_History.append(MS)
sMS = np.sqrt(MS)
Base_rms.append( (w, b, Eta*dw/sMS[0], Eta*db/sMS[1]) )
#
# Build a contour plot
#
X, Y = np.meshgrid(W[0], W[1])
Z = E(X, Y)
plt.figure()
CSF = plt.contourf(X, Y, Z, 14, alpha = .3, cmap = cm.jet,zorder = 0)
CS = plt.contour(X, Y, Z,14, colors = 'black', zorder = 1)
# Plot Gradient Descent
if len(Base) > 1:
plt.quiver(*list(zip(*Base)),
color = 'red',width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 2)
# Plot Gradient Descent with Momentum 1
if len(Base_m) > 1:
plt.quiver(*list(zip(*Base_m)),
color = 'blue',width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 3)
# Plot Gradient Descent with Momentum 2
if len(Base_m2) > 1:
plt.quiver(*list(zip(*Base_m2)),
color = 'green',width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 4)
# Plot RProp
if len(Base_r) > 1:
plt.quiver(*list(zip(*Base_r)),
color = (1.0,0.8,0.8),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 5)
# Plot RProp+
if len(Base_rp) > 1:
plt.quiver(*list(zip(*Base_rp)),
color = (0.5,0,1),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 6)
# Plot RProp-
if len(Base_rm) > 1:
plt.quiver(*list(zip(*Base_rm)),
color = (0,1,1),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 6)
# Plot iRProp+
if len(Base_irp) > 1:
plt.quiver(*list(zip(*Base_irp)),
color = (0.1,0.1,0.5),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 7)
# Plot iRProp-
if len(Base_irm) > 1:
plt.quiver(*list(zip(*Base_irm)),
color = (0.1,0.5,0.5),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 8)
# Plot RMSProp
if len(Base_rms) > 1:
plt.quiver(*list(zip(*Base_rms)),
color = (0.8,0.8,0.2),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 9)
# Plot Adaptive Momentum
if len(Base_am) > 1:
plt.quiver(*list(zip(*Base_am)),
color = (0.1,0.65,0.65),width = 0.005,
scale_units = 'xy', scale = 1.0,
angles = 'xy', zorder = 10)
plt.clabel(CS, inline = 1, fontsize = 10)
plt.title('Error function in weight-space')
plt.show() | lgpl-3.0 |
henridwyer/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
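# --- Illustrative sketch (added for clarity; not part of the original tests).
# Mirrors the no-intercept case checked above: at the C returned by l1_min_c
# an L1-penalised model is still entirely zero, and just above it non-zero
# coefficients appear.
def _demo_l1_min_c():
    min_c = l1_min_c(dense_X, Y1, loss='log', fit_intercept=False)
    clf = LogisticRegression(penalty='l1', fit_intercept=False, C=min_c)
    clf.fit(dense_X, Y1)
    all_zero = (np.asarray(clf.coef_) == 0).all()
    clf.set_params(C=min_c * 1.01).fit(dense_X, Y1)
    some_nonzero = (np.asarray(clf.coef_) != 0).any()
    return all_zero, some_nonzero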
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` is the product of the axis lengths
        (at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
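# --- Illustrative sketch (added for clarity; not part of the original
# module). A tiny example of the helper above: columns with at least
# ``grid_resolution`` distinct values get an equally spaced axis between the
# requested percentiles, and ``grid`` is the cartesian product of the axes.
def _demo_grid_from_x():
    X = np.asarray([[0.0, 10.0], [1.0, 20.0], [2.0, 30.0], [3.0, 40.0]])
    grid, axes = _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=3)
    # Each column has 4 unique values (>= 3), so each axis holds 3 points and
    # the grid has 3 * 3 = 9 rows and 2 columns.
    return grid.shape, [axis.shape for axis in axes]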
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
ilyes14/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
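# --- Illustrative sketch (added for clarity; not part of the original
# example). A quick comparison on purely random labelings: the adjusted Rand
# index stays close to 0 regardless of the number of clusters, while the
# non-adjusted V-measure drifts upward.
def _demo_random_labeling_scores():
    n_clusters = np.array([10, 50])
    ari = uniform_labelings_scores(metrics.adjusted_rand_score, 100,
                                   n_clusters)
    vmeas = uniform_labelings_scores(metrics.v_measure_score, 100,
                                     n_clusters)
    return ari.mean(axis=1), vmeas.mean(axis=1)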
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | examples/text/plot_document_classification_20newsgroups.py | 4 | 10873 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
# #############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(target_names))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# #############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, label in enumerate(target_names):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s" % (label, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3),
"Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
mortonjt/American-Gut | americangut/make_phyla_plots.py | 5 | 36886 | #!/usr/bin/env python
from __future__ import division
from os.path import isfile
from matplotlib import use
use('Agg') # noqa
from biom.parse import parse_biom_table
from biom.util import biom_open
from numpy import (array, zeros, mean, ones, vstack, arange, ndarray)
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.font_manager import FontProperties
from matplotlib import rc
from operator import itemgetter
import colorbrewer
# Colors from www.ColorBrewer.org by Cynthia A. Brewer, Geography,
# Pennsylvania State University.
# Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania State
# University.
__author__ = "Justine Debelius"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Justine Debelius", "Daniel McDonald"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Justine Debelius"
__email__ = "[email protected]"
def map_to_2D_dict(mapping_data):
""" Converts mapping file to 2D dictionary
INPUT:
mapping_data -- a tab delimited string from the opened mapping file
OUTPUT:
D2 -- a two dimensional dictionary where each sample ID is keyed to a
dictionary of meta data containing headers and observations
"""
lines = [l.strip().split('\t') for l in mapping_data]
header = lines[0]
D2 = {}
for l in lines[1:]:
inner = {k: v for k, v in zip(header, l)}
sample_id = inner['#SampleID']
D2[sample_id] = inner
return D2
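# --- Illustrative sketch (added for clarity; not part of the original
# module). The sample IDs and headers below are made up purely to show the
# shape of the dictionary returned by map_to_2D_dict.
def _demo_map_to_2D_dict():
    mapping_data = ['#SampleID\tBODY_SITE\tAGE',
                    'Sample.1\tUBERON:feces\t33',
                    'Sample.2\tUBERON:tongue\t41']
    d2 = map_to_2D_dict(mapping_data)
    # d2['Sample.1']['BODY_SITE'] -> 'UBERON:feces'
    return d2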
def load_category_files(category_files):
"""Loads the category tables as biom files
INPUTS:
category_files -- a dictionary that associates the mapping category
            (key) with the file path to the otu_table summarizing that
            category.
OUTPUTS:
category_tables -- a dictionary that associates the mapping category
with the summarized otu table for the category.
"""
category_tables = {}
watch_count = 0
watch_list = []
for (category, category_file) in category_files.iteritems():
if isfile(category_file):
with biom_open(category_file, 'U') as fp:
cat_table = parse_biom_table(fp)
category_tables[category] = cat_table
else:
watch_list.append('The summarized OTU table file cannot be found '
'for %s. \n%s is not in the file path.'
% (category, category_file))
watch_count = watch_count + 1
if watch_count > 0:
print 'The following category files could not be found: \n%s' \
% '\n'.join(watch_list)
if watch_count == len(category_files):
raise ValueError('No files could be found for any of the supplied '
'categories. \n%s' % '\n'.join(watch_list))
return category_tables
def parse_category_files(raw_tables, common_groups, level=2,
metadata='taxonomy'):
""" Collapses categeory tables using the most common OUTPUTS
INPUTS:
category_tables -- a dictionary keying the category name in the mapping
file to the biom otu table of the collapsed data.
common_group -- the reference groups in the metadata category which
should be used to summarize the data.
level -- the level at which data should be summarized
metadata -- the metadata category which should be used to summarize the
data
OUTPUTS:
category_data -- a dictionary that associates the mapping category
with the data summarized using common_categories."""
num_g = len(common_groups)
category_data = {}
for (cat, cat_table) in raw_tables.items():
[ids, data, cats] = \
summarize_common_categories(
biom_table=cat_table, level=level,
common_categories=common_groups[:num_g],
metadata_category=metadata)
category_data.update({cat: {'Groups': ids,
'Summary': data}})
return category_data
def identify_most_common_categories(biom_table, level, limit_mode='COMPOSITE',
metadata_category='taxonomy', limit=1.0):
"""Identifies the most common taxa in a population using variable limits
This method uses a composite score to account for both the average
frequency and the percentage of the population in which the sample is
present. This hopes to be more robust than either the average or fraction
present alone.
INPUTS:
biom_table -- a sparse biom table to be summarized
level -- an integer corresponding to the taxonomic level (or other meta
data category level) at which data should be summarized.
The argument will only take a single integer.
metadata_category -- a description of the metadata category over which
the data will be summarized.
limit_mode -- a string describing how the scoring limit which should be
used. Options are 'COMPOSITE', 'AVERAGE', 'COUNTS' or
'NUMBER', 'NONE'.
COMPOSITE keeps samples whose composite scores greater than
the limit. (Default limit is 1)
AVERAGE keeps samples whose average are greater than the
specified limit. (Default limit is 0.01)
COUNTS keep groups which are represented in at least the
specified fraction of sample (default limit is 0.01)
NONE sorts groups alphabetically and keeps all.
limit -- the numeric lower limit for common taxa
logging -- a binary value specifying if a table of the frequency,
composite value, and metadata category should be printed
and retained.
    OUTPUTS:
        common_categories -- a list of the common categories (the row
            headers)
scores -- a sorted list of all the taxa and their scores"""
# Sets up positions
COMPOSITE_CONSTANT = 10000
LIMIT_MODES = {'COMPOSITE': [3, True, 1],
'AVERAGE': [1, True, 3],
'COUNTS': [2, True, 3],
'NONE': [0, False, 3]}
score_position, score_reverse, second_score = \
LIMIT_MODES.get(limit_mode, [None, None, None])
if score_position is None:
raise ValueError("limit_mode is not a supported option. \n"
"Options for limit_mode are 'COMPOSITE', 'AVERAGE', "
"'COUNTS', or 'NONE'.")
# Gets the Sample IDs
sample_ids = list(biom_table.ids())
num_samples = len(sample_ids)
# Prealocates output objects
scoring_all = []
common_categories = []
# Normalizes the data by the number of observations so relative frequencies
# are used.
biom_table_norm = biom_table.norm(inplace=False)
# Collapses the OTUs into category summaries using the correct levels
def bin_fun(y, x):
return x[metadata_category][:level]
for (bin, table) in biom_table_norm.partition(bin_fun, axis='observation'):
# Pulls out the sample data for the group
group_value = array(table.sum('sample'))
group_binary = group_value > 0
# Calculates presence scores
average_freq = round(mean(group_value), 4)
fraction_pres = round(sum(group_binary)/(num_samples), 4)
composite = round(average_freq*fraction_pres*COMPOSITE_CONSTANT, 2)
score_row = [bin, average_freq, fraction_pres, composite]
# Adds the scores to the watch matrix
if fraction_pres > 0:
scoring_all.append(score_row)
# Sorts based on scoring method
scores_all = sorted(sorted(scoring_all,
key=itemgetter(second_score),
reverse=True),
key=itemgetter(score_position),
reverse=score_reverse)
# Identifies rows which meet the scoring criteria
scores = []
for score in scores_all:
scores.append(score)
if score[score_position] > limit:
common_categories.append(score[0])
# Raises an error if necessary
if len(common_categories) == 0:
raise ValueError('Limit too high! No common categories.')
# Returns the values
return common_categories, scores
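# --- Illustrative note (added for clarity; not part of the original module).
# A worked example of the COMPOSITE score used above: the score is the
# average relative frequency times the fraction of samples in which the
# group appears, times 10000.
def _demo_composite_score(average_freq=0.02, fraction_pres=0.60):
    # 0.02 * 0.60 * 10000 = 120.0, comfortably above the default limit of
    # 1.0, so this group would be kept; a group at 0.0001 frequency seen in
    # 1% of samples scores only 0.01 and would be dropped.
    return round(average_freq * fraction_pres * 10000, 2)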
def summarize_common_categories(biom_table, level, common_categories,
metadata_category='taxonomy'):
"""Determines the frequency of common categories present in a biom table
INPUTS:
biom_table -- a sparse biom table to be evaluated
level -- an integer corresponding to the taxonomic level (or other meta
data category level) at which data should be summarized.
The argument will only take a single integer.
common_categories -- a list of values which define the common
categories.
metadata_category -- a description of the metadata category over which
the data will be summarized.
OUTPUTS:
sample_ids -- a list of the sample ids found in the OTU table.
cat_summary -- a numpy array describing the frequency of the common
categories with an additional frequencies collapsed into
the "other" category.
common_cats -- a summary of the common categories with an "other"
category appended."""
# Checks that input biom table can be processed
all_cats = biom_table.metadata(axis='observation')
cats_zip = zip(*all_cats)
# Gets the set of all categories
group_all = ()
for cat in cats_zip:
group_all = group_all+(cat)
categories = set(group_all)
if metadata_category not in categories:
raise ValueError('The biom table cannot be summarized; supplied '
'category does not exist.')
# Identifies the common categories and removes extraneous characters
num_cats = len(common_categories)
for idx, cat in enumerate(common_categories):
temp_cat = []
for i in cat:
temp_cat.append(i.strip())
common_categories[idx] = tuple(temp_cat)
sample_ids = biom_table.ids()
num_samples = len(sample_ids)
# Sets up the "other category name"
summary_name = all_cats[0][metadata_category]
other_name = [summary_name[0]]
if len(summary_name) > 2:
for cat_des in summary_name[1:(level)]:
other_name.append('%s__%s' % (cat_des.split('__')[0], 'Other'))
other_name = [tuple(other_name)]
# Normalizes the biom table
biom_norm = biom_table.norm(inplace=False)
# Prealocates numpy objects (because that makes life fun!). tax_other is
# set up as a row array because this is ultimately summed
cat_summary = zeros([num_cats, num_samples])
# Collapses the OTU table using the category at the correct level
def bin_fun(y, x):
return x[metadata_category][:level]
for (bin_, table) in biom_norm.partition(bin_fun, axis='observation'):
new_bin = []
for item in bin_:
new_bin.append(item.strip())
new_bin = tuple(new_bin)
if new_bin in common_categories:
current = cat_summary[common_categories.index(new_bin)]
cat_summary[common_categories.index(new_bin)] \
= table.sum('sample') + current
cat_summary = vstack((cat_summary, 1 - sum(cat_summary)))
common_cats = common_categories
common_cats.extend(other_name)
return sample_ids, cat_summary, common_cats
def translate_colors(num_colors, map_name='Spectral'):
"""Gets a colorbrewer colormap and sets it up for plotting in matplotlib
OUTPUTS:
colormap -- a numpy array with the colorbrewer map formatted for use in
matplotlib.
"""
try:
raw_map = getattr(colorbrewer, map_name)
    except AttributeError:
raise ValueError('%s is not a valid colorbrewer map name. '
'\nSee http://colorbrewer2.org for valid map names.'
% map_name)
if num_colors not in raw_map:
raise ValueError('Too many colors. \n%i wanted %i possible. \n'
'Pick fewer colors.'
% (num_colors, max(raw_map.keys())))
map_ar = array(raw_map[num_colors])
# Corrects for colorbrewer's 0 to 255 scaling and matplotlib's use of 0 to
    # 1 color scaling.
colormap = map_ar.astype(float)/255
return colormap
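# --- Illustrative sketch (added for clarity; not part of the original
# module). translate_colors rescales a colorbrewer map to matplotlib's 0-1
# range; the result can be passed straight to the ``colors`` argument of the
# plotting functions below.
def _demo_translate_colors():
    colormap = translate_colors(4, map_name='Spectral')
    # colormap has shape (4, 3) and every entry lies between 0 and 1.
    return colormap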
def calculate_dimensions_rectangle(axis_width=4, axis_height=4, border=0.1,
title=0.25, legend=1, xlab=0, ylab=0,
unit='in'):
"""Determines the appriate axis and figure dimensions for square axis.
INPUTS:
axis_size -- a number specifying the side length the axis. DEFAULT: 4
border -- the width of the border around the figure
title -- the height to add to the top of the figure for a title. This
is separate from the border, which is added by default.
DEFAULT: 1
legend -- the width of the legend to the be added to the figure.
DEFAULT: 2
xlab -- the height to be added for labels along the x axis. DEFAULT: 0
ylab -- the width to be added for labels along the y axis. DEFAULT: 0
unit -- a string ('inches' or 'cm'), specifying the unit to be used
in image generation. DEFAULT: in
OUTPUTS:
axis_dimensions -- a Bbox class describing the axis position in the
figure
figure_dimensions -- a 2 element tuple giving the width and height of
the figure in inches
"""
# Specifies a value for converting between units
if unit == 'cm':
conversion = 1/2.54
elif unit == 'in':
conversion = 1.0
else:
raise ValueError('unit must be "in" or "cm".')
# Determines the figure dimensions
fig_width = axis_width+(border*2)+legend+ylab
fig_height = axis_height+(border*2)+title+xlab
figure_dimensions = (fig_width*conversion, fig_height*conversion)
# Determines the axis bounds
axis_left = (border+ylab)/fig_width
axis_right = (border+axis_width+ylab)/fig_width
axis_bottom = (border+xlab)/fig_height
axis_top = (border+axis_height+xlab)/fig_height
axis_dimensions = array([[axis_left, axis_bottom],
[axis_right, axis_top]])
return axis_dimensions, figure_dimensions
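# --- Illustrative sketch (added for clarity; not part of the original
# module). With the defaults (a 4 x 4 inch axis, 0.1 inch border, 0.25 inch
# title strip and a 1 inch legend) the figure works out to 5.2 x 4.45 inches,
# and the returned axis bounds are fractions of that figure.
def _demo_calculate_dimensions_rectangle():
    axis_dims, fig_dims = calculate_dimensions_rectangle()
    return axis_dims, fig_dims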
def calculate_dimensions_bar(num_bars, bar_width=0.5, axis_height=3,
border=0.1, title=1, legend=2, xlab=0, ylab=0,
unit='in'):
"""Determines the axis and figure dimensions for a bar chart.
INPUTS:
num_bars -- the number of bars in the bar chart being created.
bar_width -- the width of the plotted bars in units DEFAULT: 0.5.
        axis_height -- the height of the axis. DEFAULT: 3
border -- the size of white space to be added around the axis.
DEFAULT: 0.1
title -- the height to add to the top of the figure for a title. This
is separate from the border, which is added by default.
DEFAULT: 1
        legend -- the width of the legend to be added to the figure.
DEFAULT: 2
xlab -- the height to be added for labels along the x axis. DEFAULT: 0
ylab -- the width to be added for labels along the y axis. DEFAULT: 0
unit -- a string ('in' or 'cm'), specifying the unit to be used
in image generation. DEFAULT: in
OUTPUTS:
axis_dimensions -- a Bbox class describing the axis position in the
figure
figure_dimensions -- a 2 element tuple giving the width and height of
the figure"""
# Preforms some sanity checks.
if num_bars < 1:
raise ValueError('There must be at least one group to plot.\nnum_bars'
' must be a whole number greater than 1.')
elif not round(num_bars) == num_bars:
raise ValueError('There cannot be partial categories. \nnum_bars must'
' be a whole number greater than 1.')
# Specifies a value for converting between units
if unit == 'cm':
conversion = 1/2.54
elif unit == 'in':
conversion = 1.0
else:
raise ValueError('unit must be "in" or "cm".')
# Determines the figure width
axis_width = (num_bars*bar_width)
figure_width = (border*2+ylab+axis_width+legend)*conversion
figure_height = (border*2+xlab+axis_height+title)*conversion
figure_dimensions = (figure_width, figure_height)
axis_left = (ylab+border)/figure_width*conversion
axis_right = (ylab+border+axis_width)/figure_width*conversion
axis_bottom = (xlab+border)/figure_height*conversion
axis_top = (xlab+border+axis_height)/figure_height*conversion
axis_dimensions = array([[axis_left, axis_bottom],
[axis_right, axis_top]])
return axis_dimensions, figure_dimensions
def render_single_pie(data_vec, group_names, axis_dims, fig_dims,
file_out='piechart', filetype='PDF', colors=None,
show_edge=True, axis_on=False, plot_ccw=False,
start_angle=90, x_lims=[-1.1, 1.1], y_lims=[-1.1, 1.1],
legend=True, legend_offset=None, legend_font=None,
legend_frame=False, title=None, title_font=None,
labels=None, label_distance=1.1, label_font=None,
use_latex=False, rc_fam='sans-serif',
rc_font=['Helvetica']):
"""Creates a pie chart summarizing the category data
INPUTS:
data_vec -- a vector which sums to 1 describing the fraction of the
chart represented by each group in group_names.
group_names -- a list of the groups in cat_vec. (i.e. Firmictues,
Bacteriodetes, Proteobacteria for a category of Taxonomy).
file_out -- a string giving the file path where the pie plot should be
saved.
filetype -- a string describing the file format to save the output
file. Possible values include 'PNG', 'PDF', 'EPS', and
'SVG'.
DEFAULT: PDF
        colors -- an n x 3 or n x 4 numpy array giving the desired colormap.
Default is to fill all wedges in white.
DEFAULT: array([[1,1,1]])
        show_edge -- a binary value dictating whether or not the edge should be
outlined in black.
DEFAULT: True
axis_on -- a binary value indicating whether or not the axes should be
displayed.
DEFAULT: False
axis_frame -- a binary value indicating whether the frame should be
displayed on the axes
DEFAULT: False
        plot_ccw -- a binary value indicating whether the data should
be plotted clockwise (False) or counter-clockwise (True).
DEFAULT: False
start_angle -- the angle from the x-axis (horizontal = 0) in degrees
at which to start plotting.
DEFAULT: 90
x_lims -- the limits on the x-axis in the form of (min, max)
DEFAULT: [-1.1, 1.1]
        y_lims -- the limits on the y axis in the form of (min, max)
DEFAULT: [-1.1, 1.1]
legend -- a binary value indicating whether or not a legend should be
shown. This must be accounted for in the figure dimensions.
Default is to show the legend.
        legend_offset -- a two-element list giving the offset for the legend.
If this has a value of None, the legend will remain in the
original position.
DEFAULT: None
legend_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
to use the 15 pt normal sans-serif.
DEFAULT: None
legend_frame -- a binary value indicating whether or not a the legend
should display a frame.
title -- a string giving a title to append to the figure. This must be
accounted for in the figure dimensions.
DEFAULT: None
title_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
            to use the 30 pt normal sans-serif.
DEFAULT: None
labels -- a list of labels to be added to the figure. A value of None
will turn of labels.
DEFAULT: None
label_distance -- the distance from the origin at which the labels
should be displayed.
DEFAULT: 1.1
label_font -- a FontProperties object (dictionary) describing the font.
            If None is supplied, the default is to use size 20 normal
sans-serif.
DEFAULT: None
use_latex -- a binary value indicating if matplotlib's ability to
render using LaTeX should be considered or not. If this is
the case, the supplied rc_fam and rc_font will be used.
DEFAULT: False
rc_family -- the font family which should be used for LaTeX rendering.
Options are 'sans-serif', 'serif', 'cursive'.
DEFAULT: 'sans-serif'
rc_font -- a list of the font(s) which should be used with latex
rendering.
DEFAULT: ['Helvetica']
OUTPUTS:
The rendered figure is saved in the at the file_out location.
"""
    # Sets up the colormap
    num_wedges = len(data_vec)
    if colors is None:
        colormap = ones((num_wedges, 3))
    elif not isinstance(colors, ndarray):
        raise TypeError('The colormap must be a numpy array.')
    elif len(colors[:, 0]) == 1:
        colormap = colors*ones((num_wedges, 1))
    elif len(colors[:, 0]) >= num_wedges:
        colormap = colors
    else:
        raise ValueError('The color map cannot be determined. \nColors must '
                         'be a list of n x 3 lists where n is the number of '
                         'patches being supplied or a single color to be used'
                         ' for all patches.')
# Sets up the font properties for each of the label objects
if label_font is None:
label_font = FontProperties()
label_font.set_size(20)
label_font.set_family('sans-serif')
if legend_font is None:
legend_font = FontProperties()
legend_font.set_size(15)
legend_font.set_family('sans-serif')
if title_font is None:
title_font = FontProperties()
title_font.set_size(30)
title_font.set_family('sans-serif')
    # Sets up LaTeX rendering if desired
if use_latex:
rc('text', usetex=True)
rc('font', **{'family': rc_fam, rc_fam: rc_font})
# Creates the figure
fig = plt.gcf()
fig.set_size_inches(fig_dims)
ax1 = plt.axes(Bbox(axis_dims))
ax1.set_position(Bbox(axis_dims))
if axis_on:
ax1.set_axis_on()
# Plots the data clockwise
[pie_patches, pie_text] = ax1.pie(x=data_vec,
labels=labels,
labeldistance=label_distance,
shadow=False,
startangle=start_angle)
    # Colors the data so it's pretty!
for idx, patch in enumerate(pie_patches):
# Sets the face color
patch.set_facecolor(colormap[idx, :])
if not show_edge:
patch.set_edgecolor(colormap[idx, :])
# Sets the label properties
if labels is not None:
for lab in pie_text:
            lab.set_fontproperties(label_font)
# Sets the axis and figure dimensions
plt.draw()
    # Reverses the x-axis so the data reads clockwise unless plot_ccw is set
if not plot_ccw:
plt.axis([x_lims[1], x_lims[0], y_lims[0], y_lims[1]])
plt.draw()
# Adds the legend if necessary
if legend:
leg = plt.legend(pie_patches, group_names,
loc='center right',
prop=legend_font,
frameon=legend_frame)
plt.draw()
if legend_offset is not None and len(legend_offset) == 2:
leg.set_bbox_to_anchor((legend_offset[0], legend_offset[1]))
elif legend_offset is not None:
leg.set_bbox_to_anchor(tuple(legend_offset))
plt.draw()
# Adds the title if desired
if isinstance(title, str):
        plt.title(title, fontproperties=title_font)
plt.draw()
# Saves the output figure
plt.savefig(file_out, format=filetype)
plt.clf()
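# A minimal usage sketch for render_single_pie. The data, group names, layout
# fractions and output path below are illustrative assumptions only; call this
# helper explicitly if you want to generate the example figure.
def _example_render_single_pie():
    from numpy import array
    data_vec = array([0.60, 0.30, 0.10])
    group_names = ['Firmicutes', 'Bacteroidetes', 'Proteobacteria']
    # [[left, bottom], [right, top]] as fractions of the figure
    axis_dims = array([[0.10, 0.10], [0.70, 0.90]])
    fig_dims = (6, 4)  # width, height in inches
    render_single_pie(data_vec, group_names, axis_dims, fig_dims,
                      file_out='example_pie.pdf',
                      colors=array([[0.8, 0.2, 0.2],
                                    [0.2, 0.6, 0.2],
                                    [0.2, 0.2, 0.8]]),
                      legend=True)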
def render_barchart(
data_table, group_names, sample_names, axis_dims,
fig_dims, file_out='barchart', filetype='PDF', colors=None,
show_edge=True, legend=True, title=None, match_legend=True,
frame=True, bar_width=0.8, x_axis=True, x_label=None,
x_min=-0.5, x_tick_interval=1.0, y_axis=True,
y_lims=[0, 1], y_tick_interval=0.2, y_tick_labels=None,
y_label=None, legend_frame=False,
legend_offset=None, font_angle=45, font_alignment='right',
tick_font=None, label_font=None, legend_font=None,
title_font=None, use_latex=False, rc_fam='sans-serif',
rc_font=['Helvetica']):
"""Creates a stacked bar chart using the data in the category table.
A single value bar chart can be created using a vector for data_table
instead of a table.
INPUTS:
data_table -- a numpy array of the category information to be plotted
where the rows are the groups in the category and the
columns are the samples.
group_names -- a list of the groups, corresponding to rows in the
data table
sample_names -- a list of the sample names, corresponding to the
columns in the data table. The sample names must be string
objects.
axis_dims -- a 2 x 2 numpy array giving the fraction of the figure
which should bound the axis. (row 1: [left, bottom], row 2:
[right, top] as a percentage of the figure space)
fig_dims -- a 2 element tuple giving the width and height in inches of
the output figure.
file_out -- a string giving the file path where the pie plot should be
saved.
filetype -- a string describing the file format to save the output
file. Possible values include 'PNG', 'PDF', 'EPS', and
'SVG'.
DEFAULT: PDF
    colors -- an n x 3 or n x 4 numpy array giving the desired color map,
        where n is 1 or at least the number of groups. A value of None
        colors each bar segment white.
        DEFAULT: None
    show_edge -- a binary value dictating whether or not the edge should be
legend -- a binary value indicating whether or not a legend should be
shown. This must be accounted for in the figure dimensions.
DEFAULT: True
title -- a string giving a title to append to the figure. This must be
accounted for in the figure dimensions.
DEFAULT: None
match_legend -- a binary value indicating whether the order of colors
in the plot should match the order of colors in the legend.
DEFAULT: True
    frame -- a binary value indicating whether or not a frame should be
displayed around the axis
DEFAULT: True
bar_width -- the fraction of the bar width to be occupied by the data.
DEFAULT: 0.8
x_axis -- a binary value indicating whether or not the x-axis should be
labeled.
DEFAULT: True
    x_label -- a string describing the data in the plot's x-axis. A
value of None leaves the axis label off.
DEFAULT: None
x_min -- the minimum value for the x-axis. The maximum is determined by
the number of bars, where each bar is one unit away from
the next.
        DEFAULT: -0.5
    x_tick_interval -- the spacing between the plotted bars.
DEFAULT: 1.0
y_axis -- a binary value indicating whether or not tick labels should
be shown on the y axis.
DEFAULT: True
y_lims -- a 2 element list giving the minimum and maximum values for
the y axis.
DEFAULT: [0, 1]
y_tick_interval -- the spacing between ticks on the y-axis.
DEFAULT: 0.2
y_tick_labels -- a string with the labels for the y-ticks. If no value
is supplied, the labels are set up using the y-tick values.
DEFAULT: None
y_label -- a string describing the data plotted on the y-axis. If None,
no string will be present.
DEFAULT: None
legend_frame -- a binary value indicating whether a box will be
displayed around the legend.
DEFAULT: False
    legend_offset -- a two-element list giving the (x, y) anchor offset for
        the legend. If this has a value of None, the legend will remain in
        its default position.
DEFAULT: None
font_angle -- the angle in degrees at which the x-axis text should be
displayed.
DEFAULT: 45
font_alignment -- the horizontal alignment of the x axis labels. Values
may be 'left', 'right' or 'center'.
DEFAULT: 'right'
tick_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
to use the 15 pt normal sans-serif.
DEFAULT: None
label_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
to use the 20 pt italic sans-serif.
DEFAULT: None
legend_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
to use the 15 pt normal sans-serif.
DEFAULT: None
title_font -- a FontProperties object (dictionary) describing the
properties of the font. If None is supplied, the default is
to use the 36 pt normal sans-serif.
DEFAULT: None
use_latex -- a binary value indicating if matplotlib's ability to
render using LaTeX should be considered or not. If this is
the case, the supplied rc_fam and rc_font will be used.
DEFAULT: False
    rc_fam -- the font family which should be used for LaTeX rendering.
Options are 'sans-serif', 'serif', 'cursive'.
DEFAULT: 'sans-serif'
rc_font -- a list of the font(s) which should be used with latex
rendering.
DEFAULT: ['Helvetica']
OUTPUT:
The rendered figure is saved in the file_out location.
"""
    # Performs a sanity check that the provided data is good
(table_height, table_width) = data_table.shape
num_cats = len(group_names)
num_samples = len(sample_names)
    if not table_height == num_cats:
        raise ValueError('The number of rows in data_table does not match '
                         'the number of group names provided.')
    elif not table_width == num_samples:
        raise ValueError('The number of columns in data_table does not '
                         'match the number of sample names provided.')
    # Sets up the colormap
    if colors is None:
        colormap = ones((table_height, 3))
    elif not isinstance(colors, ndarray):
        raise TypeError('The colormap must be a numpy array.')
    elif len(colors[:, 0]) == 1:
        colormap = colors*ones((table_height, 1))
    elif len(colors[:, 0]) >= table_height:
        colormap = colors
    else:
        raise ValueError('The color map cannot be determined. \nColors must '
                         'be a list of n x 3 lists where n is the number of '
                         'patches being supplied or a single color to be used'
                         ' for all patches.')
# Sets up the edge colormap
if show_edge:
edgecolor = zeros((num_cats, 3))
else:
edgecolor = colormap
# Sets up the font properties for each of the label objects
if label_font is None:
label_font = FontProperties()
label_font.set_size(20)
label_font.set_family('sans-serif')
label_font.set_style('italic')
if legend_font is None:
legend_font = FontProperties()
legend_font.set_size(15)
legend_font.set_family('sans-serif')
if tick_font is None:
tick_font = FontProperties()
tick_font.set_size(15)
tick_font.set_family('sans-serif')
if title_font is None:
title_font = FontProperties()
title_font.set_size(30)
title_font.set_family('sans-serif')
    # Sets up LaTeX rendering if desired
if use_latex:
rc('text', usetex=True)
rc('font', **{'family': rc_fam, rc_fam: rc_font})
# Sets up the x ticks.
# Bar width is divided by two because the tick is assumed to be at the
# center of the bar.
x_tick = arange(num_samples*x_tick_interval)
x_max = x_min + num_samples*x_tick_interval
bar_left = x_tick - bar_width/2
# Creates the x tick labels.
if x_axis:
x_text_labels = map(str, sample_names)
else:
x_text_labels = ['']*num_samples
# Creates the y tick labels
if y_tick_labels is None:
y_tick_labels = arange(y_lims[1] + y_tick_interval, y_lims[0],
-y_tick_interval)
y_tick_labels = y_tick_labels - y_tick_interval
y_tick_labels[-1] = y_lims[0]
num_y_ticks = len(y_tick_labels)
if y_axis:
y_text_labels = map(str, y_tick_labels)
else:
y_text_labels = ['']*num_y_ticks
# Plots the data
fig = plt.figure()
fig.set_size_inches(fig_dims)
ax1 = plt.axes(Bbox(axis_dims))
patches_watch = []
for plot_count, category in enumerate(data_table):
bottom_bar = sum(data_table[0:plot_count, :], 0)
faces = ax1.bar(bar_left, category, bar_width, bottom_bar,
color=colormap[plot_count, :],
edgecolor=edgecolor[plot_count, :])
patches_watch.append(faces[0])
# The y-axis is reversed so the labels are in the same order as the
# colors in the legend
if match_legend:
plt.axis([x_min, x_max, y_lims[1], y_lims[0]])
# Sets up y labels if they are desired.
y_tick_labels = ax1.set_yticklabels(y_text_labels,
fontproperties=tick_font)
if y_label is not None:
ax1.set_ylabel(y_label, fontproperties=label_font)
# Set the x-axis labels
ax1.set_xticks(x_tick)
ax1.set_xticklabels(x_text_labels,
rotation=font_angle,
                        horizontalalignment=font_alignment,
fontproperties=label_font)
if x_label is not None:
ax1.set_xlabel(x_label, fontproperties=label_font)
if legend:
leg = plt.legend(patches_watch, group_names, prop=legend_font)
if legend_offset is not None:
leg.set_bbox_to_anchor((legend_offset[0], legend_offset[1]))
if isinstance(title, str):
plt.title(title, fontproperties=title_font)
plt.savefig(file_out, format=filetype)
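# A minimal usage sketch for render_barchart, guarded so that importing this
# module stays side-effect free. All values below (table, names, layout,
# output path) are illustrative assumptions only.
if __name__ == '__main__':
    from numpy import array
    example_table = array([[0.5, 0.2],
                           [0.3, 0.5],
                           [0.2, 0.3]])
    render_barchart(example_table,
                    group_names=['Firmicutes', 'Bacteroidetes', 'Other'],
                    sample_names=['Sample.A', 'Sample.B'],
                    axis_dims=array([[0.20, 0.30], [0.75, 0.95]]),
                    fig_dims=(6, 4),
                    file_out='example_bars.pdf',
                    colors=array([[0.8, 0.2, 0.2],
                                  [0.2, 0.6, 0.2],
                                  [0.7, 0.7, 0.7]]))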
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/features_detection/plot_local_binary_pattern.py | 12 | 6776 | """
===============================================
Local Binary Pattern for texture classification
===============================================
In this example, we will see how to classify textures based on LBP (Local
Binary Pattern). LBP looks at points surrounding a central point and tests
whether the surrounding points are greater than or less than the central point
(i.e. gives a binary result).
Before trying out LBP on an image, it helps to look at a schematic of LBPs.
The below code is just used to plot the schematic.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
METHOD = 'uniform'
plt.rcParams['font.size'] = 9
def plot_circle(ax, center, radius, color):
circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
ax.add_patch(circle)
def plot_lbp_model(ax, binary_values):
"""Draw the schematic for a local binary pattern."""
# Geometry spec
theta = np.deg2rad(45)
R = 1
r = 0.15
w = 1.5
gray = '0.5'
# Draw the central pixel.
plot_circle(ax, (0, 0), radius=r, color=gray)
# Draw the surrounding pixels.
for i, facecolor in enumerate(binary_values):
x = R * np.cos(i * theta)
y = R * np.sin(i * theta)
plot_circle(ax, (x, y), radius=r, color=str(facecolor))
# Draw the pixel grid.
for x in np.linspace(-w, w, 4):
ax.axvline(x, color=gray)
ax.axhline(x, color=gray)
# Tweak the layout.
ax.axis('image')
ax.axis('off')
size = w + 0.2
ax.set_xlim(-size, size)
ax.set_ylim(-size, size)
fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
binary_patterns = [np.zeros(8),
np.ones(8),
np.hstack([np.ones(4), np.zeros(4)]),
np.hstack([np.zeros(3), np.ones(5)]),
[1, 0, 0, 1, 1, 1, 0, 0]]
for ax, values, name in zip(axes, binary_patterns, titles):
plot_lbp_model(ax, values)
ax.set_title(name)
"""
.. image:: PLOT2RST.current_figure
The figure above shows example results with black (or white) representing
pixels that are less (or more) intense than the central pixel. When surrounding
pixels are all black or all white, then that image region is flat (i.e.
featureless). Groups of continuous black or white pixels are considered
"uniform" patterns that can be interpreted as corners or edges. If pixels
switch back-and-forth between black and white pixels, the pattern is considered
"non-uniform".
When using LBP to detect texture, you measure a collection of LBPs over an
image patch and look at the distribution of these LBPs. Let's apply LBP to
a brick texture.
"""
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data
from skimage.color import label2rgb
# settings for LBP
radius = 3
n_points = 8 * radius
def overlay_labels(image, lbp, labels):
mask = np.logical_or.reduce([lbp == each for each in labels])
return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
def highlight_bars(bars, indexes):
for i in indexes:
bars[i].set_facecolor('r')
image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)
def hist(ax, lbp):
    n_bins = int(lbp.max() + 1)
return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
facecolor='0.5')
# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()
titles = ('edge', 'flat', 'corner')
w = width = radius - 1
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
i_14 = n_points // 4 # 1/4th of the histogram
i_34 = 3 * (n_points // 4) # 3/4th of the histogram
corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
list(range(i_34 - w, i_34 + w + 1)))
label_sets = (edge_labels, flat_labels, corner_labels)
for ax, labels in zip(ax_img, label_sets):
ax.imshow(overlay_labels(image, lbp, labels))
for ax, labels, name in zip(ax_hist, label_sets, titles):
counts, _, bars = hist(ax, lbp)
highlight_bars(bars, labels)
ax.set_ylim(ymax=np.max(counts[:-1]))
ax.set_xlim(xmax=n_points + 2)
ax.set_title(name)
ax_hist[0].set_ylabel('Percentage')
for ax in ax_img:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The above plot highlights flat, edge-like, and corner-like regions of the
image.
The histogram of the LBP result is a good measure to classify textures. Here,
we test the histogram distributions against each other using the
Kullback-Leibler-Divergence.
"""
# settings for LBP
radius = 2
n_points = 8 * radius
def kullback_leibler_divergence(p, q):
p = np.asarray(p)
q = np.asarray(q)
filt = np.logical_and(p != 0, q != 0)
return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
def match(refs, img):
best_score = 10
best_name = None
lbp = local_binary_pattern(img, n_points, radius, METHOD)
    n_bins = int(lbp.max() + 1)
hist, _ = np.histogram(lbp, normed=True, bins=n_bins, range=(0, n_bins))
for name, ref in refs.items():
ref_hist, _ = np.histogram(ref, normed=True, bins=n_bins,
range=(0, n_bins))
score = kullback_leibler_divergence(hist, ref_hist)
if score < best_score:
best_score = score
best_name = name
return best_name
brick = data.load('brick.png')
grass = data.load('grass.png')
wall = data.load('rough-wall.png')
refs = {
'brick': local_binary_pattern(brick, n_points, radius, METHOD),
'grass': local_binary_pattern(grass, n_points, radius, METHOD),
'wall': local_binary_pattern(wall, n_points, radius, METHOD)
}
# classify rotated textures
print('Rotated images matched against references using LBP:')
print('original: brick, rotated: 30deg, match result: ',
match(refs, rotate(brick, angle=30, resize=False)))
print('original: brick, rotated: 70deg, match result: ',
match(refs, rotate(brick, angle=70, resize=False)))
print('original: grass, rotated: 145deg, match result: ',
match(refs, rotate(grass, angle=145, resize=False)))
# plot histograms of LBP of textures
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
figsize=(9, 6))
plt.gray()
ax1.imshow(brick)
ax1.axis('off')
hist(ax4, refs['brick'])
ax4.set_ylabel('Percentage')
ax2.imshow(grass)
ax2.axis('off')
hist(ax5, refs['grass'])
ax5.set_xlabel('Uniform LBP values')
ax3.imshow(wall)
ax3.axis('off')
hist(ax6, refs['wall'])
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
kose-y/pylearn2 | pylearn2/train_extensions/roc_auc.py | 30 | 4854 | """
TrainExtension subclass for calculating ROC AUC scores on monitoring
dataset(s), reported via monitor channels.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
try:
from sklearn.metrics import roc_auc_score
except ImportError:
roc_auc_score = None
import theano
from theano import gof, config
from theano import tensor as T
from pylearn2.train_extensions import TrainExtension
class RocAucScoreOp(gof.Op):
"""
Theano Op wrapping sklearn.metrics.roc_auc_score.
Parameters
----------
name : str, optional (default 'roc_auc')
Name of this Op.
use_c_code : WRITEME
"""
def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
super(RocAucScoreOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
"""
Calculate ROC AUC score.
Parameters
----------
y_true : tensor_like
Target class labels.
y_score : tensor_like
Predicted class labels or probabilities for positive class.
"""
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.scalar(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
"""
Calculate ROC AUC score.
Parameters
----------
node : Apply instance
Symbolic inputs and outputs.
inputs : list
Sequence of inputs.
output_storage : list
List of mutable 1-element lists.
"""
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
class RocAucChannel(TrainExtension):
"""
Adds a ROC AUC channel to the monitor for each monitoring dataset.
This monitor will return nan unless both classes are represented in
y_true. For this reason, it is recommended to set monitoring_batches
to 1, especially when using unbalanced datasets.
Parameters
----------
channel_name_suffix : str, optional (default 'roc_auc')
Channel name suffix.
positive_class_index : int, optional (default 1)
Index of positive class in predicted values.
negative_class_index : int or None, optional (default None)
Index of negative class in predicted values for calculation of
one vs. one performance. If None, uses all examples not in the
positive class (one vs. the rest).
"""
def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1,
negative_class_index=None):
self.channel_name_suffix = channel_name_suffix
self.positive_class_index = positive_class_index
self.negative_class_index = negative_class_index
def setup(self, model, dataset, algorithm):
"""
Add ROC AUC channels for monitoring dataset(s) to model.monitor.
Parameters
----------
model : object
The model being trained.
dataset : object
Training dataset.
algorithm : object
Training algorithm.
"""
m_space, m_source = model.get_monitoring_data_specs()
state, target = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
# one vs. the rest
if self.negative_class_index is None:
y = T.eq(y, self.positive_class_index)
# one vs. one
else:
pos = T.eq(y, self.positive_class_index)
neg = T.eq(y, self.negative_class_index)
keep = T.add(pos, neg).nonzero()
y = T.eq(y[keep], self.positive_class_index)
y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for dataset_name, dataset in algorithm.monitoring_dataset.items():
if dataset_name:
channel_name = '{0}_{1}'.format(dataset_name,
self.channel_name_suffix)
else:
channel_name = self.channel_name_suffix
model.monitor.add_channel(name=channel_name,
ipt=(state, target),
val=roc_auc,
data_specs=(m_space, m_source),
dataset=dataset)
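# A minimal sketch of using RocAucScoreOp directly, outside of a pylearn2
# monitor. The toy labels/scores below are assumptions for illustration, and
# the block is guarded so that importing this module stays side-effect free.
if __name__ == '__main__':
    y_true_sym = T.vector('y_true')
    y_score_sym = T.vector('y_score')
    auc = RocAucScoreOp()(y_true_sym, y_score_sym)
    score_fn = theano.function([y_true_sym, y_score_sym], auc)
    # Same toy example as the sklearn docs; expected AUC is roughly 0.75.
    print(score_fn(np.array([0, 0, 1, 1], dtype=config.floatX),
                   np.array([0.1, 0.4, 0.35, 0.8], dtype=config.floatX)))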
| bsd-3-clause |
cpcloud/ibis | ibis/impala/tests/test_udf.py | 1 | 18310 | import unittest
from decimal import Decimal
from posixpath import join as pjoin
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rules
import ibis.expr.types as ir
import ibis.impala as api # noqa: E402
import ibis.util as util
from ibis.common.exceptions import IbisTypeError
from ibis.expr.tests.mocks import MockConnection
from ibis.impala import ddl # noqa: E402
pytest.importorskip('hdfs')
pytest.importorskip('sqlalchemy')
pytest.importorskip('impala.dbapi')
pytestmark = [pytest.mark.impala, pytest.mark.udf]
class TestWrapping(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
self.i8 = self.table.tinyint_col
self.i16 = self.table.smallint_col
self.i32 = self.table.int_col
self.i64 = self.table.bigint_col
self.d = self.table.double_col
self.f = self.table.float_col
self.s = self.table.string_col
self.b = self.table.bool_col
self.t = self.table.timestamp_col
self.dec = self.con.table('tpch_customer').c_acctbal
self.all_cols = [
self.i8,
self.i16,
self.i32,
self.i64,
self.d,
self.f,
self.dec,
self.s,
self.b,
self.t,
]
def test_sql_generation(self):
func = api.scalar_function(['string'], 'string', name='Tester')
func.register('identity', 'udf_testing')
result = func('hello world')
assert (
ibis.impala.compile(result)
== "SELECT udf_testing.identity('hello world') AS `tmp`"
)
def test_sql_generation_from_infoclass(self):
func = api.wrap_udf('test.so', ['string'], 'string', 'info_test')
repr(func)
func.register('info_test', 'udf_testing')
result = func('hello world')
assert (
ibis.impala.compile(result)
== "SELECT udf_testing.info_test('hello world') AS `tmp`"
)
def test_udf_primitive_output_types(self):
types = [
('boolean', True, self.b),
('int8', 1, self.i8),
('int16', 1, self.i16),
('int32', 1, self.i32),
('int64', 1, self.i64),
('float', 1.0, self.f),
('double', 1.0, self.d),
('string', '1', self.s),
('timestamp', ibis.timestamp('1961-04-10'), self.t),
]
for t, sv, av in types:
func = self._register_udf([t], t, 'test')
ibis_type = dt.validate_type(t)
expr = func(sv)
assert type(expr) == type(
ibis_type.scalar_type()(expr.op())
) # noqa: E501, E721
expr = func(av)
assert type(expr) == type(
ibis_type.column_type()(expr.op())
) # noqa: E501, E721
def test_uda_primitive_output_types(self):
types = [
('boolean', True, self.b),
('int8', 1, self.i8),
('int16', 1, self.i16),
('int32', 1, self.i32),
('int64', 1, self.i64),
('float', 1.0, self.f),
('double', 1.0, self.d),
('string', '1', self.s),
('timestamp', ibis.timestamp('1961-04-10'), self.t),
]
for t, sv, av in types:
func = self._register_uda([t], t, 'test')
ibis_type = dt.validate_type(t)
expr1 = func(sv)
expr2 = func(sv)
expected_type1 = type(ibis_type.scalar_type()(expr1.op()))
expected_type2 = type(ibis_type.scalar_type()(expr2.op()))
assert isinstance(expr1, expected_type1)
assert isinstance(expr2, expected_type2)
def test_decimal(self):
func = self._register_udf(['decimal(9,0)'], 'decimal(9,0)', 'test')
expr = func(1.0)
assert type(expr) == ir.DecimalScalar
expr = func(self.dec)
assert type(expr) == ir.DecimalColumn
def test_udf_invalid_typecasting(self):
cases = [
('int8', self.all_cols[:1], self.all_cols[1:]),
('int16', self.all_cols[:2], self.all_cols[2:]),
('int32', self.all_cols[:3], self.all_cols[3:]),
('int64', self.all_cols[:4], self.all_cols[4:]),
('boolean', [], self.all_cols[:8] + self.all_cols[9:]),
# allowing double here for now
('float', self.all_cols[:6], [self.s, self.b, self.t]),
('double', self.all_cols[:6], [self.s, self.b, self.t]),
('string', [], self.all_cols[:7] + self.all_cols[8:]),
('timestamp', [], self.all_cols[:-1]),
('decimal', self.all_cols[:7], self.all_cols[7:]),
]
for t, valid_casts, invalid_casts in cases:
func = self._register_udf([t], 'int32', 'typecast')
for expr in valid_casts:
func(expr)
for expr in invalid_casts:
self.assertRaises(IbisTypeError, func, expr)
def test_mult_args(self):
func = self._register_udf(
['int32', 'double', 'string', 'boolean', 'timestamp'],
'int64',
'mult_types',
)
expr = func(self.i32, self.d, self.s, self.b, self.t)
assert issubclass(type(expr), ir.ColumnExpr)
expr = func(1, 1.0, 'a', True, ibis.timestamp('1961-04-10'))
assert issubclass(type(expr), ir.ScalarExpr)
def _register_udf(self, inputs, output, name):
func = api.scalar_function(inputs, output, name=name)
func.register(name, 'ibis_testing')
return func
def _register_uda(self, inputs, output, name):
func = api.aggregate_function(inputs, output, name=name)
func.register(name, 'ibis_testing')
return func
@pytest.fixture(scope='session')
def udfcon(con):
con.disable_codegen(False)
try:
yield con
finally:
con.disable_codegen(True)
@pytest.fixture(scope='session')
def alltypes(udfcon):
return udfcon.table('functional_alltypes')
@pytest.fixture(scope='session')
def udf_ll(udfcon, test_data_dir):
return pjoin(test_data_dir, 'udf/udf-sample.ll')
@pytest.fixture(scope='session')
def uda_ll(udfcon, test_data_dir):
return pjoin(test_data_dir, 'udf/uda-sample.ll')
@pytest.fixture(scope='session')
def uda_so(udfcon, test_data_dir):
return pjoin(test_data_dir, 'udf/libudasample.so')
@pytest.mark.parametrize(
('typ', 'lit_val', 'col_name'),
[
('boolean', True, 'bool_col'),
('int8', ibis.literal(5), 'tinyint_col'),
('int16', ibis.literal(2 ** 10), 'smallint_col'),
('int32', ibis.literal(2 ** 17), 'int_col'),
('int64', ibis.literal(2 ** 33), 'bigint_col'),
('float', ibis.literal(3.14), 'float_col'),
('double', ibis.literal(3.14), 'double_col'),
('string', ibis.literal('ibis'), 'string_col'),
('timestamp', ibis.timestamp('1961-04-10'), 'timestamp_col'),
],
)
def test_identity_primitive_types(
udfcon, alltypes, test_data_db, udf_ll, typ, lit_val, col_name
):
col_val = alltypes[col_name]
identity_func_testing(udf_ll, udfcon, test_data_db, typ, lit_val, col_val)
def test_decimal(udfcon, test_data_db, udf_ll):
col = udfcon.table('tpch_customer').c_acctbal
literal = ibis.literal(1).cast('decimal(12,2)')
name = '__tmp_udf_' + util.guid()
func = udf_creation_to_op(
udf_ll,
udfcon,
test_data_db,
name,
'Identity',
['decimal(12,2)'],
'decimal(12,2)',
)
expr = func(literal)
assert issubclass(type(expr), ir.ScalarExpr)
result = udfcon.execute(expr)
assert result == Decimal(1)
expr = func(col)
assert issubclass(type(expr), ir.ColumnExpr)
udfcon.execute(expr)
def test_mixed_inputs(udfcon, alltypes, test_data_db, udf_ll):
name = 'two_args'
symbol = 'TwoArgs'
inputs = ['int32', 'int32']
output = 'int32'
func = udf_creation_to_op(
udf_ll, udfcon, test_data_db, name, symbol, inputs, output
)
expr = func(alltypes.int_col, 1)
assert issubclass(type(expr), ir.ColumnExpr)
udfcon.execute(expr)
expr = func(1, alltypes.int_col)
assert issubclass(type(expr), ir.ColumnExpr)
udfcon.execute(expr)
expr = func(alltypes.int_col, alltypes.tinyint_col)
udfcon.execute(expr)
def test_implicit_typecasting(udfcon, alltypes, test_data_db, udf_ll):
col = alltypes.tinyint_col
literal = ibis.literal(1000)
identity_func_testing(udf_ll, udfcon, test_data_db, 'int32', literal, col)
def identity_func_testing(
udf_ll, udfcon, test_data_db, datatype, literal, column
):
inputs = [datatype]
name = '__tmp_udf_' + util.guid()
func = udf_creation_to_op(
udf_ll, udfcon, test_data_db, name, 'Identity', inputs, datatype
)
expr = func(literal)
assert issubclass(type(expr), ir.ScalarExpr)
result = udfcon.execute(expr)
# Hacky
if datatype == 'timestamp':
assert type(result) == pd.Timestamp
else:
lop = literal.op()
if isinstance(lop, ir.Literal):
np.testing.assert_allclose(lop.value, 5)
else:
np.testing.assert_allclose(result, udfcon.execute(literal), 5)
expr = func(column)
assert issubclass(type(expr), ir.ColumnExpr)
udfcon.execute(expr)
def test_mult_type_args(udfcon, alltypes, test_data_db, udf_ll):
symbol = 'AlmostAllTypes'
name = 'most_types'
inputs = [
'string',
'boolean',
'int8',
'int16',
'int32',
'int64',
'float',
'double',
]
output = 'int32'
func = udf_creation_to_op(
udf_ll, udfcon, test_data_db, name, symbol, inputs, output
)
expr = func('a', True, 1, 1, 1, 1, 1.0, 1.0)
result = udfcon.execute(expr)
assert result == 8
table = alltypes
expr = func(
table.string_col,
table.bool_col,
table.tinyint_col,
table.tinyint_col,
table.smallint_col,
table.smallint_col,
1.0,
1.0,
)
udfcon.execute(expr)
def test_all_type_args(udfcon, test_data_db, udf_ll):
pytest.skip('failing test, to be fixed later')
symbol = 'AllTypes'
name = 'all_types'
inputs = [
'string',
'boolean',
'int8',
'int16',
'int32',
'int64',
'float',
'double',
'decimal',
]
output = 'int32'
func = udf_creation_to_op(
udf_ll, udfcon, test_data_db, name, symbol, inputs, output
)
expr = func('a', True, 1, 1, 1, 1, 1.0, 1.0, 1.0)
result = udfcon.execute(expr)
assert result == 9
def test_udf_varargs(udfcon, alltypes, udf_ll, test_data_db):
t = alltypes
name = 'add_numbers_{0}'.format(util.guid()[:4])
input_sig = rules.varargs(rules.double)
func = api.wrap_udf(udf_ll, input_sig, 'double', 'AddNumbers', name=name)
func.register(name, test_data_db)
udfcon.create_function(func, database=test_data_db)
expr = func(t.double_col, t.double_col)
expr.execute()
def test_drop_udf_not_exists(udfcon):
random_name = util.guid()
with pytest.raises(Exception):
udfcon.drop_udf(random_name)
def test_drop_uda_not_exists(udfcon):
random_name = util.guid()
with pytest.raises(Exception):
udfcon.drop_uda(random_name)
def udf_creation_to_op(
udf_ll, udfcon, test_data_db, name, symbol, inputs, output
):
func = api.wrap_udf(udf_ll, inputs, output, symbol, name)
# self.temp_udfs.append((name, inputs))
udfcon.create_function(func, database=test_data_db)
func.register(name, test_data_db)
assert udfcon.exists_udf(name, test_data_db)
return func
def test_ll_uda_not_supported(uda_ll):
# LLVM IR UDAs are not supported as of Impala 2.2
with pytest.raises(com.IbisError):
conforming_wrapper(uda_ll, ['double'], 'double', 'Variance')
def conforming_wrapper(
where, inputs, output, prefix, serialize=True, name=None
):
kwds = {'name': name}
if serialize:
kwds['serialize_fn'] = '{0}Serialize'.format(prefix)
return api.wrap_uda(
where,
inputs,
output,
'{0}Update'.format(prefix),
init_fn='{0}Init'.format(prefix),
merge_fn='{0}Merge'.format(prefix),
finalize_fn='{0}Finalize'.format(prefix),
**kwds,
)
@pytest.fixture
def wrapped_count_uda(uda_so):
name = 'user_count_{0}'.format(util.guid())
return api.wrap_uda(uda_so, ['int32'], 'int64', 'CountUpdate', name=name)
def test_count_uda(udfcon, alltypes, test_data_db, wrapped_count_uda):
func = wrapped_count_uda
func.register(func.name, test_data_db)
udfcon.create_function(func, database=test_data_db)
# it works!
func(alltypes.int_col).execute()
# self.temp_udas.append((func.name, ['int32']))
def test_list_udas(udfcon, temp_database, wrapped_count_uda):
func = wrapped_count_uda
db = temp_database
udfcon.create_function(func, database=db)
funcs = udfcon.list_udas(database=db)
f = funcs[0]
assert f.name == func.name
assert f.inputs == func.inputs
assert f.output == func.output
def test_drop_database_with_udfs_and_udas(
    udfcon, temp_database, wrapped_count_uda, udf_ll
):
uda1 = wrapped_count_uda
udf1 = api.wrap_udf(
udf_ll,
['boolean'],
'boolean',
'Identity',
'udf_{0}'.format(util.guid()),
)
db = temp_database
udfcon.create_database(db)
udfcon.create_function(uda1, database=db)
udfcon.create_function(udf1, database=db)
# drop happens in test tear down
class TestUDFDDL(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
self.name = 'test_name'
self.inputs = ['string', 'string']
self.output = 'int64'
def test_create_udf(self):
func = api.wrap_udf(
'/foo/bar.so',
self.inputs,
self.output,
so_symbol='testFunc',
name=self.name,
)
stmt = ddl.CreateUDF(func)
result = stmt.compile()
expected = (
"CREATE FUNCTION `test_name`(string, string) "
"returns bigint "
"location '/foo/bar.so' symbol='testFunc'"
)
assert result == expected
def test_create_udf_type_conversions(self):
inputs = ['string', 'int8', 'int16', 'int32']
func = api.wrap_udf(
'/foo/bar.so',
inputs,
self.output,
so_symbol='testFunc',
name=self.name,
)
stmt = ddl.CreateUDF(func)
# stmt = ddl.CreateFunction('/foo/bar.so', 'testFunc',
# ,
# self.output, self.name)
result = stmt.compile()
expected = (
"CREATE FUNCTION `test_name`(string, tinyint, "
"smallint, int) returns bigint "
"location '/foo/bar.so' symbol='testFunc'"
)
assert result == expected
def test_delete_udf_simple(self):
stmt = ddl.DropFunction(self.name, self.inputs)
result = stmt.compile()
expected = "DROP FUNCTION `test_name`(string, string)"
assert result == expected
def test_delete_udf_if_exists(self):
stmt = ddl.DropFunction(self.name, self.inputs, must_exist=False)
result = stmt.compile()
expected = "DROP FUNCTION IF EXISTS `test_name`(string, string)"
assert result == expected
def test_delete_udf_aggregate(self):
stmt = ddl.DropFunction(self.name, self.inputs, aggregate=True)
result = stmt.compile()
expected = "DROP AGGREGATE FUNCTION `test_name`(string, string)"
assert result == expected
def test_delete_udf_db(self):
stmt = ddl.DropFunction(self.name, self.inputs, database='test')
result = stmt.compile()
expected = "DROP FUNCTION test.`test_name`(string, string)"
assert result == expected
def test_create_uda(self):
def make_ex(serialize=False):
if serialize:
serialize = "\nserialize_fn='Serialize'"
else:
serialize = ""
return (
(
"CREATE AGGREGATE FUNCTION "
"bar.`test_name`(string, string)"
" returns bigint location '/foo/bar.so'"
"\ninit_fn='Init'"
"\nupdate_fn='Update'"
"\nmerge_fn='Merge'"
)
+ serialize
+ ("\nfinalize_fn='Finalize'")
)
for ser in [True, False]:
func = api.wrap_uda(
'/foo/bar.so',
self.inputs,
self.output,
update_fn='Update',
init_fn='Init',
merge_fn='Merge',
finalize_fn='Finalize',
serialize_fn='Serialize' if ser else None,
)
stmt = ddl.CreateUDA(func, name=self.name, database='bar')
result = stmt.compile()
expected = make_ex(ser)
assert result == expected
def test_list_udf(self):
stmt = ddl.ListFunction('test')
result = stmt.compile()
expected = 'SHOW FUNCTIONS IN test'
assert result == expected
def test_list_udfs_like(self):
stmt = ddl.ListFunction('test', like='identity')
result = stmt.compile()
expected = "SHOW FUNCTIONS IN test LIKE 'identity'"
assert result == expected
def test_list_udafs(self):
stmt = ddl.ListFunction('test', aggregate=True)
result = stmt.compile()
expected = 'SHOW AGGREGATE FUNCTIONS IN test'
assert result == expected
def test_list_udafs_like(self):
stmt = ddl.ListFunction('test', like='identity', aggregate=True)
result = stmt.compile()
expected = "SHOW AGGREGATE FUNCTIONS IN test LIKE 'identity'"
assert result == expected
| apache-2.0 |
gwulfs/zipline | zipline/data/treasuries.py | 29 | 4671 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import pandas as pd
import requests
from collections import OrderedDict
import xml.etree.ElementTree as ET
from six import iteritems
from . loader_utils import (
guarded_conversion,
safe_int,
Mapping,
date_conversion,
source_to_records
)
def get_treasury_date(dstring):
return date_conversion(dstring.split("T")[0], date_pattern='%Y-%m-%d',
to_utc=False)
def get_treasury_rate(string_val):
val = guarded_conversion(float, string_val)
if val is not None:
val = round(val / 100.0, 4)
return val
_CURVE_MAPPINGS = {
'tid': (safe_int, "Id"),
'date': (get_treasury_date, "NEW_DATE"),
'1month': (get_treasury_rate, "BC_1MONTH"),
'3month': (get_treasury_rate, "BC_3MONTH"),
'6month': (get_treasury_rate, "BC_6MONTH"),
'1year': (get_treasury_rate, "BC_1YEAR"),
'2year': (get_treasury_rate, "BC_2YEAR"),
'3year': (get_treasury_rate, "BC_3YEAR"),
'5year': (get_treasury_rate, "BC_5YEAR"),
'7year': (get_treasury_rate, "BC_7YEAR"),
'10year': (get_treasury_rate, "BC_10YEAR"),
'20year': (get_treasury_rate, "BC_20YEAR"),
'30year': (get_treasury_rate, "BC_30YEAR"),
}
def treasury_mappings(mappings):
return {key: Mapping(*value)
for key, value
in iteritems(mappings)}
class iter_to_stream(object):
"""
Exposes an iterable as an i/o stream
"""
def __init__(self, iterable):
self.buffered = ""
self.iter = iter(iterable)
def read(self, size):
result = ""
while size > 0:
data = self.buffered or next(self.iter, None)
self.buffered = ""
if data is None:
break
size -= len(data)
if size < 0:
data, self.buffered = data[:size], data[size:]
result += data
return result
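# Illustrative (assumed) behaviour of iter_to_stream:
#
#     stream = iter_to_stream(["<feed>", "<entry/>", "</feed>"])
#     stream.read(10)   # -> "<feed><ent"; "ry/>" stays buffered for next read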
def get_localname(element):
qtag = ET.QName(element.tag).text
    return re.match(r"(\{.*\})(.*)", qtag).group(2)
def get_treasury_source():
url = """\
http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData\
"""
res = requests.get(url, stream=True)
stream = iter_to_stream(res.text.splitlines())
elements = ET.iterparse(stream, ('end', 'start-ns', 'end-ns'))
namespaces = OrderedDict()
properties_xpath = ['']
def updated_namespaces():
if '' in namespaces and 'm' in namespaces:
properties_xpath[0] = "{%s}content/{%s}properties" % (
namespaces[''], namespaces['m']
)
else:
properties_xpath[0] = ''
for event, element in elements:
if event == 'end':
tag = get_localname(element)
if tag == "entry":
properties = element.find(properties_xpath[0])
datum = {get_localname(node): node.text
for node in properties if ET.iselement(node)}
# clear the element after we've dealt with it:
element.clear()
yield datum
elif event == 'start-ns':
namespaces[element[0]] = element[1]
updated_namespaces()
elif event == 'end-ns':
namespaces.popitem()
updated_namespaces()
def get_treasury_data():
mappings = treasury_mappings(_CURVE_MAPPINGS)
source = get_treasury_source()
return source_to_records(mappings, source)
def dataconverter(s):
try:
return float(s) / 100
except:
return np.nan
def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "http://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
squeeze=True)
| apache-2.0 |
mhue/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
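    Examples
    --------
    A hypothetical call; the dataset name and root folder below are
    illustrative assumptions, not bundled defaults::

        dataset = load_mlcomp('20news-18828', 'train',
                              mlcomp_root='~/data/mlcomp')
        print(dataset.target_names)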
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
Titan-C/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
erdc-cm/air-water-vv | 3d/linear_waves_flat_3D/linear_waves_flat_3D_01_GAZ/3d_geom.py | 3 | 2270 | ###############INPUT
from tank3D import *  # provides the vertices, segments and facets used below
#=================================
# Vertex coordinates
vx=[]
vy=[]
vz=[]
for coord in vertices:
vx.append(coord[0])
vy.append(coord[1])
vz.append(coord[2])
# Define which faces to show: FaceX, FaceY, FaceZ, allFaces
setface = "FaceX"
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#def zz(x,y):
# Figure
fig = plt.figure()
ax = fig.gca(projection='3d')
#Plotting vertices
sc1=ax.scatter(vx, vy, vz)
#Plotting line segments
for seg in segments:
l1=ax.plot([vx[seg[0]],vx[seg[1]]],[vy[seg[0]],vy[seg[1]]],[vz[seg[0]],vz[seg[1]]])
# Plotting facets
for face in facets:
#Coordinates of face vertices
fx = []
fy = []
fz = []
#Counter for determining if it is a X,Y,Z facet ---
icount = np.array([0.,0.,0.])
jj=-1
for vertex in face[0]:
jj+=1
fx.append(vx[vertex])
fy.append(vy[vertex])
fz.append(vz[vertex])
icount[0]=icount[0] + abs(fx[jj]-fx[max(0,jj-1)])
icount[1]=icount[1] + abs(fy[jj]-fy[max(0,jj-1)])
#print abs(fy[jj]-fy[max(0,jj-1)])
icount[2]+=abs(fz[jj]-fz[max(0,jj-1)])
#print abs(fz[jj]-fz[max(0,jj-1)])
#fx.append(vx[face[0]]+1e-30)
#fy.append(vy[face[0]]+1e-30)
#fz.append(vz[face[0]]+1e-30)
print icount
    if icount[0]<=1e-30 and ((setface == "FaceX") or (setface == "allFaces")):
print "FaceX"
Y,Z = np.meshgrid(fy,fz)
Z1,X = np.meshgrid(fz,fx)
s1=ax.plot_surface(X,Y,Z,rstride=1, cstride=1,color="r",alpha=0.1)
    elif icount[1]<=1e-30 and ((setface == "FaceY") or (setface == "allFaces")):
print "FaceY"
X,Z = np.meshgrid(fx,fz)
Z1,Y = np.meshgrid(fz,fy)
s2=ax.plot_surface(X,Y,Z,rstride=1, cstride=1,color="r",alpha=0.1)
    elif icount[2]<=1e-30 and ((setface == "FaceZ") or (setface == "allFaces")):
print "FaceZ"
X,Y = np.meshgrid(fx,fy)
Y1,Z = np.meshgrid(fy,fz)
s3=ax.plot_surface(X,Y,Z,rstride=1, cstride=1,color="r",alpha=0.1)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
| mit |
hlin117/statsmodels | statsmodels/tsa/base/tests/test_datetools.py | 28 | 5620 | from datetime import datetime
import numpy.testing as npt
from statsmodels.tsa.base.datetools import (_date_from_idx,
_idx_from_dates, date_parser, date_range_str, dates_from_str,
dates_from_range, _infer_freq, _freq_to_pandas)
from pandas import DatetimeIndex, PeriodIndex
def test_date_from_idx():
d1 = datetime(2008, 12, 31)
idx = 15
npt.assert_equal(_date_from_idx(d1, idx, 'Q'), datetime(2012, 9, 30))
npt.assert_equal(_date_from_idx(d1, idx, 'A'), datetime(2023, 12, 31))
npt.assert_equal(_date_from_idx(d1, idx, 'B'), datetime(2009, 1, 21))
npt.assert_equal(_date_from_idx(d1, idx, 'D'), datetime(2009, 1, 15))
npt.assert_equal(_date_from_idx(d1, idx, 'W'), datetime(2009, 4, 12))
npt.assert_equal(_date_from_idx(d1, idx, 'M'), datetime(2010, 3, 31))
def test_idx_from_date():
d1 = datetime(2008, 12, 31)
idx = 15
npt.assert_equal(_idx_from_dates(d1, datetime(2012, 9, 30), 'Q'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2023, 12, 31), 'A'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 21), 'B'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 15), 'D'), idx)
# move d1 and d2 forward to end of week
npt.assert_equal(_idx_from_dates(datetime(2009, 1, 4),
datetime(2009, 4, 17), 'W'), idx-1)
npt.assert_equal(_idx_from_dates(d1, datetime(2010, 3, 31), 'M'), idx)
def test_regex_matching_month():
t1 = "1999m4"
t2 = "1999:m4"
t3 = "1999:mIV"
t4 = "1999mIV"
result = datetime(1999, 4, 30)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_regex_matching_quarter():
t1 = "1999q4"
t2 = "1999:q4"
t3 = "1999:qIV"
t4 = "1999qIV"
result = datetime(1999, 12, 31)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_dates_from_range():
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 12, 31, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 12, 31, 0, 0),
datetime(1962, 3, 31, 0, 0),
datetime(1962, 6, 30, 0, 0)]
dt_range = dates_from_range('1959q1', '1962q2')
npt.assert_(results == dt_range)
# test with starting period not the first with length
results = results[2:]
dt_range = dates_from_range('1959q3', length=len(results))
npt.assert_(results == dt_range)
# check month
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 4, 30, 0, 0),
datetime(1959, 5, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 7, 31, 0, 0),
datetime(1959, 8, 31, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 10, 31, 0, 0),
datetime(1959, 11, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 1, 31, 0, 0),
datetime(1960, 2, 28, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 4, 30, 0, 0),
datetime(1960, 5, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 7, 31, 0, 0),
datetime(1960, 8, 31, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 10, 31, 0, 0),
datetime(1960, 12, 31, 0, 0),
datetime(1961, 1, 31, 0, 0),
datetime(1961, 2, 28, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 4, 30, 0, 0),
datetime(1961, 5, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 7, 31, 0, 0),
datetime(1961, 8, 31, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 10, 31, 0, 0)]
dt_range = dates_from_range("1959m3", length=len(results))
def test_infer_freq():
d1 = datetime(2008, 12, 31)
d2 = datetime(2012, 9, 30)
b = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['B']).values
d = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['D']).values
w = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['W']).values
m = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['M']).values
a = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['A']).values
q = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['Q']).values
assert _infer_freq(w) == 'W-SUN'
assert _infer_freq(a) == 'A-DEC'
assert _infer_freq(q) == 'Q-DEC'
assert _infer_freq(w[:3]) == 'W-SUN'
assert _infer_freq(a[:3]) == 'A-DEC'
assert _infer_freq(q[:3]) == 'Q-DEC'
assert _infer_freq(b[2:5]) == 'B'
assert _infer_freq(b[:3]) == 'D'
assert _infer_freq(b) == 'B'
assert _infer_freq(d) == 'D'
assert _infer_freq(m) == 'M'
assert _infer_freq(d[:3]) == 'D'
assert _infer_freq(m[:3]) == 'M'
def test_period_index():
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
npt.assert_(_infer_freq(dates) == "M")
| bsd-3-clause |
yl565/statsmodels | statsmodels/sandbox/examples/example_garch.py | 31 | 2294 | import numpy as np
import matplotlib.pyplot as plt
#import scikits.timeseries as ts
#import scikits.timeseries.lib.plotlib as tpl
import statsmodels.api as sm
#from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
#dta2 = ts.tsfromtxt(r'gspc_table.csv',
# datecols=0, skiprows=0, delimiter=',',names=True, freq='D')
#print dta2
aa=np.genfromtxt(r'gspc_table.csv', skip_header=0, delimiter=',', names=True)
cl = aa['Close']
ret = np.diff(np.log(cl))[-2000:]*1000.
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.1, 0.1, 0.1, 0.1])
ggres = ggmod.fit(start_params=np.array([-0.1, 0.1, 0.1, 0.0]),
maxiter=1000,method='bfgs')
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
use_rpy = False
if use_rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = ret - ret.mean(), include_mean=False)
f = r.formula('~arma(1,1) + ~garch(1, 1)')
fit = r.garchFit(f, data = ret)
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, ret - ret.mean())[0], [0.01, 0.1, 0.1])
print(g11res)
llf = loglike_GARCH11(g11res, ret - ret.mean())
print(llf[0])
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1,-0.1, 0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)#, method='ncg')
print('ggres0.params', ggres0.params)
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1,-0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
ggmod._start_params = start_params
ggres = ggmod.fit(start_params=start_params, maxiter=1000)#,method='bfgs')
print('ggres.params', ggres.params)
| bsd-3-clause |
BlaisProteomics/mzStudio | mzStudio/mzGUI_standalone.py | 1 | 25426 | # Copyright 2008 Dana-Farber Cancer Institute
# multiplierz is distributed under the terms of the GNU Lesser General Public License
#
# This file is part of multiplierz.
#
# multiplierz is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# multiplierz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with multiplierz. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import wx
#import wxmpl
from numpy import array, hypot
from multiplierz.mzTools import mz_image
#from mzDesktop import install_dir, settings
MZ_EXT = ('.raw', '.wiff', '.mzml', '.mzml.gz')
MZ_EXT_2 = MZ_EXT + tuple((e + '.lnk') for e in MZ_EXT) # with shortcuts included
MZ_WILDCARD = 'MS Data Files (%s)|%s' % ('; '.join(('*' + e) for e in MZ_EXT),
'; '.join(('*' + e) for e in MZ_EXT))
class mzApp():
def __init__(self):
#self.app = wx.PySimpleApp()
self.app = wx.App(False)
def launch(self):
self.app.MainLoop()
#class mzForm(wx.Frame):
#def __init__(self, parent=None, title="mzForm", items=None, function=None, size=(600,450)):
#wx.Frame.__init__(self, parent, -1, title, size=size)
#self.SetIcon(wx.Icon(os.path.join(install_dir, 'images', 'icons', 'multiplierz.ico'),
#wx.BITMAP_TYPE_ICO))
#items = items or []
#self.function = function
#self.files = dict()
#self.variables = set()
#self.pane = wx.Panel(self, -1)
## status bar
#self.statusbar = self.CreateStatusBar()
#self.statusbar.SetFieldsCount(2)
#self.statusbar.SetStatusText("Ready", 0)
#gbs = wx.GridBagSizer(10,15)
#for i,(ctrl_type, label, variable, value) in enumerate(items):
#self.variables.add(variable)
#gbs.Add( wx.StaticText(self.pane, -1, label, style=wx.ALIGN_RIGHT),
#(i,0), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT )
#ctrl_type = ctrl_type.lower()
#if ctrl_type == 'file' or ctrl_type == 'files':
#txt_ctrl = wx.TextCtrl(self.pane, -1, value, name=variable)
#gbs.Add( txt_ctrl,
#(i,1), flag=wx.EXPAND )
#browse_btn = wx.Button(self.pane, -1, 'Browse')
#gbs.Add( browse_btn,
#(i,2), flag=wx.EXPAND )
#if ctrl_type == 'file':
#browse_btn.Bind(wx.EVT_BUTTON, lambda e: self.on_file_browse(txt_ctrl))
#self.files[str(variable)] = ''
#else:
#browse_btn.Bind(wx.EVT_BUTTON, lambda e: self.on_files_browse(txt_ctrl))
#self.files[str(variable)] = []
#else:
#if ctrl_type == 'text':
#gbs.Add( wx.TextCtrl(self.pane, -1, value, name=variable),
#(i,1), (1,2), flag=wx.EXPAND|wx.ALIGN_CENTER )
#elif ctrl_type == 'radio':
#gbs.Add( wx.RadioBox(self.pane, -1, label='', choices=value, name=variable),
#(i,1), (1,2), flag=wx.EXPAND|wx.ALIGN_CENTER )
#elif ctrl_type == 'check':
#check_box = wx.CheckBox(self.pane, -1, label='', name=variable)
#gbs.Add( check_box,
#(i,1), (1,2), flag=wx.EXPAND|wx.ALIGN_CENTER )
#check_box.SetValue(value)
## submit button
#submit_btn = wx.Button(self.pane, -1, "Submit")
#gbs.Add( submit_btn,
#(i+1,0), (1,3), flag=wx.ALIGN_CENTER )
#self.Bind(wx.EVT_BUTTON, self.on_submit, submit_btn)
#gbs.AddGrowableCol(1, 1)
#gbs.AddGrowableRow(i+1, 1)
#box = wx.BoxSizer()
#box.Add(gbs, 1, wx.ALL|wx.EXPAND, 5)
#self.pane.SetSizerAndFit(box)
#def on_file_browse(self, text_ctrl):
#file_chooser = wx.FileDialog(None, "Choose File:", style=wx.FD_OPEN)
#if file_chooser.ShowModal() == wx.ID_OK:
#self.files[str(text_ctrl.GetName())] = file_chooser.GetPath()
#text_ctrl.SetValue(file_chooser.GetPath())
#text_ctrl.SetToolTipString(file_chooser.GetPath())
#else:
#self.files[str(text_ctrl.GetName())] = ''
#text_ctrl.SetValue('')
#text_ctrl.SetToolTipString('')
#file_chooser.Destroy()
#def on_files_browse(self, text_ctrl):
#file_chooser = wx.FileDialog(None, "Choose Files:", style=wx.FD_MULTIPLE)
#if file_chooser.ShowModal() == wx.ID_OK:
#self.files[str(text_ctrl.GetName())] = file_chooser.GetPaths()
#text_ctrl.SetValue('; '.join(file_chooser.GetPaths()))
#text_ctrl.SetToolTipString('\n'.join(file_chooser.GetPaths()))
#else:
#self.files[str(text_ctrl.GetName())] = []
#text_ctrl.SetValue('')
#text_ctrl.SetToolTipString('')
#file_chooser.Destroy()
#def on_submit(self, event):
#self.statusbar.SetStatusText("Working...", 0)
#wx.BeginBusyCursor(wx.HOURGLASS_CURSOR)
##Get Variables:
#values = self.files.copy()
#for c in self.pane.GetChildren():
#if c.GetName() in self.variables and c.GetName() not in self.files:
#if isinstance(c, wx.RadioBox):
#values[str(c.GetName())] = c.GetStringSelection()
#elif not isinstance(c, wx.StaticText):
#values[str(c.GetName())] = c.GetValue()
#self.function(**values)
#self.statusbar.SetStatusText("Ready", 0)
#wx.EndBusyCursor()
#class mzPlot(wx.Frame):
#def __init__(self, parent=None, title="mzPlot", size=(600,450)):
#wx.Frame.__init__(self, parent, -1, title, size=size)
#self.xy_data = None
#self.last_anno = None
#self.tooltip_str = '%%3.1f, %%3d' # default tooltip string
##Icon
#self.SetIcon(wx.Icon(os.path.join(install_dir, 'images', 'icons', 'multiplierz.ico'),
#wx.BITMAP_TYPE_ICO))
##add menu bar
#menu_bar = wx.MenuBar()
##Edit Menu
#edit_menu = wx.Menu()
#change_title = edit_menu.Append(-1, 'Change &Title\tCtrl+T', 'Change Plot Title')
#self.Bind(wx.EVT_MENU, self.on_title, change_title)
#x_label = edit_menu.Append(-1, 'Change &X Axis Label\tCtrl+X', 'Change X Axis Label')
#self.Bind(wx.EVT_MENU, self.on_xlabel, x_label)
#y_label = edit_menu.Append(-1, 'Change &Y Axis Label\tCtrl+Y', 'Change Y Axis Label')
#self.Bind(wx.EVT_MENU, self.on_ylabel, y_label)
#menu_bar.Append(edit_menu, "&Edit")
#save_menu = wx.Menu()
#save_image = save_menu.Append(-1, '&Save Image\tCtrl+S', 'Save Plot as Image')
#self.Bind(wx.EVT_MENU, self.on_save, save_image)
#menu_bar.Append(save_menu, "&Save")
#resize_menu = wx.Menu()
#resize_800 = resize_menu.Append(-1, "800x600\tAlt+1", "Resize Plot to 800x600")
#self.Bind(wx.EVT_MENU, lambda e: self.on_resize((800,600)), resize_800)
#resize_1200 = resize_menu.Append(-1, "1200x900\tAlt+2", "Resize Plot to 1200x900")
#self.Bind(wx.EVT_MENU, lambda e: self.on_resize((1200,900)), resize_1200)
#resize_1400 = resize_menu.Append(-1, "1400x1050\tAlt+3", "Resize Plot to 1400x1050")
#self.Bind(wx.EVT_MENU, lambda e: self.on_resize((1400,1050)), resize_1400)
#menu_bar.Append(resize_menu, "&Resize")
#self.SetMenuBar(menu_bar)
#self.plot_panel = wxmpl.PlotPanel(self, -1, (1.6, 1.2))
#self.plot_panel.mpl_connect('button_release_event', self.on_click)
#self.figure = self.plot_panel.get_figure()
#a = self.figure.add_axes([0.125, 0.1, 0.775, 0.8])
#a.set_title(title)
#self.plot_panel.draw()
#box = wx.BoxSizer()
#box.Add(self.plot_panel, 1, wx.EXPAND, 0)
#self.SetSizerAndFit(box)
#self.SetSize(size)
#def on_resize(self, size):
#self.SetSize(size)
#self.SendSizeEvent()
#def on_title(self, event):
#with wx.TextEntryDialog(self, 'Title this graph',
#'Enter Graph Title',
#self.GetTitle()) as title_dlg:
#if title_dlg.ShowModal() == wx.ID_OK:
#title = title_dlg.GetValue()
#self.SetTitle(title)
#self.figure.get_axes()[0].set_title(title)
#self.plot_panel.draw()
#def on_xlabel(self, event):
#with wx.TextEntryDialog(self, 'Change X-Axis Label',
#'Enter X-Axis Label',
#self.figure.get_axes()[0].get_xlabel()) as xlabel_dlg:
#if xlabel_dlg.ShowModal() == wx.ID_OK:
#title = xlabel_dlg.GetValue()
#self.figure.get_axes()[0].set_xlabel(title)
#self.plot_panel.draw()
#def on_ylabel(self, event):
#with wx.TextEntryDialog(self, 'Change Y-Axis Label',
#'Enter Y-Axis Label',
#self.figure.get_axes()[0].get_ylabel()) as ylabel_dlg:
#if ylabel_dlg.ShowModal() == wx.ID_OK:
#title = ylabel_dlg.GetValue()
#self.figure.get_axes()[0].set_ylabel(title)
#self.plot_panel.draw()
#def on_save(self, event):
#wildcard = ("PNG (*.png)|*.png|"
#"PDF (*.pdf)|*.pdf|"
#"PS (*.ps)|*.ps|"
#"EPS (*.eps)|*.eps|"
#"SVG (*.svg)|*.svg")
#formats = ('PNG', 'PDF', 'PS', 'EPS', 'SVG')
#with wx.FileDialog(self, "Save figure as...",
#wildcard=wildcard, style=wx.FD_SAVE) as dlg:
#if dlg.ShowModal() == wx.ID_OK:
#self.plot_panel.print_figure(dlg.GetPath(),
#format=formats[dlg.GetFilterIndex()])
#def closest_point(self, event):
#if self.xy_data is None:
#return None
#axes = event.canvas.figure.get_axes()[0]
#xlim = axes.get_xlim()
#ylim = axes.get_ylim()
#xy_data = [(x,y) for x,y in self.xy_data
#if xlim[0] <= x <= xlim[1] and ylim[0] <= y <= ylim[1]]
#if not xy_data:
#return None
#e_xy = array([event.x, event.y])
#xy = min((axes.transData.transform([x,y]) for x,y in xy_data),
#key = lambda xy: hypot(*(e_xy - xy)))
## 10 pixel threshold for labeling
#if all(abs(xy - e_xy) < 10.0):
#return (tuple(abs(axes.transData.inverted().transform(xy))),
#tuple(axes.transData.inverted().transform(xy+5)))
#else:
#return None
#def on_click(self, event):
#'''Annotate the point closest to the cursor if it is within range'''
#if event.inaxes:
#xy_o = self.closest_point(event)
#if xy_o:
#xy,o = xy_o
#if self.last_anno is not None:
#self.last_anno.remove()
#tip = self.tooltip_str % xy
#axes = self.figure.get_axes()[0]
#t = axes.text(o[0], o[1], tip)
#self.last_anno = t
#event.canvas.draw()
#return
#if self.last_anno is not None:
#self.last_anno.remove()
#self.last_anno = None
#event.canvas.draw()
#def plot(self, *args, **kwargs):
#'''A simple wrapper for matplotlib's axes.plot() function. If you
#want to do something more complicated, you can access the figure
#directly using mzPlot.figure'''
#self.figure.clear()
#axes = self.figure.add_axes([0.125, 0.1, 0.775, 0.8])
#self.xy_data = axes.plot(*args, **kwargs)[0].get_xydata()
#self.plot_panel.draw()
#def plot_xic(self, title="XIC", data=None, scan_dot=None, other_MS2s=None):
#if data is None:
#raise TypeError("Required argument 'data' cannot be None")
#self.tooltip_str = '(%%3.%df, %%3.%df)' % (settings.xic_time_figs,
#settings.xic_int_figs)
#mz_image._make_xic(self.figure, None,
#[x for x,y in data],
#[y for x,y in data],
#scan_dot,
#[x for x,y in other_MS2s] if other_MS2s else [],
#[y for x,y in other_MS2s] if other_MS2s else [],
#title)
#self.plot_panel.draw()
#def plot_full_ms(self, title="Full MS", scan=None, scan_mz=None):
#if scan is None:
#raise TypeError("Required argument 'scan' cannot be None")
#self.tooltip_str = '(%%3.%df, %%3.%df)' % (settings.ms1_mz_figs,
#settings.ms1_int_figs)
#mz_image._make_ms1(self.figure,
#None,
#scan,
#scan.mode,
#[scan_mz] if scan_mz else None,
#title,
#settings.MS1_view_mz_window / 2)
#self.plot_panel.draw()
#def plot_ms_ms(self, title="MS-MS", scan=None):
#if scan is None:
#raise TypeError("Required argument 'scan' cannot be None")
#self.tooltip_str = '(%%3.%df, %%3.%df)' % (settings.ms2_mz_figs,
#settings.ms2_int_figs)
#mz_image._make_ms2(self.figure,
#scan,
#scan.mode,
#None,
#title=title)
#self.plot_panel.draw()
#def plot_venn(self, A, B, AB, A_label, B_label, title='Venn Diagram', eps=0.001):
#'''Plot a proportional 2-set Venn diagram. A and B are the sizes of the two sets,
#AB is the size of the intersection, and eps is an error margin for the proportional
#placement. E.g. if eps is 0.01 then the areas of the plot will be accurate to ~1%.
#A lower eps will give a more accurate plot at the expense of longer running time.
#The method uses a bisecting search algorithm to find the right proportions.'''
#mz_image.make_venn(self.figure, A, B, AB, A_label, B_label, title, eps)
#self.plot_panel.draw()
class NumValidator(wx.PyValidator):
'''This is a generic float-validating class which is used in various
forms to require a non-negative floating-point number from a text control.
A flag indicates whether 0.0 is considered valid.
If the validator fails, it will highlight the control and pop up an
error message. The control should have a reasonable name to stick
in the message text.
'''
def __init__(self, func=float, flag=False):
wx.PyValidator.__init__(self)
self.flag = not flag # reverse the flag for convenience later
self.func = func # used to cast the result, to allow int or float validation
def Clone(self):
return NumValidator(self.func, not self.flag)
def Validate(self, win):
tc = self.GetWindow()
val = tc.GetValue()
nm = tc.GetName()
try:
v = self.func(val)
if self.flag and v <= 0.0:
wx.MessageBox("%s must be positive" % nm, "Error")
elif v < 0.0:
wx.MessageBox("%s must be non-negative" % nm, "Error")
else:
tc.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
tc.Refresh()
return True
except ValueError:
wx.MessageBox("%s is not a valid number" % nm, "Error")
tc.SetBackgroundColour("YELLOW")
tc.Clear()
tc.SetFocus()
tc.Refresh()
return False
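# Illustrative usage (not part of the original module): attach the validator to a
# text control; with flag=False (the default) a strictly positive value is required,
# with flag=True zero is also accepted.
#
#   amount_ctrl = wx.TextCtrl(panel, -1, "1.0", name="Amount",
#                             validator=NumValidator(func=float, flag=False))
#   if panel.Validate():
#       value = float(amount_ctrl.GetValue())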
class ProgressBar:
def __init__(self, title, entries):
self.progressBar = wx.ProgressDialog(title, "Time remaining", entries, style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT | wx.PD_APP_MODAL)
    def update(self, count):
        # Update returns a (keep_going, skip) pair; call it only once per tick
        if not self.progressBar.Update(count)[0]:
            cancelMsg = wx.MessageDialog(None, "Are you sure you want to cancel?", 'Continue?',
                                         wx.YES_NO | wx.ICON_QUESTION)
            cancelMsgAnswer = cancelMsg.ShowModal()
            if cancelMsgAnswer == wx.ID_YES:
                return False
            else:
                self.progressBar.Resume()
                return True
def destroy(self):
self.progressBar.Destroy()
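# Illustrative usage sketch (assumed, not from the original module); process() and
# items are hypothetical names:
#
#   bar = ProgressBar("Processing", len(items))
#   for i, item in enumerate(items):
#       process(item)
#       if bar.update(i + 1) is False:   # False means the user confirmed the cancel
#           break
#   bar.destroy()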
def alerts(message='multiplierz', title='multiplierz', method='popup'):
"""Alert system for displaying popup or writing message to a file
    Available methods are 'popup' or 'file'.
    If the 'file' method is chosen, the title is used as the output file path.
    Example:
    >> alerts('Hello. The test worked. Please click OK.', 'Test', 'popup')
"""
message = str(message)
title = str(title)
if method == 'popup':
try:
wx.MessageBox(message, title)
except:
app = mzApp()
app.launch()
wx.MessageBox(message, title)
elif method == 'file':
fh = open(title,'w')
fh.write(message)
fh.close()
def file_chooser(title='Choose a file:', mode='r', wildcard='*'):
"""Provides a file chooser dialog and returns the file path(s) when the file(s) is selected.
mode option provides file dialog type: read single, read multiple, or save single.
mode = 'r' creates an 'Open' file dialog for single file read.
mode = 'm' creates an 'Open' file dialog for multiple files read.
mode = 'w' creates a 'Save' file dialog.
wildcard option can be specified as "*.xls"
Example:
>> file_chooser(title='Choose a file:', mode='r')
"""
wildcard = "%s|%s" % (wildcard, wildcard)
style = { 'r': wx.FD_OPEN,
'm': wx.FD_MULTIPLE,
'w': wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT }[mode]
try:
file_chooser = wx.FileDialog(None, title, wildcard=wildcard, style=style)
except:
app = mzApp()
app.launch()
file_chooser = wx.FileDialog(None, title, wildcard=wildcard, style=style)
file_name = None
if file_chooser.ShowModal() == wx.ID_OK:
if mode == 'm':
file_name = file_chooser.GetPaths()
else:
file_name = file_chooser.GetPath()
file_chooser.Destroy()
return file_name
def report_chooser(parent=None, title=None, mode='r', **kwargs):
'''A specialized file_chooser function for multiplierz files. Otherwise,
works just like file_chooser.
'parent' is the parent of the dialog--used when this is called by a GUI
element. It is perfectly fine to leave it as None, and the GUI frame will
have no parent.
'title' can be left blank for the following default titles based on mode:
r - 'Choose multiplierz File:'
w - 'Save File:'
m - 'Choose multiplierz Files:'
'mode' is one of 'r', 'w', and 'm', just as for file_chooser.
**kwargs can include any additional options to pass to the FileDialog constructor,
such as defaultDir (default directory).'''
wildcard = ("Worksheets (*.xls; *.xlsx)|*.xls; *.xlsx|"
"Comma-separated Values (*.csv)|*.csv|"
"mzResults Database (*.mzd)|*.mzd|"
"mzIdentML (*.mzid)|*.mzid")
if not title:
title = {'r': 'Choose multiplierz File:',
'w': 'Save File:',
'm': 'Choose multiplierz Files:'}[mode]
style = { 'r': wx.FD_OPEN,
'm': wx.FD_MULTIPLE,
'w': wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT }[mode]
#index = {'.xls': 0,
#'.xlsx': 0,
#'.csv': 1,
#'.mzd': 2}[settings.default_format]
index = 0
try:
file_dialog = wx.FileDialog(parent, title, wildcard=wildcard, style=style, **kwargs)
except:
app = mzApp()
app.launch()
file_dialog = wx.FileDialog(None, title, wildcard=wildcard, style=style, **kwargs)
file_dialog.SetFilterIndex(index)
file_name = None
if file_dialog.ShowModal() == wx.ID_OK:
if mode == 'm':
file_name = file_dialog.GetPaths()
else:
file_name = file_dialog.GetPath()
file_dialog.Destroy()
return file_name
class FileArrayDialog(wx.Dialog):
def __init__(self, parent, filetypes, fileLimit = None):
wx.Dialog.__init__(self, parent, -1, title = 'Select Input Files',
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.filetypes = {}
self.fileArray = wx.ListCtrl(self, -1, style = wx.LC_REPORT | wx.LC_EDIT_LABELS | wx.LC_HRULES | wx.LC_VRULES)
for i, (filetype, ext) in enumerate(filetypes):
self.fileArray.InsertColumn(i, filetype)
assert filetype not in self.filetypes, "Non-unique file identifiers! %s" % filetype
self.filetypes[filetype] = ext
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.getFile, self.fileArray)
self.goButton = wx.Button(self, -1, "Use Selected Files")
self.Bind(wx.EVT_BUTTON, self.complete, self.goButton)
self.Bind(wx.EVT_CLOSE, self.abort)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(self.fileArray, 1, wx.ALL | wx.EXPAND, 10)
box.Add(self.goButton, 0, wx.ALL | wx.EXPAND, 10)
self.SetSizerAndFit(box)
self.SetSize(wx.Size(1200, 300))
self.Bind(wx.EVT_SIZE, self.resize)
for i in range(0, 10):
self.fileArray.Append([''] * self.fileArray.GetColumnCount())
self.resize(None)
self.CentreOnScreen()
def resize(self, event):
arraywidth = self.fileArray.GetSize()[0] - 10
for col in range(self.fileArray.GetColumnCount()):
self.fileArray.SetColumnWidth(col, arraywidth / self.fileArray.GetColumnCount())
if event:
event.Skip()
def getFile(self, event):
row = event.GetIndex()
for col in range(self.fileArray.GetColumnCount()):
filetype = self.fileArray.GetColumn(col).GetText()
exts = self.filetypes[filetype]
wildcard = '|'.join(['%s|%s' % (x, x) for x in exts] + ['*|*'])
#wildcard = '|'.join(exts + ['*'])
givenfile = file_chooser(title = 'Choose %s:' % filetype, wildcard = wildcard)
self.fileArray.SetStringItem(index = row, col = col, label = givenfile)
def returnFiles(self):
filesets = []
for r in range(self.fileArray.GetItemCount()):
fileset = []
for c in range(self.fileArray.GetColumnCount()):
fileset.append(self.fileArray.GetItem(r, c).GetText())
if any(fileset):
filesets.append(fileset)
return filesets
def complete(self, event):
self.EndModal(wx.ID_OK)
def abort(self, event):
raise RuntimeError, "User cancelled file selection."
def open_filearray(parent = None, filetypes = None):
assert filetypes and all([len(x) == 2 for x in filetypes])
if parent:
dialog = FileArrayDialog(parent, filetypes)
else:
app = mzApp()
app.launch()
dialog = FileArrayDialog(None, filetypes)
dialog.ShowModal()
files = dialog.returnFiles()
return files
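# Illustrative call (hypothetical file types; returns one [spectra, results] row
# per line the user filled in):
#
#   pairs = open_filearray(filetypes=[('Spectra', ['*.raw']),
#                                     ('Search results', ['*.xlsx'])])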
def text_input(prompt='', value='', title=''):
"""Provides a text input dialog and returns the user input
The value field can be used to enter the default initial value.
Example:
    >> text_input('Enter your name:', title='Name')
"""
try:
dlg = wx.TextEntryDialog(None, message=prompt, caption=title, defaultValue=value)
except:
app = mzApp()
app.launch()
dlg = wx.TextEntryDialog(None, message=prompt, caption=title, defaultValue=value)
dlg.ShowModal()
output = dlg.GetValue()
dlg.Destroy()
return output
def directory_chooser(parent = None, title = None):
try:
dialog = wx.DirDialog(parent)
except TypeError:
app = mzApp()
app.launch()
        dialog = wx.DirDialog(None)
if dialog.ShowModal() == wx.ID_OK:
return dialog.GetPath()
else:
return None
#if __name__ == "__main__":
#print "TEST MODE"
#foo = open_filearray(None, [('Foo', ["*.xlsx", '*.features']), ('Bar', ['*.features', '*.raw'])])
#print "DONE" | gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/io/tests/test_sql.py | 4 | 7201 | from __future__ import (print_function, division, absolute_import,
unicode_literals)
import io
import pytest
from dask.dataframe.io.sql import read_sql_table
from dask.utils import tmpfile
from dask.dataframe.utils import assert_eq
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
pytest.importorskip('sqlalchemy')
pytest.importorskip('sqlite3')
np = pytest.importorskip('numpy')
data = """
name,number,age,negish
Alice,0,33,-5
Bob,1,40,-3
Chris,2,22,3
Dora,3,16,5
Edith,4,53,0
Francis,5,30,0
Garreth,6,20,0
"""
df = pd.read_csv(io.StringIO(data), index_col='number')
@pytest.yield_fixture
def db():
with tmpfile() as f:
uri = 'sqlite:///%s' % f
df.to_sql('test', uri, index=True, if_exists='replace')
yield uri
def test_empty(db):
from sqlalchemy import create_engine, MetaData, Table, Column, Integer
with tmpfile() as f:
uri = 'sqlite:///%s' % f
metadata = MetaData()
engine = create_engine(uri)
table = Table('empty_table', metadata,
Column('id', Integer, primary_key=True),
Column('col2', Integer))
metadata.create_all(engine)
dask_df = read_sql_table(table.name, uri, index_col='id', npartitions=1)
assert dask_df.index.name == 'id'
assert dask_df.col2.dtype == np.dtype('int64')
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
def test_needs_rational(db):
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame({'a': list('ghjkl'), 'b': [now + i * d for i in range(5)],
'c': [True, True, False, True, True]})
df = df.append([{'a': 'x', 'b': now + d * 1000, 'c': None},
{'a': None, 'b': now + d * 1001, 'c': None}])
with tmpfile() as f:
uri = 'sqlite:///%s' % f
df.to_sql('test', uri, index=False, if_exists='replace')
# one partition contains NULL
data = read_sql_table('test', uri, npartitions=2, index_col='b')
df2 = df.set_index('b')
assert_eq(data, df2.astype({'c': bool})) # bools are coerced
# one partition contains NULL, but big enough head
data = read_sql_table('test', uri, npartitions=2, index_col='b',
head_rows=12)
df2 = df.set_index('b')
assert_eq(data, df2)
# empty partitions
data = read_sql_table('test', uri, npartitions=20, index_col='b')
part = data.get_partition(12).compute()
assert part.dtypes.tolist() == ['O', bool]
assert part.empty
df2 = df.set_index('b')
assert_eq(data, df2.astype({'c': bool}))
# explicit meta
data = read_sql_table('test', uri, npartitions=2, index_col='b',
meta=df2[:0])
part = data.get_partition(1).compute()
assert part.dtypes.tolist() == ['O', 'O']
df2 = df.set_index('b')
assert_eq(data, df2)
def test_simple(db):
# single chunk
data = read_sql_table('test', db, npartitions=2, index_col='number'
).compute()
assert (data.name == df.name).all()
assert data.index.name == 'number'
assert_eq(data, df)
def test_npartitions(db):
data = read_sql_table('test', db, columns=list(df.columns), npartitions=2,
index_col='number')
assert len(data.divisions) == 3
assert (data.name.compute() == df.name).all()
data = read_sql_table('test', db, columns=['name'], npartitions=6,
index_col="number")
assert_eq(data, df[['name']])
data = read_sql_table('test', db, columns=list(df.columns),
bytes_per_chunk=2**30,
index_col='number')
assert data.npartitions == 1
assert (data.name.compute() == df.name).all()
def test_divisions(db):
data = read_sql_table('test', db, columns=['name'], divisions=[0, 2, 4],
index_col="number")
assert data.divisions == (0, 2, 4)
assert data.index.max().compute() == 4
assert_eq(data, df[['name']][df.index <= 4])
def test_division_or_partition(db):
with pytest.raises(TypeError):
read_sql_table('test', db, columns=['name'], index_col="number",
divisions=[0, 2, 4], npartitions=3)
out = read_sql_table('test', db, index_col="number", bytes_per_chunk=100)
m = out.map_partitions(lambda d: d.memory_usage(
deep=True, index=True).sum()).compute()
assert (50 < m).all() and (m < 200).all()
assert_eq(out, df)
def test_range(db):
data = read_sql_table('test', db, npartitions=2, index_col='number',
limits=[1, 4])
assert data.index.min().compute() == 1
assert data.index.max().compute() == 4
def test_datetimes():
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame({'a': list('ghjkl'), 'b': [now + i * d
for i in range(2, -3, -1)]})
with tmpfile() as f:
uri = 'sqlite:///%s' % f
df.to_sql('test', uri, index=False, if_exists='replace')
data = read_sql_table('test', uri, npartitions=2, index_col='b')
assert data.index.dtype.kind == "M"
assert data.divisions[0] == df.b.min()
df2 = df.set_index('b')
assert_eq(data.map_partitions(lambda x: x.sort_index()),
df2.sort_index())
def test_with_func(db):
from sqlalchemy import sql
index = sql.func.abs(sql.column('negish')).label('abs')
# function for the index, get all columns
data = read_sql_table('test', db, npartitions=2, index_col=index)
assert data.divisions[0] == 0
part = data.get_partition(0).compute()
assert (part.index == 0).all()
# now an arith op for one column too; it's name will be 'age'
data = read_sql_table('test', db, npartitions=2, index_col=index,
columns=[index, -sql.column('age')])
assert (data.age.compute() < 0).all()
# a column that would have no name, give it a label
index = (-sql.column('negish')).label('index')
data = read_sql_table('test', db, npartitions=2, index_col=index,
columns=['negish', 'age'])
d = data.compute()
assert (-d.index == d['negish']).all()
def test_no_nameless_index(db):
from sqlalchemy import sql
index = (-sql.column('negish'))
with pytest.raises(ValueError):
read_sql_table('test', db, npartitions=2, index_col=index,
columns=['negish', 'age', index])
index = sql.func.abs(sql.column('negish'))
# function for the index, get all columns
with pytest.raises(ValueError):
read_sql_table('test', db, npartitions=2, index_col=index)
def test_select_from_select(db):
from sqlalchemy import sql
s1 = sql.select([sql.column('number'), sql.column('name')]
).select_from(sql.table('test'))
out = read_sql_table(s1, db, npartitions=2, index_col='number')
assert_eq(out, df[['name']])
| gpl-3.0 |
tangyaohua/dl4mt | session2/nmtrlm.py | 1 | 41883 | '''
Build a simple neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
from scipy import optimize, stats
from collections import OrderedDict
from sklearn.cross_validation import KFold
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s'%(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None, n_words_src=30000, n_words=30000):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen != None:
new_seqs_x = []
new_seqs_y = []
new_lengths_x = []
new_lengths_y = []
for l_x, s_x, l_y, s_y in zip(lengths_x, seqs_x, lengths_y, seqs_y):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
lengths_y = new_lengths_y
seqs_y = new_seqs_y
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 1
maxlen_y = numpy.max(lengths_y) + 1
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_y] in enumerate(zip(seqs_x,seqs_y)):
x[:lengths_x[idx],idx] = s_x
x_mask[:lengths_x[idx]+1,idx] = 1.
y[:lengths_y[idx],idx] = s_y
y_mask[:lengths_y[idx]+1,idx] = 1.
return x, x_mask, y, y_mask
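# Illustrative example (values assumed, not from the original source):
#
#   x, x_mask, y, y_mask = prepare_data([[5, 7], [5, 7, 9]], [[3], [3, 4, 6, 8]])
#   # x.shape == (4, 2):  max source length 3 plus one padded row for the EOS token
#   # y.shape == (5, 2):  max target length 4 plus one
#   # x_mask[:, 0] == [1, 1, 1, 0]: lengths[0]+1 leading ones, zeros for padding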
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
if nin == None:
nin = options['dim_proj']
if nout == None:
nout = options['dim_proj']
params[_p(prefix,'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
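# Note (added): gru_layer returns a one-element list; rval[0] holds the hidden
# state at every timestep, with shape (nsteps, n_samples, dim).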
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _step_slice(m_, x_, xx_, xc_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += x_
preact += tensor.dot(ctx_, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]]
if one_step:
rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
non_sequences=[pctx_,
context]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
params = get_layer('ff')[0](options, params, prefix='ff_state_r',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params, prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
params = get_layer(options['decoder'])[0](options, params, prefix='decoder_r',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm_r',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx_r',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
xr = x[::-1]
xr_mask = x_mask[::-1]
yr = y[::-1]
yr_mask = y_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = (ctx * x_mask[:,:,None]).sum(0) / x_mask.sum(0)[:,None]
#ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
init_state_r = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state_r', activ='tanh')
# word embedding (target)
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
    embr = tparams['Wemb_dec'][yr.flatten()]
embr = embr.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(embr)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], embr[:-1])
embr = emb_shifted
# decoder_r
proj_r = get_layer(options['decoder'])[1](tparams, embr, options,
prefix='decoder_r',
mask=yr_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state_r)
proj_h = proj[0]
ctxs = proj[1]
opt_ret['dec_alphas'] = proj[2]
proj_r_h = proj_r[0][::-1]
ctxs_r = proj_r[1][::-1]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit_ctx_r = get_layer('ff')[1](tparams, ctxs_r, options,
prefix='ff_logit_ctx_r', activ='linear')
logit_lstm_r = get_layer('ff')[1](tparams, proj_r_h, options,
prefix='ff_logit_lstm_r', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx+logit_lstm_r+logit_ctx_r)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0],y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options, prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options, prefix='encoder_r')
ctx = concatenate([proj[0],projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = ctx.mean(0)
#ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
init_state_r = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state_r', activ='tanh')
print 'Building f_init...',
outs = [init_state, init_state_r, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
    init_state_r = tensor.matrix('init_state_r', dtype='float32')
# if it's the first word, emb should be all zero
emb = tensor.switch(y[:,None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
    # at sampling time only the single previous token is available, so the same
    # input feeds the reverse decoder as well (assumption made here to keep
    # f_next's signature compatible with gen_sample below)
    embr = tensor.switch(y[:, None] < 0,
                         tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
                         tparams['Wemb_dec'][y])
projr = get_layer(options['decoder'])[1](tparams, embr, options,
prefix='decoder_r',
mask=None, context=ctx,
one_step=True,
init_state=init_state_r)
next_state_r = projr[0]
ctxs_r = projr[1]
next_state = proj[0]
ctxs = proj[1]
    # readout: combine forward and reverse decoder states, mirroring build_model
    # (reconstructed: next_probs and next_sample were left undefined in the original)
    logit_lstm = get_layer('ff')[1](tparams, next_state, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit_lstm_r = get_layer('ff')[1](tparams, next_state_r, options,
                                      prefix='ff_logit_lstm_r', activ='linear')
    logit_ctx_r = get_layer('ff')[1](tparams, ctxs_r, options,
                                     prefix='ff_logit_ctx_r', activ='linear')
    logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx + logit_lstm_r + logit_ctx_r)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')
    next_probs = tensor.nnet.softmax(logit)
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)

    # next word probability
    print 'Building f_next..',
    inps = [y, ctx, init_state, init_state_r]
    outs = [next_probs, next_sample, next_state, next_state_r]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
ret = f_init(x)
next_state, next_state_r, ctx0 = ret[0], ret[1], ret[2]
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state, next_state_r]
ret = f_next(*inps)
next_p, next_w, next_state, next_state_r = ret[0], ret[1], ret[2], ret[3]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
new_hyp_states_r = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
new_hyp_states.append(copy.copy(next_state[ti]))
                new_hyp_states_r.append(copy.copy(next_state_r[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
            hyp_states = []
            hyp_states_r = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
                    hyp_states_r.append(new_hyp_states_r[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = numpy.array(hyp_states)
            next_state_r = numpy.array(hyp_states_r)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
probs = []
n_done = 0
for x, y in iterator:
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y,
n_words_src=options['n_words_src'],
n_words=options['n_words'])
pprobs = f_log_probs(x,x_mask,y,y_mask)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
import ipdb; ipdb.set_trace()
if verbose:
print >>sys.stderr, '%d samples computed'%(n_done)
return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad'%k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad'%k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2'%k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2'%k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up, profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up, profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k) for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4)) for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads, running_grads2)]
param_up = [(p, p + udn[1]) for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, x, mask, y, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
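# Usage sketch for the optimizers above (mirrors how train() wires them up below):
#
#   f_grad_shared, f_update = adadelta(lr, tparams, grads, inps, cost)
#   cost_val = f_grad_shared(x, x_mask, y, y_mask)  # forward/backward, cache grads
#   f_update(0.01)                                  # apply the cached update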
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
encoder='gru',
decoder='gru_cond',
patience=10,
max_epochs=10,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
diag_c=0.,
clip_c=-1.,
lrate=0.01,
n_words_src=10001,
n_words=10001,
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
datasets=['/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/xinhua_u8.en',
'/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/xinhua_u8.ch'],
valid_datasets=['/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/xinhua_u8.en',
'/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/xinhua_u8.ch'],
dictionaries=['/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/vocab.english.pkl',
'/home/tangyh/Dropbox/PycharmProjects/dl4mt/session2/data/vocab.chinese.pkl'],
use_dropout=False,
reload_=False):
# Model options
model_options = locals().copy()
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
# reload options
if reload_ and os.path.exists(saveto):
with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(datasets[0], datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
print 'Buliding sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=profile)
print 'Done'
cost = cost.mean()
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:,None]-
opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
cost += alpha_reg
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
print 'Building f_grad...',
f_grad = theano.function(inps, grads, profile=profile)
print 'Done'
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c**2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = list(numpy.load(saveto)['history_errs'])
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
for x, y in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
n_words_src=n_words_src,
n_words=n_words)
            if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, x_mask, y, y_mask)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
#import ipdb; ipdb.set_trace()
if best_p != None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5,x.shape[1])):
stochastic = True
sample, score = gen_sample(tparams, f_init, f_next, x[:,jj][:,None],
model_options, trng=trng, k=1, maxlen=30,
stochastic=stochastic, argmax=False)
print 'Source ',jj,': ',
for vv in x[:,jj]:
if vv == 0:
break
if vv in worddicts_r[0]:
print worddicts_r[0][vv],
else:
print 'UNK',
print
print 'Truth ',jj,' : ',
for vv in y[:,jj]:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
valid_errs = pred_probs(f_log_probs, prepare_data, model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
if numpy.isnan(valid_err):
import ipdb; ipdb.set_trace()
print 'Valid ', valid_err
print 'Seen %d samples'%n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
print 'Valid ', valid_err
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
**params)
return valid_err
if __name__ == '__main__':
pass
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/delaunay/testfuncs.py | 21 | 21168 | """Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from .triangulate import Triangulation
class TestData(dict):
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self.__dict__ = self
class TestDataSet(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
data = TestData(
franke100=TestDataSet(
x=np.array([0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454, 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899, 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001, 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359, 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064, 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757, 0.8596328, 0.9279871, 0.8512805,
1.044982, 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704, 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289,
0.050133, 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243, 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788, 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679, 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231, 0.969603,
-0.01209, 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([0.1375, 0.9125, 0.7125, 0.225, -0.05, 0.475, 0.05,
0.45, 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85,
0.7, 0.275, 0.45, 0.8125, 0.45, 1., 0.5,
0.1875, 0.5875, 1.05, 0.1]),
y=np.array([0.975, 0.9875, 0.7625, 0.8375, 0.4125, 0.6375,
-0.05, 1.0375, 0.55, 0.8, 0.75, 0.575,
0.55, 0.4375, 0.3125, 0.425, 0.2875, 0.1875,
-0.0375, 0.2625, 0.4625, 0.2625, 0.125, -0.06125,
0.1125])),
random100=TestDataSet(
x=np.array([0.0096326, 0.0216348, 0.029836, 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966, 0.252774, 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026, 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203, 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726, 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873,
0.9425637, 0.4799701, 0.1783069, 0.114676, 0.8225797,
0.2270688, 0.4073598, 0.887508, 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812, 0.6409007, 0.105869,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675])),
uniform9=TestDataSet(
x=np.array([1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x * 9
y = y * 9
x1 = x + 1.0
x2 = x - 2.0
x4 = x - 4.0
x7 = x - 7.0
    y1 = y + 1.0  # (9y + 1) term of Franke's test function
y2 = y - 2.0
y3 = y - 3.0
y7 = y - 7.0
f = (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) +
0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) +
0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) -
0.2 * np.exp(-x4 * x4 - y7 * y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0 * (y - x) + 1.0) / 9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4 * y)) / (6.0 + 6.0 * (3 * x - 1.0) ** 2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle, 0, 100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y) + np.sin(10.0 * x * y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0 - 10.0 * x
y = 5.0 - 10.0 * y
g1 = np.exp(-x * x / 2)
g2 = np.exp(-y * y / 2)
f = g1 + 0.75 * g2 * (1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0 - 20.0 * x) / 3.0)
ey = np.exp((10.0 - 20.0 * y) / 3.0)
logitx = 1.0 / (1.0 + ex)
logity = 1.0 / (1.0 + ey)
f = (((20.0 / 3.0) ** 3 * ex * ey) ** 2 * (logitx * logity) ** 5 *
(ex - 2.0 * logitx) * (ey - 2.0 * logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80 * x - 40.0, 90 * y - 45.)
f = np.exp(-0.04 * circle) * np.cos(0.15 * circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss,
cloverleaf, cosine_peak]
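# Each test function maps coordinate arrays x, y in [0, 1] to surface values z.
# A minimal sketch of evaluating one of them on a regular grid:
#   y, x = np.mgrid[0:1:101j, 0:1:101j]
#   z = saddle(x, y)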
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0),
nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange + self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
import matplotlib as mpl
from matplotlib import pylab as pl
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
else:
y, x = np.mgrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
pl.ioff()
pl.clf()
pl.hot() # Some like it hot
if plotter == 'imshow':
pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent,
origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
pl.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(
np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]),
colors=[(0, 0, 0, 0.2)])
ax = pl.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
pl.title('%s: %s' % (func.title, title))
else:
pl.title(title)
pl.show()
pl.ion()
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange + self.yrange)
def plotallfuncs(allfuncs=allfuncs):
from matplotlib import pylab as pl
pl.ioff()
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
print(func.title)
nnt.plot(func, interp=False, plotter='imshow')
pl.savefig('%s-ref-img.png' % func.__name__)
nnt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-nn-img.png' % func.__name__)
lpt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-lin-img.png' % func.__name__)
nnt.plot(func, interp=False, plotter='contour')
pl.savefig('%s-ref-con.png' % func.__name__)
nnt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-nn-con.png' % func.__name__)
lpt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-lin-con.png' % func.__name__)
pl.ion()
def plot_dt(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 0, 0, 0.2)]
lc = mpl.collections.LineCollection(
np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
for i, j in tri.edge_db]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_vo(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 1, 0, 0.2)]
lc = mpl.collections.LineCollection(np.array(
[(tri.circumcenters[i], tri.circumcenters[j])
for i in xrange(len(tri.circumcenters))
for j in tri.triangle_neighbors[i] if j != -1]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if edgecolor is None:
edgecolor = (0, 0, 1, 0.2)
dxy = (np.array([(tri.x[i], tri.y[i]) for i, j, k in tri.triangle_nodes])
- tri.circumcenters)
r = np.hypot(dxy[:, 0], dxy[:, 1])
ax = pl.gca()
for i in xrange(len(r)):
p = mpl.patches.Circle(tri.circumcenters[i], r[i],
resolution=100, edgecolor=edgecolor,
facecolor=(1, 1, 1, 0), linewidth=0.2)
ax.add_patch(p)
pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
"""Compute a quality factor (the quantity r**2 from TOMS792).
interpolator must be in ('linear', 'nn').
"""
fz = func(mesh.x, mesh.y)
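    # The quality factor computed below is the coefficient of determination,
    #     r**2 = 1 - SSE / SSM,
    # where SSE sums the squared errors between the true surface Z and the
    # interpolated surface iz, and SSM sums the squared deviations of Z from
    # its mean, both evaluated on an n x n grid over the unit square.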
tri = Triangulation(mesh.x, mesh.y)
intp = getattr(tri,
interpolator + '_extrapolator')(fz, bbox=(0., 1., 0., 1.))
Y, X = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)]
Z = func(X, Y)
iz = intp[0:1:complex(0, n), 0:1:complex(0, n)]
#nans = np.isnan(iz)
#numgood = n*n - np.sum(np.array(nans.flat, np.int32))
numgood = n * n
SE = (Z - iz) ** 2
SSE = np.sum(SE.flat)
meanZ = np.sum(Z.flat) / numgood
SM = (Z - meanZ) ** 2
SSM = np.sum(SM.flat)
r2 = 1.0 - SSE / SSM
print(func.__name__, r2, SSE, SSM, numgood)
return r2
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
results = {}
kv = list(six.iteritems(data))
kv.sort()
for name, mesh in kv:
reslist = results.setdefault(name, [])
for func in allfuncs:
reslist.append(quality(func, mesh, interpolator, n))
return results
def funky():
x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
tx = 0.46
ty = 0.23
t0 = Triangulation(x0, y0)
t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty])))
return t0, t1
| gpl-3.0 |
TomAugspurger/pandas | pandas/tests/io/test_clipboard.py | 2 | 8028 | from textwrap import dedent
import numpy as np
from numpy.random import randint
import pytest
import pandas as pd
from pandas import DataFrame, get_option, read_clipboard
import pandas._testing as tm
from pandas.io.clipboard import clipboard_get, clipboard_set
def build_kwargs(sep, excel):
kwargs = {}
if excel != "default":
kwargs["excel"] = excel
if sep != "default":
kwargs["sep"] = sep
return kwargs
@pytest.fixture(
params=[
"delims",
"utf8",
"utf16",
"string",
"long",
"nonascii",
"colwidth",
"mixed",
"float",
"int",
]
)
def df(request):
data_type = request.param
if data_type == "delims":
return pd.DataFrame({"a": ['"a,\t"b|c', "d\tef´"], "b": ["hi'j", "k''lm"]})
elif data_type == "utf8":
return pd.DataFrame({"a": ["µasd", "Ωœ∑´"], "b": ["øπ∆˚¬", "œ∑´®"]})
elif data_type == "utf16":
return pd.DataFrame(
{"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]}
)
elif data_type == "string":
return tm.makeCustomDataframe(
5, 3, c_idx_type="s", r_idx_type="i", c_idx_names=[None], r_idx_names=[None]
)
elif data_type == "long":
max_rows = get_option("display.max_rows")
return tm.makeCustomDataframe(
max_rows + 1,
3,
data_gen_f=lambda *args: randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "nonascii":
return pd.DataFrame({"en": "in English".split(), "es": "en español".split()})
elif data_type == "colwidth":
_cw = get_option("display.max_colwidth") + 1
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda *args: "x" * _cw,
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "mixed":
return DataFrame(
{
"a": np.arange(1.0, 6.0) + 0.01,
"b": np.arange(1, 6).astype(np.int64),
"c": list("abcde"),
}
)
elif data_type == "float":
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "int":
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda *args: randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
else:
raise ValueError
@pytest.fixture
def mock_clipboard(monkeypatch, request):
"""Fixture mocking clipboard IO.
This mocks pandas.io.clipboard.clipboard_get and
pandas.io.clipboard.clipboard_set.
This uses a local dict for storing data. The dictionary
key used is the test ID, available with ``request.node.name``.
This returns the local dictionary, for direct manipulation by
tests.
"""
# our local clipboard for tests
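    # Tests can seed or inspect this fake clipboard directly, for example:
    #   mock_clipboard[request.node.name] = "a\tb\n1\t2"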
_mock_data = {}
def _mock_set(data):
_mock_data[request.node.name] = data
def _mock_get():
return _mock_data[request.node.name]
monkeypatch.setattr("pandas.io.clipboard.clipboard_set", _mock_set)
monkeypatch.setattr("pandas.io.clipboard.clipboard_get", _mock_get)
yield _mock_data
@pytest.mark.clipboard
def test_mock_clipboard(mock_clipboard):
import pandas.io.clipboard
pandas.io.clipboard.clipboard_set("abc")
assert "abc" in set(mock_clipboard.values())
result = pandas.io.clipboard.clipboard_get()
assert result == "abc"
@pytest.mark.single
@pytest.mark.clipboard
@pytest.mark.usefixtures("mock_clipboard")
class TestClipboard:
def check_round_trip_frame(self, data, excel=None, sep=None, encoding=None):
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding)
tm.assert_frame_equal(data, result)
# Test that default arguments copy as tab delimited
def test_round_trip_frame(self, df):
self.check_round_trip_frame(df)
# Test that explicit delimiters are respected
@pytest.mark.parametrize("sep", ["\t", ",", "|"])
def test_round_trip_frame_sep(self, df, sep):
self.check_round_trip_frame(df, sep=sep)
# Test white space separator
def test_round_trip_frame_string(self, df):
df.to_clipboard(excel=False, sep=None)
result = read_clipboard()
assert df.to_string() == result.to_string()
assert df.shape == result.shape
# Two character separator is not supported in to_clipboard
# Test that multi-character separators are not silently passed
def test_excel_sep_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=True, sep=r"\t")
# Separator is ignored when excel=False and should produce a warning
def test_copy_delim_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=False, sep="\t")
# Tests that the default behavior of to_clipboard is tab
# delimited and excel="True"
@pytest.mark.parametrize("sep", ["\t", None, "default"])
@pytest.mark.parametrize("excel", [True, None, "default"])
def test_clipboard_copy_tabs_default(self, sep, excel, df, request, mock_clipboard):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
assert mock_clipboard[request.node.name] == df.to_csv(sep="\t")
# Tests reading of white space separated tables
@pytest.mark.parametrize("sep", [None, "default"])
@pytest.mark.parametrize("excel", [False])
def test_clipboard_copy_strings(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
result = read_clipboard(sep=r"\s+")
assert result.to_string() == df.to_string()
assert df.shape == result.shape
def test_read_clipboard_infer_excel(self, request, mock_clipboard):
# gh-19010: avoid warnings
clip_kwargs = dict(engine="python")
text = dedent(
"""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip()
)
mock_clipboard[request.node.name] = text
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == "Harry Carney"
# having diff tab counts doesn't trigger it
text = dedent(
"""
a\t b
1 2
3 4
""".strip()
)
mock_clipboard[request.node.name] = text
res = pd.read_clipboard(**clip_kwargs)
text = dedent(
"""
a b
1 2
3 4
""".strip()
)
mock_clipboard[request.node.name] = text
exp = pd.read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self, df):
# test case for testing invalid encoding
with pytest.raises(ValueError):
df.to_clipboard(encoding="ascii")
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding="ascii")
@pytest.mark.parametrize("enc", ["UTF-8", "utf-8", "utf8"])
def test_round_trip_valid_encodings(self, enc, df):
self.check_round_trip_frame(df, encoding=enc)
@pytest.mark.single
@pytest.mark.clipboard
@pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑´...", "abcd..."])
def test_raw_roundtrip(data):
# PR #25040 wide unicode wasn't copied correctly on PY3 on windows
clipboard_set(data)
assert data == clipboard_get()
| bsd-3-clause |
russel1237/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 41 | 35602 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
SitiBanc/1061_NCTU_IOMDS | 1011/Homework3/1011_HW3.py | 1 | 6316 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 13:26:32 2017
@author: sitibanc
"""
import math
import numpy as np
from sklearn import datasets
def entropy(p1, n1): # postive, negative
if p1 == 0 and n1 == 0:
return 1
value = 0
pp = p1 / (p1 + n1)
pn = n1 / (p1 + n1)
if pp > 0:
value -= pp * math.log2(pp)
if pn > 0:
value -= pn * math.log2(pn)
return value
def infoGain(p1, n1, p2, n2):
total = p1 + n1 + p2 + n2
s1 = p1 + n1
s2 = p2 + n2
return entropy(p1 + p2, n1 + n2) - s1 / total * entropy(p1, n1) - s2 / total * entropy(p2, n2)
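# Quick sanity checks for the two measures above (values in bits):
#   entropy(1, 1) == 1.0        # a 50/50 node is maximally impure
#   entropy(2, 0) == 0.0        # a pure node has zero entropy
#   infoGain(2, 0, 0, 2) == 1.0 # a perfect split removes all remaining impurity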
def buildDT(feature, target, positive, negative):
### node structure (dictionary)
# node.leaf = 0/1
# node.selectf = feature index
# node.threshold = some value (regards feature value)
# node.child = index of childs (leaft, right)
###
# root node
node = dict()
node['data'] = range(len(target))
### tree structure (list)
tree = []
tree.append(node)
###
i = 0
while i < len(tree):
idx = tree[i]['data']
        # Check whether all samples in this node share the same label
        if sum(target[idx] == negative) == len(idx): # all negative
tree[i]['leaf'] = 1 # is leaf node
tree[i]['decision'] = negative
        elif sum(target[idx] == positive) == len(idx): # all positive
tree[i]['leaf'] = 1
tree[i]['decision'] = positive
        # Otherwise, try to find the best split
else:
bestIG = 0
            # Gather the candidate values of each feature in this node to decide the threshold
            for j in range(feature.shape[1]): # feature.shape returns a (n_rows, n_columns) tuple
                pool = list(set(feature[idx, j])) # use a set to drop duplicate feature values
                pool.sort() # sort so the midpoint thresholds below are generated in order
for k in range(len(pool) - 1):
threshold = (pool[k] + pool[k + 1]) / 2
                    G1 = [] # left subtree
                    G2 = [] # right subtree
for t in idx:
if feature[t, j] <= threshold:
G1.append(t)
else:
G2.append(t)
# Calculate infoGain
thisIG = infoGain(sum(target[G1] == positive), sum(target[G1] == negative), sum(target[G2] == positive), sum(target[G2] == negative))
# Update bestIG
if thisIG > bestIG:
bestIG = thisIG
bestG1 = G1
bestG2 = G2
bestThreshold = threshold
bestf = j
if bestIG > 0:
tree[i]['leaf'] = 0
tree[i]['selectf'] = bestf
tree[i]['threshold'] = bestThreshold
tree[i]['child'] = [len(tree), len(tree) + 1]
                # Append the left child first
node = dict()
node['data'] = bestG1
tree.append(node)
                # Then append the right child
node = dict()
node['data'] = bestG2
tree.append(node)
            # No split improves the information gain
else:
tree[i]['leaf'] = 1
                # Decide the prediction by majority vote
if sum(target[idx] == positive) > sum(target[idx] == negative):
tree[i]['decision'] = positive
else:
tree[i]['decision'] = negative
i += 1
return tree
def testDT(tree, test_feature, test_target):
now = 0
while tree[now]['leaf'] == 0:
bestf = tree[now]['selectf']
threshold = tree[now]['threshold']
        # Go to the left child
if test_feature[bestf] <= threshold:
now = tree[now]['child'][0]
        # Go to the right child
else:
now = tree[now]['child'][1]
if tree[now]['decision'] == test_target:
return True
else:
return False
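# Note: testDT above is a convenience checker that the main script below does not
# call (it votes with predictDT instead); an illustrative use, given a previously
# built tree and a sample index i, would be:
#   correct = testDT(tree, iris.data[i], iris.target[i])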
def predictDT(tree, test_feature):
now = 0
while tree[now]['leaf'] == 0:
bestf = tree[now]['selectf']
threshold = tree[now]['threshold']
        # Go to the left child
if test_feature[bestf] <= threshold:
now = tree[now]['child'][0]
        # Go to the right child
else:
now = tree[now]['child'][1]
return tree[now]['decision']
### Main ###
# Load Data
iris = datasets.load_iris()
# Data Preprocessing
# Separate data & target according to its target value
data0 = iris.data[:50]
data1 = iris.data[50:100]
data2 = iris.data[100:]
targets0 = iris.target[:50]
targets1 = iris.target[50:100]
targets2 = iris.target[100:]
prediction = [0] * 150
error = 0
# Leave-one-out
for i in range(len(iris.data)):
# Initial vote
vote = [0, 0, 0]
# Separate Feature & Target
feature0 = data0
feature1 = data1
feature2 = data2
target0 = targets0
target1 = targets1
target2 = targets2
# Remove[i] data (Leave-one-out)
if i < 50:
feature0 = np.delete(feature0, i, 0)
target0 = np.delete(target0, i, 0)
elif i < 100:
feature1 = np.delete(feature1, i % 50, 0)
target1 = np.delete(target1, i % 50, 0)
else:
feature2 = np.delete(feature2, i % 50, 0)
target2 = np.delete(target2, i % 50, 0)
# Stack arrays in sequence vertically (row wise).
tree01 = buildDT(np.vstack((feature0, feature1)), np.append(target0, target1), 0, 1)
tree02 = buildDT(np.vstack((feature0, feature2)), np.append(target0, target2), 0, 2)
tree12 = buildDT(np.vstack((feature1, feature2)), np.append(target1, target2), 1, 2)
vote[predictDT(tree01, iris.data[i])] += 1
vote[predictDT(tree02, iris.data[i])] += 1
vote[predictDT(tree12, iris.data[i])] += 1
# check whether the vote is tied
if max(vote) == 1:
prediction[i] = 0
else:
for j in range(3):
if vote[j] > 1:
max_idx = j
prediction[i] = max_idx
# Calculate Error Rate
error_list = []
for i in range(len(iris.target)):
if iris.target[i] != prediction[i]:
error += 1
error_list.append(i)
print('Error Rate:', error / len(iris.target))
| apache-2.0 |
mohseniaref/PySAR-1 | pysar/igram_viewer.py | 1 | 8917 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import sys
import os
import getopt
import time
import datetime
from numpy import *
from numpy import round as round
from scipy.io import loadmat
import matplotlib
import matplotlib.pyplot as plt
from pylab import *
import h5py
import random
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import readfile
def Usage():
# print 'tsviewer timeseries.h5'
print '''
***************************************************************
***************************************************************
Time-series viewer
Usage:
tsviewer.py -f timeseriesFile.h5 -v velocityFile.h5 -l lower bound -h higher bound -s fontsize -m Marker Size -c marker color -w linewidth -u unit
-f : file of the timeseries
-v : velocity file (if not specified then the last time-series epoch is displayed)
-l : lower bound of the displacement (default is the minimum of the displacement)
-h : higher bound of the displacement (default is the maximum of the displacement)
-s : size of font used x and y labels (default = 22)
-m : marker size (default = 16)
-c : color of the markers (default = red). some options are: orange, black, yellow, blue, green...
-w : width of lines to connect the points (default = 2 ). set to 0 (-l 0) if you don't want any line connecting the points
-u : unit of the displacement (default = cm). Other options are: mm and m
-e : event dates
-F : another timeseries file (can be used to compare 2 time-series)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Example:
tsviewer.py timeseries.h5
tsviewer.py -f timeseries.h5
tsviewer.py -f timeseries_demCor.h5 -v velocity_masked.h5 -u m -c blue
tsviewer.py -f timeseries.h5 -v velocity.h5 -s 24 -m 12 -c orange -l -10 -h 10 -w 4 -u mm
tsviewer.py -f timeseries.h5 -F timeseries_tropCor.h5
***************************************************************
***************************************************************
'''
def main(argv):
markerSize=16
markerSize2=16
markerColor='g'
markerColor2='red'
lineWidth=2
fontSize=22
unit='cm'
if len(sys.argv)>2:
try:
opts, args = getopt.getopt(argv,"f:F:v:a:b:s:m:c:w:u:l:h:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt == '-f':
timeSeriesFile = arg
elif opt == '-F':
timeSeriesFile_2 = arg
elif opt == '-v':
velocityFile = arg
elif opt == '-a':
vmin = float(arg)
elif opt == '-b':
vmax = float(arg)
elif opt == '-s':
fontSize = int(arg)
elif opt == '-m':
markerSize=int(arg)
elif opt == '-c':
markerColor=arg
elif opt == '-w':
lineWidth=int(arg)
elif opt == '-u':
unit=arg
elif opt == '-l':
lbound=float(arg)
elif opt == '-h':
hbound=float(arg)
elif len(sys.argv)==2:
if argv[0]=='-h':
Usage(); sys.exit(1)
elif os.path.isfile(argv[0]):
timeSeriesFile = argv[0]
h5timeseries = h5py.File(timeSeriesFile)
if not 'interferograms' in h5timeseries.keys():
print 'ERROR'
Usage(); sys.exit(1)
else:
Usage(); sys.exit(1)
elif len(sys.argv)<2:
Usage(); sys.exit(1)
if unit in ('m','M'):
unitFac=1
elif unit in ('cm','Cm','CM'):
unitFac=100
elif unit in ('mm','Mm','MM','mM'):
unitFac=1000
else:
print 'Warning:'
print 'wrong unit input!'
print 'cm is considered to display the displacement'
############################################
if not os.path.isfile(timeSeriesFile):
Usage();sys.exit(1)
h5timeseries = h5py.File(timeSeriesFile)
# if not 'timeseries' in h5timeseries.keys():
# Usage(); sys.exit(1)
igramList = h5timeseries['interferograms'].keys()
dates=range(len(igramList))
# dateIndex={}
# for ni in range(len(dateList)):
# dateIndex[dateList[ni]]=ni
# tbase=[]
# d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])
# for ni in range(len(dateList)):
# d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
# diff = d2-d1
# tbase.append(diff.days)
# dates=[]
# for ni in range(len(dateList)):
# d = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
# dates.append(d)
# datevector=[]
# for i in range(len(dates)):
# datevector.append(np.float(dates[i].year) + np.float(dates[i].month-1)/12 + np.float(dates[i].day-1)/365)
# datevector2=[round(i,2) for i in datevector]
###########################################
# eventDates=['20041223','20051003']
# try:
# eventDates
# events=[]
# for ni in range(len(eventDates)):
# d = datetime.datetime(*time.strptime(eventDates[ni],"%Y%m%d")[0:5])
# events.append(d)
# except:
# print ''
#print events
###########################################
try:
velocityFile
h5file=h5py.File(velocityFile,'r')
k=h5file.keys()
dset= h5file[k[0]].get(k[0])
print 'The file to display is: ' + k[0]
except:
dset = h5timeseries['interferograms'][h5timeseries['interferograms'].keys()[-1]].get(h5timeseries['interferograms'].keys()[-1])
# timeseries = np.zeros((len(h5timeseries['timeseries'].keys()),np.shape(dset)[0],np.shape(dset)[1]),np.float32)
# for date in h5timeseries['timeseries'].keys():
# timeseries[dateIndex[date]] = h5timeseries['timeseries'].get(date)
###########################################
fig = plt.figure()
ax=fig.add_subplot(111)
try:
vmin
vmax
ax.imshow(dset,vmin=vmin,vmax=vmax)
except:
ax.imshow(dset)
fig2 = plt.figure(2)
ax2=fig2.add_subplot(111)
# print dates
# print dateList
try:
timeSeriesFile_2
h5timeseries_2=h5py.File(timeSeriesFile_2)
except:
print""
##########################################
def onclick(event):
if event.button==1:
print 'click'
xClick = int(event.xdata)
yClick = int(event.ydata)
print xClick
print yClick
Dis=[]
for igram in h5timeseries['interferograms'].keys():
Dis.append( h5timeseries['interferograms'][igram].get(igram)[yClick][xClick])
ax2.cla()
try:
Dis2=[]
for igram in h5timeseries['interferograms'].keys():
Dis2.append( h5timeseries_2['interferograms'][igram].get(igram)[yClick][xClick])
dis2=array(Dis2)
# dis2=round(dis2/2./pi)
# dis2=dis2*unitFac
ax2.plot(dates,dis2, '^',ms=markerSize2, alpha=0.7, mfc=markerColor2)
except:
Dis2=[]
# ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
dis=array(Dis)
print dis
# dis=round(dis/2./pi)
# dis=dis*unitFac
ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
# print dis
# print dates
# print dset[yClick][xClick]
# ax2.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
# if unitFac==100:
# ax2.set_ylabel('Displacement [cm]',fontsize=fontSize)
# elif unitFac==1000:
# ax2.set_ylabel('Displacement [mm]',fontsize=fontSize)
# else:
# ax2.set_ylabel('Displacement [m]',fontsize=fontSize)
# ax2.set_xlabel('Time [years]',fontsize=fontSize)
# ds=datevector[0]-0.2
# de=datevector[-1]+0.2
# ys=int(ds)
# ye=int(de)
# ms=int((ds-ys)*12)+1
# me=int((de-ye)*12)+1
# dss=datetime.datetime(ys,ms,1,0,0)
# dee=datetime.datetime(ye,me,1,0,0)
# ax2.set_xlim(dss,dee)
try:
lbound
hbound
ax2.set_ylim(lbound,hbound)
except:
ax2.set_ylim(min(dis)-0.4*abs(min(dis)),max(dis)+0.4*max(dis))
for tick in ax2.xaxis.get_major_ticks():
tick.label.set_fontsize(fontSize)
for tick in ax2.yaxis.get_major_ticks():
tick.label.set_fontsize(fontSize)
# specify integer or one of preset strings, e.g.
#tick.label.set_fontsize('x-small')
# tick.label.set_rotation('vertical')
fig2.autofmt_xdate()
plt.show()
import scipy.io as sio
Phase={}
Phase['phase']=Dis
# Delay['time']=datevector
sio.savemat('phase.mat', {'phase': Phase})
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
stulp/dmpbbo | src/functionapproximators/tests/testPerturbModelParameterLWR.py | 1 | 2762 | # This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2014 Freek Stulp, ENSTA-ParisTech
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy
import matplotlib.pyplot as plt
import os, sys, subprocess
# Include scripts for plotting
lib_path = os.path.abspath('../../../python')
sys.path.append(lib_path)
from functionapproximators.functionapproximators_plotting import *
def plotFunctionApproximatorTrainingFromDirectory(directory,ax):
"""Load data related to function approximator training from a directory and plot it."""
plotDataFromDirectory(directory,ax)
data_read_successfully = True
cur_directory_number=0
while (data_read_successfully):
cur_dir = directory+'/perturbation'+str(cur_directory_number)+'/'
data_read_successfully = plotLocallyWeightedLinesFromDirectory(cur_dir,ax)
cur_directory_number+=1
if __name__=='__main__':
"""Pass a directory argument, read inputs, targets and predictions from that directory, and plot."""
executable = "../../../bin_test/testPerturbModelParameterLWR"
if (not os.path.isfile(executable)):
print("")
print("ERROR: Executable '"+executable+"' does not exist.")
print("Please call 'make install' in the build directory first.")
print("")
sys.exit(-1);
fig_number = 1;
directory = "/tmp/testPerturbModelParameterLWR/"
# Call the executable with the directory to which results should be written
command = executable+" "+directory
#print(command)
subprocess.call(command, shell=True)
for dim in [1, 2]:
fig = plt.figure(dim)
cur_directory = directory+"/"+str(dim)+"D"
if (getDataDimFromDirectory(cur_directory)==1):
ax = fig.gca()
else:
ax = Axes3D(fig)
plotFunctionApproximatorTrainingFromDirectory(cur_directory,ax)
plt.show()
| lgpl-2.1 |
MatteusDeloge/opengrid | notebooks/load_duration.py | 2 | 2365 | # -*- coding: utf-8 -*-
"""
Script to extract all minute sensor data from the flukso server through the
flukso api.
Created on Mon Dec 30 04:24:28 2013 by Roel De Coninck
"""
import os, sys
import inspect
import numpy as np
import matplotlib.pyplot as plt
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# add the path to opengrid to sys.path
sys.path.append(os.path.join(script_dir, os.pardir, os.pardir))
from opengrid.library.houseprint import Houseprint
from opengrid.library import fluksoapi
# script settings ############################################################
water = True
gas = True
electricity = True
create_ts = False
path_to_data = os.path.join(script_dir, os.pardir, os.pardir, os.pardir, 'work', 'data')
##############################################################################
def get_timeseries(hp, sensortype):
"""
Return list with pandas TimeSeries for all sensors of the given sensortype.
The timeseries have the sensor hex as name.
"""
timeseries = []
# create list with sensors
sensors = hp.get_sensors_by_type(sensortype)
for sensor in sensors:
# compose a single csv of all the data and load as timeseries
try:
csv = fluksoapi.consolidate(folder = path_to_data, sensor = sensor)
except ValueError:
# this sensor has no csv files: no problem
pass
ts = fluksoapi.load_csv(csv)
ts.name = sensor
timeseries.append(ts)
return timeseries
def load_duration(list_ts):
"""
Make a simple plot with load duration curves for all timeseries in list_ts.
"""
fig = plt.figure()
ax = plt.subplot(111)
for ts in list_ts:
arr = ts.values
arr = arr.reshape(arr.size,)
ax.plot(np.sort(arr), label = hp.get_flukso_from_sensor(ts.name))
plt.legend()
return fig, ax
if __name__ == '__main__':
if create_ts:
hp = Houseprint()
hp.get_all_fluksosensors()
print('Sensor data fetched')
timeseries = {}
for t in ['water', 'gas', 'electricity']:
timeseries[t] = get_timeseries(hp, t)
for t in ['water', 'gas', 'electricity']:
if eval(t):
load_duration(timeseries[t])
| apache-2.0 |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.py | 4 | 91787 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``y``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known by the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics. The customized distance can also be
used. See the ``distance.pdist`` function for details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
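Examples
--------
A minimal, illustrative example (the merge heights depend on the input
data; only the shape is asserted here):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> Z = linkage(X, method='single', metric='euclidean')
>>> Z.shape # one merge per row: (n - 1, 4)
(3, 4)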
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode
The root ClusterNode object of the tree. If ``rd`` is True, the
tuple ``(r, d)`` described above is returned instead.
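Examples
--------
Illustrative only; it walks the tree built from a tiny linkage:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, to_tree
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> root = to_tree(linkage(X, method='single'))
>>> root.get_count() # all four observations sit under the root
4
>>> sorted(root.pre_order()) # leaf ids, sorted for a stable display
[0, 1, 2, 3]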
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
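Examples
--------
A small, illustrative call (the coefficient value depends on the data):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, cophenet
>>> from scipy.spatial.distance import pdist
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> Y = pdist(X)
>>> Z = linkage(Y, method='single')
>>> c, d = cophenet(Z, Y) # correlation coefficient and cophenetic distances
>>> d.shape # same condensed form as Y
(6,)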
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}
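Examples
--------
Illustrative only; the statistics depend on the linkage heights:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, inconsistent
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> R = inconsistent(linkage(X, method='single'), d=2)
>>> R.shape # one row of statistics per non-singleton cluster
(3, 4)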
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
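Examples
--------
A sketch of the conversion; the input below is a hypothetical
MATLAB-style linkage over four observations (1-based indices,
three columns):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import from_mlab_linkage
>>> Zm = np.array([[1., 2., 0.3],
... [3., 4., 0.4],
... [5., 6., 1.2]])
>>> Z = from_mlab_linkage(Zm) # indices shift to 0-based, counts appended
>>> Z.shape
(3, 4)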
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# We expect the i'th value to be greater than its successor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
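Examples
--------
Illustrative only; the integer labels themselves are arbitrary:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> Z = linkage(X, method='single')
>>> T = fcluster(Z, t=2.0, criterion='distance')
>>> len(T) # one flat-cluster label per original observation
4
>>> len(set(T)) # observations 0,1 and 2,3 end up in two clusters
2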
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
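Examples
--------
A sketch of the one-call pipeline (pdist -> linkage -> fcluster):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import fclusterdata
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> T = fclusterdata(X, t=2.0, criterion='distance', method='single')
>>> len(set(T)) # two well-separated groups
2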
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
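Examples
--------
Illustrative only; leaf ids are sorted here for a stable display:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, leaves_list
>>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
>>> sorted(leaves_list(linkage(X, method='single')))
[0, 1, 2, 3]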
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
# Apply label settings with explicit loops (``map`` is lazy on Python 3).
if leaf_rotation:
    for lbl in lbls:
        lbl.set_rotation(leaf_rotation)
else:
    leaf_rot = float(_get_tick_rotation(len(ivl)))
    for lbl in lbls:
        lbl.set_rotation(leaf_rot)
if leaf_font_size:
    for lbl in lbls:
        lbl.set_size(leaf_font_size)
else:
    leaf_fs = float(_get_tick_text_size(len(ivl)))
    for lbl in lbls:
        lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
    for lbl in lbls:
        lbl.set_rotation(leaf_rotation)
else:
    leaf_rot = float(_get_tick_rotation(p))
    for lbl in lbls:
        lbl.set_rotation(leaf_rot)
if leaf_font_size:
    for lbl in lbls:
        lbl.set_size(leaf_font_size)
else:
    leaf_fs = float(_get_tick_text_size(p))
    for lbl in lbls:
        lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
    for lbl in lbls:
        lbl.set_rotation(leaf_rotation)
if leaf_font_size:
    for lbl in lbls:
        lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
    for lbl in lbls:
        lbl.set_rotation(leaf_rotation)
if leaf_font_size:
    for lbl in lbls:
        lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
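Examples
--------
An illustrative sketch; the palette below is an arbitrary choice, and the
module default (defined above as ``_link_line_colors``) is restored
afterwards:
>>> from scipy.cluster import hierarchy
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> hierarchy.set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])  # restore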
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When leaf_label_func is a callable function, it is called with the
cluster index :math:`k < 2n-1` of each leaf and is expected to
return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
...     if id < n:
...         return str(id)
...     else:
...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
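Examples
--------
A minimal sketch that computes the plot geometry without drawing; the data
are arbitrary and matplotlib is not required when ``no_plot=True``:
>>> from scipy.cluster.hierarchy import linkage, dendrogram
>>> Z = linkage([[0.0], [1.0], [4.0], [5.0]], method='single')
>>> R = dendrogram(Z, no_plot=True)
>>> sorted(R.keys())
['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']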
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
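Examples
--------
Two labelings that differ only by a renaming of the cluster ids are
equivalent (the assignments below are arbitrary):
>>> from scipy.cluster.hierarchy import is_isomorphic
>>> is_isomorphic([1, 1, 2, 2], [2, 2, 1, 1])
True
>>> is_isomorphic([1, 1, 2, 2], [1, 2, 2, 2])
False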
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
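Examples
--------
A small sketch with arbitrary one-dimensional observations:
>>> from scipy.cluster.hierarchy import linkage, maxdists
>>> Z = linkage([[0.0], [1.0], [10.0]], method='single')
>>> maxdists(Z).tolist()
[1.0, 9.0]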
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
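Examples
--------
A sketch combining ``linkage`` and ``inconsistent``; the data are arbitrary:
>>> from scipy.cluster.hierarchy import linkage, inconsistent, maxinconsts
>>> Z = linkage([[0.0], [1.0], [4.0], [5.0]], method='single')
>>> R = inconsistent(Z)
>>> maxinconsts(Z, R).shape
(3,)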
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` is the set of all node ids corresponding to nodes below
and including ``j``.
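Examples
--------
Column 3 of the inconsistency matrix holds the inconsistency coefficient,
so ``maxRstat(Z, R, 3)`` matches ``maxinconsts(Z, R)`` (arbitrary data):
>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, inconsistent, maxRstat, maxinconsts
>>> Z = linkage([[0.0], [1.0], [4.0], [5.0]], method='single')
>>> R = inconsistent(Z)
>>> bool(np.allclose(maxRstat(Z, R, 3), maxinconsts(Z, R)))
True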
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
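Examples
--------
A minimal sketch; the observations are arbitrary and ``T`` comes from
``fcluster`` so it has the required integer dtype:
>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage, fcluster, leaders
>>> Z = linkage([[0.0], [1.0], [4.0], [5.0]], method='single')
>>> T = fcluster(Z, t=2.0, criterion='distance')
>>> L, M = leaders(Z, T)
>>> len(L) == len(np.unique(T))
True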
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| apache-2.0 |
pabitrad/HTBaseBackend | pychart/pychart.py | 1 | 1359 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import arange
g_dpi=80
def drawGraph(tfn, dim, data, xlabel, ylabel, title, style, pltformat, sizex, sizey):
global g_dpi
if dim==2:
x = data[0]
y = data[1]
# Create the figure first so the labels and title attach to it
# rather than to a stray implicit figure.
fig = plt.figure(figsize=(sizex/g_dpi, sizey/g_dpi), dpi=g_dpi)
plt.plot(x, y, style)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(tfn, transparent=True, format=pltformat, dpi=g_dpi)
plt.close(fig)
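# Hypothetical usage sketch (not part of the original module): the output
# path, data and canvas size below are assumptions chosen only to
# illustrate the call signature defined above.
#
#   drawGraph('/tmp/line.png', 2, [[1, 2, 3], [2, 4, 9]],
#             'x', 'y', 'demo plot', 'r-', 'png', 640, 480)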
def drawPie(tfn, data, title, pLabels, pColors, pShadow, pltformat, sizex, sizey):
global g_dpi
if len(data)!=len(pLabels):
pLabels=None
if len(pColors)==0:
pColors=None
# Create the figure before adding the title so the title attaches to it.
fig = plt.figure(figsize=(sizex/g_dpi, sizey/g_dpi), dpi=g_dpi)
plt.pie(data, labels=pLabels, colors=pColors, shadow=pShadow)
plt.title(title)
plt.savefig(tfn, transparent=True, format=pltformat, dpi=g_dpi)
plt.close(fig)
def drawBar(tfn, data, xlabels, ylabel, title, barcolor, pltformat, sizex, sizey, width):
global g_dpi
if len(barcolor)==0:
barcolor=None
x = data
fig=plt.figure(figsize=(sizex/g_dpi,sizey/g_dpi), dpi=g_dpi)
plt.bar(arange(len(data)), x, width, color=barcolor, figure=fig )
plt.xticks(arange(len(data))+width/2.,xlabels)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(tfn, transparent=True, format=pltformat, dpi=g_dpi)
plt.close(fig) | gpl-3.0 |
harrison-caudill/pylink | examples/eg_budgets.py | 1 | 4628 | #!/usr/bin/env python
import os
import shutil
import subprocess
import numpy as np
import matplotlib.pyplot as plt
import pylink
sat_pattern = pylink.pattern_generator(3)
sat_rf_chain = [
pylink.Element(name='Cables',
gain_db=-0.75,
noise_figure_db=0.75),
pylink.Element(name='LNA',
gain_db=35,
noise_figure_db=2.75),
pylink.Element(name='Filter',
gain_db=-3.5,
noise_figure_db=3.5),
pylink.Element(name='Demodulator',
gain_db=0,
noise_figure_db=15),
]
gs_rf_chain = [
pylink.Element(name='Cables',
gain_db=-0.75,
noise_figure_db=0.75),
pylink.Element(name='LNA',
gain_db=35,
noise_figure_db=2.75),
pylink.Element(name='Filter',
gain_db=-3.5,
noise_figure_db=3.5),
pylink.Element(name='Demodulator',
gain_db=0,
noise_figure_db=15),
]
geometry = pylink.Geometry(apoapsis_altitude_km=550,
periapsis_altitude_km=500,
min_elevation_deg=20)
sat_rx_antenna = pylink.Antenna(gain=3,
polarization='RHCP',
pattern=sat_pattern,
rx_noise_temp_k=1000,
is_rx=True,
tracking=False)
sat_tx_antenna = pylink.Antenna(gain=3,
polarization='RHCP',
pattern=sat_pattern,
is_rx=False,
tracking=False)
gs_rx_antenna = pylink.Antenna(pattern=pylink.pattern_generator(48),
rx_noise_temp_k=300,
polarization='RHCP',
is_rx=True,
tracking=True)
gs_tx_antenna = pylink.Antenna(gain=25,
polarization='RHCP',
is_rx=False,
tracking=True)
sat_receiver = pylink.Receiver(rf_chain=sat_rf_chain,
implementation_loss_db=2,
name='Satellite SBand Receiver')
gs_receiver = pylink.Receiver(rf_chain=gs_rf_chain,
name='Ground SBand Receiver')
gs_transmitter = pylink.Transmitter(tx_power_at_pa_dbw=23,
name='Ground SBand Transmitter')
sat_transmitter = pylink.Transmitter(tx_power_at_pa_dbw=1.5,
name='Satellite XBand Transmitter')
rx_interconnect = pylink.Interconnect(is_rx=True)
tx_interconnect = pylink.Interconnect(is_rx=False)
x_channel = pylink.Channel(bitrate_hz=1e6,
allocation_hz=500e4,
center_freq_mhz=8200,
atmospheric_loss_db=1,
ionospheric_loss_db=1,
rain_loss_db=2,
multipath_fading_db=0,
polarization_mismatch_loss_db=3)
s_channel = pylink.Channel(bitrate_hz=500e3,
allocation_hz=5e6,
center_freq_mhz=2022.5,
atmospheric_loss_db=.5,
ionospheric_loss_db=.5,
rain_loss_db=1,
multipath_fading_db=0,
polarization_mismatch_loss_db=3)
# defaults to DVB-S2X
modulation = pylink.Modulation()
DOWNLINK = pylink.DAGModel([geometry,
gs_rx_antenna,
sat_transmitter,
sat_tx_antenna,
gs_receiver,
x_channel,
rx_interconnect,
tx_interconnect,
modulation,
pylink.LinkBudget(name='Example XBand Downlink',
is_downlink=True)])
UPLINK = pylink.DAGModel([geometry,
sat_rx_antenna,
sat_receiver,
gs_transmitter,
gs_tx_antenna,
s_channel,
pylink.LinkBudget(name='Example SBand Uplink',
is_downlink=False)])
| bsd-3-clause |