repo_name (string, 6-96 chars) | path (string, 4-191 chars) | copies (string, 322 classes) | size (string, 4-6 chars) | content (string, 762-753k chars) | license (string, 15 classes)
---|---|---|---|---|---
lefthandedroo/Cosmo-models | zprev versions/plots.py | 1 | 3865 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 12:40:48 2018
@author: BallBlueMeercat
"""
import pickle
from matplotlib.font_manager import FontProperties
from pylab import figure, plot, xlabel, ylabel, grid, legend, title, show, scatter
def plots(mag, zpicks, z, dlpc, dl, gamma, e_dash0m, e_dash0de, t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde):
# f = open('results.pckl', 'rb')
# obj = pickle.load(f)
# f.close()
#
# mag, zpicks, z, dlpc, dl, gamma, e_dash0m, e_dash0de, t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde = obj
# Plotting selected results:
# a and a_dot vs time.
while True:
figure()
xlabel('time in $H_0^{-1}$')
grid(True)
plot(t_cut, a_cut, 'r', t_cut, a_dotcut, 'b', lw=1)
legend((r'$a$', r'$\dot{a}$'), prop=FontProperties(size=16))
title('IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
# e_dashm and e_dashde vs time.
while True:
# e_dashm
figure()
xlabel('t in 1/H0')
ylabel('$\epsilon_m \'$')
lw = 1
plot(t_cut, e_dashm, 'g', linewidth=lw)
title('$\epsilon_m \'$ evolution, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
# e_dashde
figure()
xlabel('t in 1/H0')
ylabel('$\epsilon_{DE} \'$')
lw = 1
plot(t_cut, e_dashde, 'm', linewidth=lw)
title('$\epsilon_{DE} \'$ evolution, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
# Luminosity distance vs redshift.
while True:
figure()
xlabel('redshift $z$')
ylabel('$d_L$*($H_0$/c)')
grid(True)
plot(z, dl, 'tab:green', lw=1)
title('$d_L$, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
while False:
figure()
xlabel('redshift $z$')
ylabel('pc')
grid(True)
plot(z, dlpc, 'tab:green', lw=1)
title('$d_L$, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
dlgpc = dlpc /10**9
while False:
figure()
xlabel('redshift $z$')
ylabel('Gpc')
grid(True)
plot(z, dlgpc, 'tab:green', lw=1)
title('$d_L$, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
while True:
# Redshift vs time.
figure()
xlabel('time in $H_0^{-1}$')
ylabel('$z$')
# axis([0,-0.8,0,5])
grid(True)
plot(t_cut, z, 'tab:pink', lw=1)
title('Redshift, IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [cut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
# Complete results, including the blow-up as the scale factor a approaches the big bang.
while False:
figure()
xlabel('time in $H_0^{-1}$')
grid(True)
plot(t, a, 'r', lw=1)
plot(t, a_dot, 'b', lw=1)
legend((r'$a$', r'$\dot{a}$', r'$\'\epsilon$'), prop=FontProperties(size=16))
title('IC: $\epsilon_{m0} \'$ = %s, $\epsilon_{DE0} \'$ =%s, $\gamma$ = %s [uncut]'
%(e_dash0m, e_dash0de, gamma))
show()
break
while True:
figure()
title('Mag simulated with msim parameters')
scatter(zpicks, mag, lw='3', c='r')
show()
break
return
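# Editor's sketch (not in the original file): how plots() could be driven from the
# pickled results hinted at by the commented-out block at the top of the function.
# The file name 'results.pckl' and the tuple layout are assumptions taken from those
# comments, not confirmed by the original code.
if __name__ == '__main__':
    with open('results.pckl', 'rb') as f:
        obj = pickle.load(f)
    # unpack in the same order as the plots() signature above
    (mag, zpicks, z, dlpc, dl, gamma, e_dash0m, e_dash0de,
     t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde) = obj
    plots(mag, zpicks, z, dlpc, dl, gamma, e_dash0m, e_dash0de,
          t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde)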
| mit |
wesm/statsmodels | scikits/statsmodels/sandbox/distributions/examples/matchdist.py | 5 | 9771 |
'''given a 1D sample of observations, find a matching distribution
* estimate maximum likelihood parameters for each distribution
* rank estimated distributions by Kolmogorov-Smirnov and Anderson-Darling
test statistics
Author: Josef Pktd
License: Simplified BSD
original December 2008
TODO:
* refactor to result class
* split estimation by support, add option and choose automatically
*
'''
from scipy import stats
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#stats.distributions.beta_gen._fitstart = lambda self, data : (5,5,0,1)
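# Editor's sketch (not part of the original script): the procedure described in the
# module docstring, reduced to its core -- fit each candidate distribution by maximum
# likelihood, then rank the candidates by their Kolmogorov-Smirnov statistic.  The
# helper name and the short candidate list are illustrative; the real loop over
# `targetdist` follows further down.
def rank_by_ks(sample, candidates=('norm', 't', 'laplace')):
    results = []
    for name in candidates:
        dist = getattr(stats, name)
        params = dist.fit(sample)                      # MLE estimates (shapes, loc, scale)
        ks_stat, ks_pval = stats.kstest(sample, name, args=params)
        results.append((ks_stat, ks_pval, name, params))
    return sorted(results)                             # smaller KS statistic = closer match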
def plothist(x,distfn, args, loc, scale, right=1):
plt.figure()
# the histogram of the data
n, bins, patches = plt.hist(x, 25, normed=1, facecolor='green', alpha=0.75)
maxheight = max([p.get_height() for p in patches])
print maxheight
axlim = list(plt.axis())
#print axlim
axlim[-1] = maxheight*1.05
#plt.axis(tuple(axlim))
## print bins
## print 'args in plothist', args
# add a 'best fit' line
#yt = stats.norm.pdf( bins, loc=loc, scale=scale)
yt = distfn.pdf( bins, loc=loc, scale=scale, *args)
yt[yt>maxheight]=maxheight
lt = plt.plot(bins, yt, 'r--', linewidth=1)
ys = stats.t.pdf( bins, 10,scale=10,)*right
ls = plt.plot(bins, ys, 'b-', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Testing: %s :}\ \mu=%f,\ \sigma=%f$'%(distfn.name,loc,scale))
#plt.axis([bins[0], bins[-1], 0, 0.134+0.05])
plt.grid(True)
plt.draw()
#plt.show()
#plt.close()
#targetdist = ['norm','t','truncnorm','johnsonsu','johnsonsb',
targetdist = ['norm','alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invnorm', 'invweibull', 'johnsonsb',
'johnsonsu', 'laplace', 'levy', 'levy_l',
'logistic', 'loggamma', 'loglaplace', 'lognorm', 'gilbrat',
'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace']
left = []
right = []
finite = []
unbound = []
other = []
contdist = []
discrete = []
categ = {('open','open'):'unbound', ('0','open'):'right',('open','0',):'left',
('finite','finite'):'finite',('oth','oth'):'other'}
categ = {('open','open'):unbound, ('0','open'):right,('open','0',):left,
('finite','finite'):finite,('oth','oth'):other}
categ2 = {
('open', '0') : ['frechet_l', 'weibull_max', 'levy_l'],
('finite', 'finite') : ['anglit', 'cosine', 'rdist', 'semicircular'],
('0', 'open') : ['alpha', 'burr', 'fisk', 'chi', 'chi2', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy', 'f',
'foldnorm', 'frechet_r', 'weibull_min', 'genpareto', 'genexpon',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'halfcauchy',
'halflogistic', 'halfnorm', 'invgamma', 'invnorm', 'invweibull',
'levy', 'loglaplace', 'lognorm', 'gilbrat', 'maxwell', 'mielke',
'nakagami', 'ncx2', 'ncf', 'lomax', 'powerlognorm', 'rayleigh',
'rice', 'recipinvgauss', 'truncexpon', 'wald'],
('open', 'open') : ['cauchy', 'dgamma', 'dweibull', 'genlogistic', 'genextreme',
'gumbel_r', 'gumbel_l', 'hypsecant', 'johnsonsu', 'laplace',
'logistic', 'loggamma', 't', 'nct', 'powernorm', 'reciprocal',
'truncnorm', 'tukeylambda', 'vonmises'],
('0', 'finite') : ['arcsine', 'beta', 'betaprime', 'bradford', 'gausshyper',
'johnsonsb', 'powerlaw', 'triang', 'uniform', 'wrapcauchy'],
('finite', 'open') : ['pareto']
}
#Note: weibull_max == frechet_l
right_incorrect = ['genextreme']
right_all = categ2[('0', 'open')] + categ2[('0', 'finite')] + categ2[('finite', 'open')]\
+ right_incorrect
for distname in targetdist:
distfn = getattr(stats,distname)
if hasattr(distfn,'_pdf'):
if np.isinf(distfn.a):
low = 'open'
elif distfn.a == 0:
low = '0'
else:
low = 'finite'
if np.isinf(distfn.b):
high = 'open'
elif distfn.b == 0:
high = '0'
else:
high = 'finite'
contdist.append(distname)
categ.setdefault((low,high),[]).append(distname)
not_good = ['genextreme', 'reciprocal', 'vonmises']
# 'genextreme' is right (or left?), 'reciprocal' requires 0<a<b, 'vonmises' no a,b
targetdist = [f for f in categ[('open', 'open')] if not f in not_good]
not_good = ['wrapcauchy']
not_good = ['vonmises']
not_good = ['genexpon','vonmises']
#'wrapcauchy' requires additional parameter (scale) in argcheck
targetdist = [f for f in contdist if not f in not_good]
#targetdist = contdist
#targetdist = not_good
#targetdist = ['t', 'f']
#targetdist = ['norm','burr']
if __name__ == '__main__':
#TODO: calculate correct tail probability for mixture
prefix = 'run_conv500_1_'
convol = 0.75
n = 500
dgp_arg = 10
dgp_scale = 10
results = []
for i in range(1):
rvs_orig = stats.t.rvs(dgp_arg,scale=dgp_scale,size=n*convol)
rvs_orig = np.hstack((rvs_orig,stats.halflogistic.rvs(loc=0.4, scale=5.0,size =n*(1-convol))))
rvs_abs = np.absolute(rvs_orig)
rvs_pos = rvs_orig[rvs_orig>0]
rightfactor = 1
rvs_right = rvs_pos
print '='*50
print 'samplesize = ', n
for distname in targetdist:
distfn = getattr(stats,distname)
if distname in right_all:
rvs = rvs_right
rind = rightfactor
else:
rvs = rvs_orig
rind = 1
print '-'*30
print 'target = %s' % distname
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
ssupp = (rvs.min(), rvs.max())
if distname in ['truncnorm','betaprime','reciprocal']:
par0 = (sm-2*sstd,sm+2*sstd)
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd,*par0))
elif distname == 'norm':
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd))
elif distname == 'genextreme':
par_est = tuple(distfn.fit(rvs,-5,loc=sm,scale=sstd))
elif distname == 'wrapcauchy':
par_est = tuple(distfn.fit(rvs,0.5,loc=0,scale=sstd))
elif distname == 'f':
par_est = tuple(distfn.fit(rvs,10,15,loc=0,scale=1))
elif distname in right:
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
par_est = tuple(distfn.fit(rvs,loc=0,scale=1))
else:
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd))
print 'fit', par_est
arg_est = par_est[:-2]
loc_est = par_est[-2]
scale_est = par_est[-1]
rvs_normed = (rvs-loc_est)/scale_est
ks_stat, ks_pval = stats.kstest(rvs_normed,distname, arg_est)
print 'kstest', ks_stat, ks_pval
quant = 0.1
crit = distfn.ppf(1-quant*float(rind), loc=loc_est, scale=scale_est,*par_est)
tail_prob = stats.t.sf(crit,dgp_arg,scale=dgp_scale)
print 'crit, prob', quant, crit, tail_prob
#if distname == 'norm':
#plothist(rvs,loc_est,scale_est)
#args = tuple()
results.append([distname,ks_stat, ks_pval,arg_est,loc_est,scale_est,crit,tail_prob ])
#plothist(rvs,distfn,arg_est,loc_est,scale_est)
#plothist(rvs,distfn,arg_est,loc_est,scale_est)
#plt.show()
#plt.close()
#TODO: collect results and compare tail quantiles
from operator import itemgetter
res_sort = sorted(results, key = itemgetter(2))
res_sort.reverse() #kstest statistic: smaller is better, pval larger is better
print 'number of distributions', len(res_sort)
imagedir = 'matchresults'
import os
if not os.path.exists(imagedir):
os.makedirs(imagedir)
for ii,di in enumerate(res_sort):
distname,ks_stat, ks_pval,arg_est,loc_est,scale_est,crit,tail_prob = di[:]
distfn = getattr(stats,distname)
if distname in right_all:
rvs = rvs_right
rind = rightfactor
ri = 'r'
else:
rvs = rvs_orig
ri = ''
rind = 1
print '%s ks-stat = %f, ks-pval = %f tail_prob = %f)' % \
(distname, ks_stat, ks_pval, tail_prob)
## print 'arg_est = %s, loc_est = %f scale_est = %f)' % \
## (repr(arg_est),loc_est,scale_est)
plothist(rvs,distfn,arg_est,loc_est,scale_est,right = rind)
plt.savefig(os.path.join(imagedir,'%s%s%02d_%s.png'% (prefix, ri,ii, distname)))
##plt.show()
##plt.close()
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 |
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
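# Editor's sketch (not part of the original module): the binary probability mapping the
# predict_proba docstring above gives for loss="modified_huber",
# (clip(decision_function(X), -1, 1) + 1) / 2, applied to raw decision scores.
# Illustrative only; the estimator's own _predict_proba also handles the multiclass
# normalization.
def _modified_huber_binary_proba_sketch(decision_scores):
    scores = np.asarray(decision_scores, dtype=np.float64)
    p_pos = (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0    # probability of the positive class
    return np.column_stack((1.0 - p_pos, p_pos))         # columns: [negative class, positive class]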
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
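# Editor's sketch (not part of the original module): minimal out-of-core use of
# SGDClassifier.partial_fit as described in its docstring -- `classes` must list all
# labels and is required on the first call only.  The random minibatches stand in for
# whatever streaming data source the caller actually has.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    all_classes = np.array([0, 1])
    clf = SGDClassifier(loss="log", random_state=0)
    for _ in range(5):                              # pretend stream of minibatches
        X_chunk = rng.randn(20, 4)
        y_chunk = rng.randint(0, 2, size=20)
        clf.partial_fit(X_chunk, y_chunk, classes=all_classes)
    print(clf.predict(rng.randn(3, 4)))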
| bsd-3-clause |
Messaoud-Boudjada/dipy | dipy/tests/test_scripts.py | 9 | 4292 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Run scripts and check outputs
"""
from __future__ import division, print_function, absolute_import
import os
import shutil
from os.path import (dirname, join as pjoin, abspath)
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
from .scriptrunner import ScriptRunner
runner = ScriptRunner(
script_sdir = 'bin',
debug_print_var = 'NIPY_DEBUG_PRINT')
run_command = runner.run_command
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@nt.dec.skipif(no_mpl)
def test_qb_commandline():
with InTemporaryDirectory():
tracks_file = get_data('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
assert_equal(out[0], 0)
| bsd-3-clause |
rrohan/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 |
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
|
bsd-3-clause
|
AlexisEidelman/Til
|
til/pgm/Archives/DataTable_from_liam.py
|
2
|
7093
|
# -*- coding:utf-8 -*-
"""
Convert Liam output into OpenFisca input
"""
from pandas import HDFStore, merge # DataFrame
import numpy as np
import pdb
import time
from src.lib.simulation import SurveySimulation
from src.parametres.paramData import XmlReader, Tree2Object
import pandas as pd
import datetime as dt
import pandas.rpy.common as com
from rpy2.robjects import r
temps = time.clock()
input = "C:/Myliam2/Model/simulTest.h5"
output = "C:/openfisca/output/liam/"
name_convertion = {'person':'ind','declar':'foy','menage':'men', 'fam':'fam'}
store = HDFStore(input)
goal = HDFStore("C:/openfisca/src/countries/france/data/surveyLiam.h5")
#available_years = sorted([int(x[-4:]) for x in store.keys()])
# we first work on all the tables, then select each year
# step 1
table = {}
nom = 'person'
base = 'entities/'+nom
ent = name_convertion[nom]
table[ent] = store[str(base)]
# get years
years = np.unique(table[ent]['period'].values)[:1]
# rename variables to make them OF ones
table[ent] = table[ent].rename(columns={'res': 'idmen', 'quires': 'quimen', 'foy': 'idfoy', 'id': 'noi'})
# important work on the qui==2 members: give duplicates within the same unit distinct ranks
time_qui = time.clock()
for ent in ('men','foy'): # 'fam' un jour...
qui= 'qui'+ent
ident = 'id'+ent
trav = table['ind'].ix[table['ind'][qui]==2, [ident,qui,'period']]
for name, group in trav.groupby([ident,'period']):
to_add = range(len(group))
group[qui] = group[qui]+to_add
table['ind'].ix[group[qui].index, qui] = group[qui]
print "les qui pour ", ent," sont réglés"
time_qui = time.clock() - time_qui
print time_qui
ent = 'ind'
# variable creation
table[ent]['agem'] = 12 * table[ent]['age']
table[ent]['ageq'] = table[ent]['age']/5 - 4
f = lambda x: min( max(x, 0), 12)
table[ent]['ageq'] = table[ent]['ageq'].map(f)
# households that we drop for the moment
#diff_foy = set([3880, 3916, 4190, 7853, 8658, 9376, 9508, 9717, 12114, 13912, 15260])
#
#temp = table['ind']['idfoy'].isin(diff_foy)
#diff_ind = table['ind'][temp]['id']
#diff_men = table['ind'][temp]['idmen'].copy()
#temp_ent = table[ent]['idmen'].isin(diff_men)
#table[ent] = table[ent][-temp_ent]
# afterwards we have to hope that the households work out and that there is no
# father in diff_ind belonging to someone else, i.e. a father outside the deleted household
# actually we do not care, we only do legislation here
# create fam base
table[ent][['idfam','quifam']] = table[ent][['idmen','quimen']]
# save information on qui == 0
foy0 = table[ent].ix[table[ent]['quifoy']==0,['id','idfoy','idmen','idfam','period']]
men0 = table[ent].ix[table[ent]['quimen']==0,['id','idfoy','idmen','idfam','period']]
fam0 = men0
for nom in ('menage','declar','fam'):
ent = name_convertion[nom]
base = 'entities/'+nom
ident = 'id'+ent
if ent == 'fam':
table[ent] = eval(ent +'0')
else :
table[ent] = store[str(base)].rename(columns={'id': ident})
table[ent] = merge(table[ent], eval(ent +'0'), how='left', left_on=[ident,'period'], right_on=[ident,'period'])
    # translate variables into OF names for these entities
if ent=='men':
        # nbinde is capped at 6 people, hence a value of 5 in python
table[ent]['nbinde'] = (table[ent]['nb_persons']-1) * (table[ent]['nb_persons']-1 <=5) +5*(table[ent]['nb_persons']-1 >5)
# temp_ent = table[ent]['idmen'].isin(diff_men)
# print ent
# table[ent] = table[ent][-temp_ent]
# test on the number of qui == 0
test = {}
for year in years:
for nom in ('menage','declar'):
ent = name_convertion[nom]
base = 'entities/'+nom
ident = 'id'+ent
print ent, base, ident
test[ent] = store[str(base)].rename(columns={'id': ident})
test[ent] = test[ent].ix[test[ent]['period']==year,:]
test0 = eval(ent +'0')[eval(ent +'0')['period']==year]
tab = table[ent].ix[table[ent]['period']==year,['id','id'+ent,'idfam']]
ind = table['ind'].ix[table['ind']['period']==year,['qui'+ent]]
list_ind = ind[ind==0]
lidmen = test[ent][ident]
lidmenU = np.unique(lidmen)
diff1 = set(test0[ident]).symmetric_difference(lidmenU)
#voir = store[str(base)][['id','period']].rename(columns={'id': ident})
#voir = store[str(base)].rename(columns={'id': ident})[[ident,'period']]
#voir.ix[ voir['period']==2011,'id']
#
#test[ent][ident][:10]
#test1.ix[table[ent]['period']==year,['idmen']]
        # there is something odd with people who marry and then divorce
        # take the opportunity to handle conj = 0 or conj = -1 properly
        # if we do not stop here, it means there is no problem!!
print year, ent, diff1
for k in diff1:
pd.set_printoptions(max_columns=30)
listind = table['ind'][table['ind'][ident]==k]
print listind
for indiv in np.unique(listind['id']):
print table['ind'].ix[table['ind']['id']==indiv,['id','period','sexe','idmen','quimen','idfoy','quifoy','conj','mere','pere']]
pdb.set_trace()
for year in years:
goal.remove('survey_'+str(year))
for ent in ('ind','men','foy','fam'):
tab = table[ent].ix[table[ent]['period']==year]
key = 'survey_'+str(year) + '/'+ent
goal.put(key, tab)
# if year == 2010:
# pdb.set_trace()
# tab = table[ent].ix[table[ent]['period']==year]
# tab[:5]
# len(tab['idfam'])
# len(np.unique(tab['idfam']))
# list_qui = tab['idfam']
# double = list_qui.value_counts()[list_qui.value_counts()>1]
# tabind = table['ind'].ix[table['ind']['period']==year]
store.close()
goal.close()
# now run the OF model
country = 'france'
for year in years:
yr = str(year)
deb3 = time.clock()
simu = SurveySimulation()
simu.set_config(year = year, country = country)
    # set the parameters of the 2009 legislation
date_str = str(2009)+ '-01-01'
date = dt.datetime.strptime(date_str ,"%Y-%m-%d").date()
reader = XmlReader(simu.param_file, date)
rootNode = reader.tree
simu.P_default = Tree2Object(rootNode, defaut=True)
simu.P_default.datesim = date
simu.P = Tree2Object(rootNode, defaut=False)
simu.P.datesim = date
simu.set_survey(filename="C:/openfisca/src/countries/france/data/surveyLiam.h5", num_table=3, print_missing=True)
simu.compute()
for ent in ('ind','men','foy','fam'):
df = simu.outputs.table3[ent]
not_bool = df.dtypes[df.dtypes != bool]
print df.ix[:50,df.dtypes[df.dtypes == bool].index]
df = df.ix[:,not_bool.index]
r_dataframe = com.convert_to_r_dataframe(df)
name = ent+'_'+str(year)
r.assign(name, r_dataframe)
file_dir = output + name+ ".gzip"
phrase = "save("+name+", file='" +file_dir+"', compress=TRUE)"
r(phrase)
fin3 = time.clock()
print time.clock()- temps
|
gpl-3.0
|
NYUDataBootcamp/Materials
|
Code/Python/bootcamp_mini_basics.py
|
1
|
3987
|
"""
For Class #1 of an informal mini-course at NYU Stern, Fall 2014.
Topics: calculations, assignments, strings, slicing, lists, data frames,
reading csv and xls files
Repository of materials (including this file):
* https://github.com/NYUDataBootcamp/Materials
Written by Dave Backus, Sarah Beckett-Hile, and Glenn Okun
Created with Python 3.4
"""
"""
Check Python version
"""
# https://docs.python.org/3.4/library/sys.html
import sys
print('\nWhat version of Python? \n', sys.version, '\n', sep='')
if float(sys.version_info[0]) < 3.0 :
raise Exception('Program halted, old version of Python. \n',
'Sorry, you need to install Anaconda again.')
else:
print('Congratulations, Python is up to date!')
#%%
"""
Calculations and assignments (best in IPython console)
"""
2*3
2 * 3
2^3      # careful: in Python ^ is bitwise xor (this gives 1); use 2**3 for exponentiation
log(3)   # raises NameError as written: log is not a builtin (use math.log or np.log)
#%%
"""
Strings
"""
a = 'some'
b = 'thing'
c = a + b
print('c = ', c)
print('c[1] is:', c[1])
print('c[1:2] is', c[1:2])
print('c[1:3] is:', c[1:3])
print('c[1:] is:', c[1:])
#%%
#print(['a[1:3]', a[1:3]])
# names
first, last = 'Dave', 'Backus'
full = first + ' ' + last
#%%
longstring = """
Four score and seven years ago
Our fathers brought forth """
print(longstring)
#%%
"""
Output and input
"""
print(full)
print(first, last)
print(last, ', ', first)
print(last, ', ', first, sep='')
#x = input('Type your name here --> ')
#print(x, end='\n\n')
"""
Lists
"""
x, y, z = 1, 2, 3      # define a few numbers so the list examples below run
numbers = [x, y, z]
strings = [a, b, c]
both = numbers + strings
print(['both[3:]', both[3:]])
#%%
"""
Functions
"""
def hello(firstname):
print('Hello, ', firstname)
hello('Dave')
#%%
def combine(first, last):
lastfirst = last + ', ' + first
return lastfirst
both = combine('Chase', 'Coleman')
print(both)
#%%
"""
Inputting data
"""
import pandas as pd
# check version
print('Pandas version ', pd.__version__)
# read from local file
file = '../Data/test1.csv'
df = pd.read_csv(file)
#%%
# some properties
print(df)
print(type(df))
print(['Shape is', df.shape])
print(df.mean())
print(df.columns)
print(['column labels', df.columns])
print(['row labels', df.index])
#%%
# read from url
url = 'https://raw.githubusercontent.com/NYUDataBootcamp/Materials/master/Data/test1.csv'
dfurl = pd.read_csv(url)
#%%
import pandas as pd
# read IMF's WEO data from
url = 'http://www.imf.org/external/pubs/ft/weo/2015/01/weodata/WEOApr2015all.xls'
weo = pd.read_csv(url, sep='\t') # tab = \t
print(weo.head())
print(['column labels', weo.columns])
print(['row labels', weo.index])
#%%
countries = ['AFG', 'USA']
variables = ['NGDP_RPCH', 'FLIBOR6']
weo_sub = weo[weo['ISO'].isin(countries) & weo['WEO Subject Code'].isin(variables)]
weo_sub.to_csv('weo.csv')
#%%
# copy file from url to hard drive
import urllib.request # this is a module from the package urllib
file = 'foo.csv'
url = 'https://raw.githubusercontent.com/NYUDataBootcamp/Materials/master/Data/test1.csv'
urllib.request.urlretrieve(url, file)
#%%
# Sarah's version
f = urllib.request.urlopen(url)
file = 'foo_sbh.csv'
with open(file, 'wb') as local_file:
local_file.write(f.read())
#%%
# read from xls
file = '../Data/test2.xlsx'
xls = pd.read_excel(file) # default is first sheet
#%%
# zip files
import pandas as pd
import urllib
import zipfile
import os
# this is a big file, best to test with something smaller
url = 'http://databank.worldbank.org/data/download/WDI_csv.zip'
file = os.path.basename(url) # strip out file name
urllib.request.urlretrieve(url, file) # copy to disk
# see what's there
print(['Is zipfile?', zipfile.is_zipfile(file)])
zf = zipfile.ZipFile(file, 'r')
print('List of zipfile contents (two versions)')
[print(file) for file in zf.namelist()]
zf.printdir()
# extract a component
csv = zf.extract('WDI_Data.csv') # copy to disk
df1 = pd.read_csv('WDI_Data.csv') # read
print(df1.columns) # check contents
# alternative: open and read
csv = zf.open('WDI_Data.csv')
df2 = pd.read_csv(csv)
print(df2.columns)
|
mit
|
Srisai85/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
6
|
11950
|
import numpy as np
from scipy.sparse import csr_matrix
from scipy.linalg import block_diag
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
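    # e.g. block_diag of three 3x3 constant blocks gives a 9x9 document-word matrix
    # in which documents 0-2 use only words 0-2, documents 3-5 only words 3-5, and so on.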
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test that passing a dense matrix with negative values raises an error
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_not_mac_os()
def test_lda_multi_jobs():
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_not_mac_os()
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
|
bsd-3-clause
|
sinhrks/seaborn
|
seaborn/tests/test_palettes.py
|
22
|
9613
|
import colorsys
import numpy as np
import matplotlib as mpl
import nose.tools as nt
import numpy.testing as npt
from .. import palettes, utils, rcmod, husl
from ..xkcd_rgb import xkcd_rgb
from ..crayons import crayons
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"], 3)
rcmod.set_palette(pal, 3)
nt.assert_equal(pal, mpl.rcParams["axes.color_cycle"])
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], default_pal)
def test_big_palette_context(self):
original_pal = palettes.color_palette("deep", n_colors=8)
context_pal = palettes.color_palette("husl", 10)
rcmod.set_palette(original_pal)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], original_pal)
# Reset default
rcmod.set()
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
pal_out = palettes.color_palette(name)
nt.assert_equal(len(pal_out), 6)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep")
double_deep = palettes.color_palette("deep", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2)
pal_bright = palettes.hls_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2)
pal_bright = palettes.husl_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
nt.assert_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
def test_crayon_palette(self):
names = list(crayons.keys())[10:15]
colors = palettes.crayon_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, crayons[name].lower())
def test_color_codes(self):
palettes.set_color_codes("deep")
colors = palettes.color_palette("deep") + [".1"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
nt.assert_equal(rgb_want, rgb_got)
palettes.set_color_codes("reset")
def test_as_hex(self):
pal = palettes.color_palette("deep")
for rgb, hex in zip(pal, pal.as_hex()):
nt.assert_equal(mpl.colors.rgb2hex(rgb), hex)
def test_preserved_palette_length(self):
pal_in = palettes.color_palette("Set1", 10)
pal_out = palettes.color_palette(pal_in)
nt.assert_equal(pal_in, pal_out)
|
bsd-3-clause
|
GEMScienceTools/rmtk
|
setup.py
|
1
|
2633
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
url = "https://github.com/GEMScienceTools/rmtk"
README = """
Risk Modeller's Toolkit (RMTK)
The RMTK is a suite of tools developed by scientists working at the
Global Earthquake Model (GEM) Foundation. The main purpose
of the RMTK is to provide a suite of tools for the creation of seismic
risk input models and for the post-processing and visualisation of
OpenQuake risk results.
Copyright (C) 2015-2017 GEM Foundation
"""
setup(
name='rmtk',
version='1.0.0',
description="""The main purpose of the RMTK is to provide a suite of tools
for the creation of seismic risk input models and for the
post-processing and visualisation of OpenQuake risk
results.
""",
long_description=README,
url=url,
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
# FIXME taken from the README, the list is incomplete
'lxml',
'numpy',
'scipy',
'shapely',
'matplotlib',
'basemap',
'ipython',
'notebook',
'jupyter',
],
author='GEM Foundation',
author_email='[email protected]',
maintainer='GEM Foundation',
maintainer_email='[email protected]',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering',
),
keywords="seismic risk",
license="AGPL3",
platforms=["any"],
package_data={"rmtk": [
"README.md", "LICENSE"]},
include_package_data=True,
zip_safe=False,
)
|
agpl-3.0
|
bhargav/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
297
|
1247
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
|
bsd-3-clause
|
terhorstd/nest-simulator
|
topology/examples/test_3d_exp.py
|
2
|
2963
|
# -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
random.uniform(-0.5, 0.5)]
for j in range(1000)]
l1 = topo.CreateLayer(
{'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_psc_alpha'})
# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.hl_api.GetChildren(l1)[0]
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Exponential connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
'upper_right': [0.75, 0.75, 0.75]}},
'kernel': {'exponential':
{'c': 0., 'a': 1., 'tau': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
|
gpl-2.0
|
liang42hao/bokeh
|
bokeh/charts/builder/step_builder.py
|
43
|
5445
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Step module, which lets you build your Step charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
""" Create a step chart using :class:`StepBuilder <bokeh.charts.builder.step_builder.StepBuilder>`
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
            step.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Step, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
step = Step(xyvalues, title="Steps", legend="top_left", ylabel='Languages')
output_file('step.html')
show(step)
"""
return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
"""This is the Step class and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the
source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
            series) if step.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""It calculates the chart properties accordingly from Step.values.
Then build a dict containing references to all the points to be
used by the segment glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
self._groups = []
orig_xs = self._values_index
xs = np.empty(2*len(orig_xs)-1, dtype=np.int)
xs[::2] = orig_xs[:]
xs[1::2] = orig_xs[1:]
self._data['x'] = xs
for i, col in enumerate(self._values.keys()):
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
orig_ys = np.array([self._values[col][x] for x in orig_xs])
ys = np.empty(2*len(orig_ys)-1)
ys[::2] = orig_ys[:]
ys[1::2] = orig_ys[:-1]
self._data['y_%s' % col] = ys
def _set_sources(self):
""" Push the Step data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
#y_sources = [sc.columns("y_%s" % col) for col in self._groups]
self.y_range = DataRange1d()
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Step.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._groups, self.palette)
for i, name in enumerate(self._groups):
# draw the step horizontal segment
glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
|
bsd-3-clause
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_bernoulliGBM.py
|
3
|
2673
|
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def bernoulliGBM():
#Log.info("Importing prostate.csv data...\n")
prostate_train = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate_train.csv"))
#Log.info("Converting CAPSULE and RACE columns to factors...\n")
prostate_train["CAPSULE"] = prostate_train["CAPSULE"].asfactor()
#Log.info("H2O Summary of prostate frame:\n")
#prostate.summary()
# Import prostate_train.csv as numpy array for scikit comparison
trainData = np.loadtxt(pyunit_utils.locate("smalldata/logreg/prostate_train.csv"), delimiter=',', skiprows=1)
trainDataResponse = trainData[:,0]
trainDataFeatures = trainData[:,1:]
ntrees = 100
learning_rate = 0.1
depth = 5
min_rows = 10
# Build H2O GBM classification model:
#Log.info(paste("H2O GBM with parameters:\ndistribution = 'bernoulli', ntrees = ", ntrees, ", max_depth = 5,
# min_rows = 10, learn_rate = 0.1\n", sep = ""))
gbm_h2o = h2o.gbm(x=prostate_train[1:], y=prostate_train["CAPSULE"], ntrees=ntrees, learn_rate=learning_rate,
max_depth=depth, min_rows=min_rows, distribution="bernoulli")
# Build scikit GBM classification model
#Log.info("scikit GBM with same parameters\n")
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learning_rate, n_estimators=ntrees, max_depth=depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(trainDataFeatures,trainDataResponse)
#Log.info("Importing prostate_test.csv data...\n")
prostate_test = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate_test.csv"))
#Log.info("Converting CAPSULE and RACE columns to factors...\n")
prostate_test["CAPSULE"] = prostate_test["CAPSULE"].asfactor()
# Import prostate_test.csv as numpy array for scikit comparison
testData = np.loadtxt(pyunit_utils.locate("smalldata/logreg/prostate_test.csv"), delimiter=',', skiprows=1)
testDataResponse = testData[:,0]
testDataFeatures = testData[:,1:]
# Score on the test data and compare results
# scikit
auc_sci = roc_auc_score(testDataResponse, gbm_sci.predict_proba(testDataFeatures)[:,1])
# h2o
gbm_perf = gbm_h2o.model_performance(prostate_test)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
pyunit_utils.standalone_test(bernoulliGBM)
else:
bernoulliGBM()
|
apache-2.0
|
JohnDickerson/EnvyFree
|
analysis/graph_theory.py
|
1
|
1528
|
#!/usr/bin/env python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.patches as patches # For the proxy twin-axis legend entry
from scipy.optimize import curve_fit
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
XFONT={'fontsize':24}
YFONT={'fontsize':24}
TITLEFONT={'fontsize':24}
TINYFONT={'fontsize':6}
def thm_2_func(x, a, b):
return a * (x / np.log(x)) + b
def print_thm_2_func(vec):
return "${0:.2f} * m/\ln(m) - {1:.2f}$".format(vec[0], -vec[1])
n = range(3,11)
# smallest m for which 99%+ of the runs were feasible, for each n
unif_m = [8,9,10,12,14,16,18,19]
corr_m = [8,10,11,13,14,16,18,19]
# Fit an f(m) = O( m / ln(m) ) curve to the 99%+ data
popt_unif, pcov_unif = curve_fit(thm_2_func, n, unif_m)
popt_corr, pcov_corr = curve_fit(thm_2_func, n, corr_m)
print popt_unif
print popt_corr
fig = plt.figure()
ax = fig.add_subplot(111)
unif = ax.plot(n, unif_m, 'ks', label="Uniform", linewidth=2,)
corr = ax.plot(n, corr_m, 'ko', label="Correlated", linewidth=2,)
fit_unif = ax.plot(n, thm_2_func(n, *popt_unif), 'k--', label=print_thm_2_func(popt_unif))
fit_corr = ax.plot(n, thm_2_func(n, *popt_corr), 'k-', label=print_thm_2_func(popt_corr))
ax.set_ylabel("$m$", fontdict=XFONT)
ax.set_xlabel("$n$", fontdict=YFONT)
plt.legend(loc='upper left',)
plt.savefig("with_high_probability.pdf", format='pdf', bbox_inches='tight')
plt.clf()
|
gpl-2.0
|
yongfuyang/vnpy
|
vn.trader/ctaAlgo/tools/multiTimeFrame/ctaStrategyMultiTF.py
|
18
|
15032
|
# encoding: UTF-8
"""
This file tweaks ctaTemplate Module to suit multi-TimeFrame strategies.
"""
from strategyAtrRsi import *
from ctaBase import *
from ctaTemplate import CtaTemplate
########################################################################
class TC11(CtaTemplate):
# Strategy name and author
className = "TC11"
author = "Zenacon"
# Set MongoDB DataBase
barDbName = "TestData"
# Strategy parameters
pGeneric_prd = 21
pGeneric_on = True
pATRprd_F = 13
pATRprd_M = 21
pATRprd_S = 63
pBOSSplus_prd = 98
pBOSSminus_prd = 22
if pGeneric_on == 0:
pRSIprd = 20
pBBprd = 10
pBB_ATRprd = 15
pATRprd = 21
pDMIprd = 21
else:
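        # chained assignment: every lookback period below is set to the single generic period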
pRSIprd = \
pBBprd = \
pBB_ATRprd = \
pATRprd = \
pDMIprd = pGeneric_prd
pBOSS_Mult = 1.75
# Strategy variables
vOBO_initialpoint = EMPTY_FLOAT
vOBO_Stretch = EMPTY_FLOAT
vOBO_level_L = EMPTY_FLOAT
vOBO_level_S = EMPTY_FLOAT
# parameters' list, record names of parameters
paramList = ['name',
'className',
'author',
'vtSymbol']
# variables' list, record names of variables
varList = ['inited',
'trading',
'pos']
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(TC11, self).__init__(ctaEngine, setting)
# ----------------------------------------------------------------------
def onBar(self, bar, **kwargs):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# Record new information bar
if "infobar" in kwargs:
for i in kwargs["infobar"]:
if kwargs["infobar"][i] is None:
pass
else:
# print kwargs["infobar"][i]["close"]
self.closeArray[0:self.bufferSize - 1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize - 1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize - 1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
"""
Record new bar
"""
self.closeArray[0:self.bufferSize - 1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize - 1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize - 1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
"""
Calculate Indicators
"""
vOBO_initialpoint = self.dataHTF_filled['Open']
vOBO_Stretch = self.vATR['htf'].m * self.pBOSS_Mult
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1]
self.atrArray[0:self.bufferSize - 1] = self.atrArray[1:self.bufferSize]
self.atrArray[-1] = self.atrValue
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
self.atrMa = talib.MA(self.atrArray,
self.atrMaLength)[-1]
self.rsiValue = talib.RSI(self.closeArray,
self.rsiLength)[-1]
        # Decide whether to trade
        # Currently flat (no position)
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
            # ATR crossing above its moving average means short-term volatility is rising,
            # i.e. a trend is more likely, which is a good moment for a CTA entry
if self.atrValue > self.atrMa:
                # In trending markets the RSI saturates in the overbought/oversold zones; use that as the entry signal
if self.rsiValue > self.rsiBuy:
                    # To make sure the order fills, place it 5 index points beyond the market price
self.buy(bar.close + 5, 1)
elif self.rsiValue < self.rsiSell:
self.short(bar.close - 5, 1)
        # Holding a long position
elif self.pos > 0:
            # Track the highest price while holding the long, and reset the lowest
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1 - self.trailingPercent / 100)
            # Send a local stop order and record its ID for later cancellation
orderID = self.sell(longStop, 1, stop=True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos < 0:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1 + self.trailingPercent / 100)
orderID = self.cover(shortStop, 1, stop=True)
self.orderList.append(orderID)
        # Emit a status-update event
self.putEvent()
########################################################################
class Prototype(AtrRsiStrategy):
"""
"infoArray" 字典是用来储存辅助品种信息的, 可以是同品种的不同分钟k线, 也可以是不同品种的价格。
调用的方法:
self.infoArray["数据库名 + 空格 + collection名"]["close"]
self.infoArray["数据库名 + 空格 + collection名"]["high"]
self.infoArray["数据库名 + 空格 + collection名"]["low"]
"""
infoArray = {}
initInfobar = {}
def __int__(self):
super(Prototype, self).__int__()
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' % self.name)
# 初始化RSI入场阈值
self.rsiBuy = 50 + self.rsiEntry
self.rsiSell = 50 - self.rsiEntry
        # Load historical data and initialize strategy values by replaying it
initData = self.loadBar(self.initDays)
for bar in initData:
            # Push the new bar and check whether any information bar matches its timestamp
ibar = self.checkInfoBar(bar)
self.onBar(bar, infobar=ibar)
self.putEvent()
# ----------------------------------------------------------------------
def checkInfoBar(self, bar):
"""在初始化时, 检查辅助品种数据的推送(初始化结束后, 回测时不会调用)"""
initInfoCursorDict = self.ctaEngine.initInfoCursor
# 如果"initInfobar"字典为空, 初始化字典, 插入第一个数据
# If dictionary "initInfobar" is empty, insert first data record
if self.initInfobar == {}:
for info_symbol in initInfoCursorDict:
try:
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
print "Data of information symbols is empty! Input is a list, not str."
raise
        # If a symbol's timestamp matches the executing symbol's timestamp, return the data
        # held for it in "initInfobar" and then update that symbol's data.
temp = {}
for info_symbol in self.initInfobar:
data = self.initInfobar[info_symbol]
# Update data only when Time Stamp is matched
if data['datetime'] <= bar.datetime:
try:
temp[info_symbol] = CtaBarData()
temp[info_symbol].__dict__ = data
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
self.ctaEngine.output("No more data for initializing %s." % (info_symbol,))
else:
temp[info_symbol] = None
return temp
# ----------------------------------------------------------------------
def updateInfoArray(self, infobar):
"""收到Infomation Data, 更新辅助品种缓存字典"""
for name in infobar:
data = infobar[name]
# Construct empty array
if len(self.infoArray) < len(infobar) :
self.infoArray[name] = {
"close": np.zeros(self.bufferSize),
"high": np.zeros(self.bufferSize),
"low": np.zeros(self.bufferSize)
}
if data is None:
pass
else:
self.infoArray[name]["close"][0:self.bufferSize - 1] = \
self.infoArray[name]["close"][1:self.bufferSize]
self.infoArray[name]["high"][0:self.bufferSize - 1] = \
self.infoArray[name]["high"][1:self.bufferSize]
self.infoArray[name]["low"][0:self.bufferSize - 1] = \
self.infoArray[name]["low"][1:self.bufferSize]
self.infoArray[name]["close"][-1] = data.close
self.infoArray[name]["high"][-1] = data.high
self.infoArray[name]["low"][-1] = data.low
# ----------------------------------------------------------------------
def onBar(self, bar, **kwargs):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
        # Update information data
        # "infobar" is a dict of bars from other timeframes or symbols. When its timestamp does not
        # match the executing symbol's, None is passed in; when it matches, a Bar is passed in.
self.updateInfoArray(kwargs["infobar"])
        # Store the bar data
self.closeArray[0:self.bufferSize - 1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize - 1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize - 1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
        # If the cached data is still insufficient, do not trade
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute indicator values
        # Compute ATR values on the different timeframes
        # Only trade when the information bar changes,
        # i.e. only after the 30-min or daily bar has been updated
TradeOn = False
if any([i is not None for i in kwargs["infobar"].values()]):
TradeOn = True
self.scaledAtrValue1M = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1] * (25) ** (0.5)
self.atrValue30M = talib.abstract.ATR(self.infoArray["TestData @GC_30M"])[-1]
self.rsiValue = talib.abstract.RSI(self.infoArray["TestData @GC_30M"], self.rsiLength)[-1]
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
        # Decide whether to trade
        # Currently flat (no position)
if (self.pos == 0 and TradeOn == True):
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
            # The scaled 1-minute ATR is greater than the 30-minute ATR,
            # i.e. a trend is more likely, which is a good moment for a CTA entry
if self.atrValue30M < self.scaledAtrValue1M:
                # In trending markets the RSI saturates in the overbought/oversold zones; use that as the entry signal
if self.rsiValue > self.rsiBuy:
                    # To make sure the order fills, place it 5 index points beyond the market price
self.buy(bar.close+5, 1)
elif self.rsiValue < self.rsiSell:
self.short(bar.close-5, 1)
                # After placing an order, do not trade again until the next 30-min bar
TradeOn = False
        # Holding a long position
elif self.pos > 0:
            # Track the highest price while holding the long, and reset the lowest
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1 - self.trailingPercent / 100)
            # Send a local stop order and record its ID for later cancellation
orderID = self.sell(longStop, 1, stop=True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos < 0:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1 + self.trailingPercent / 100)
orderID = self.cover(shortStop, 1, stop=True)
self.orderList.append(orderID)
        # Emit a status-update event
self.putEvent()
if __name__ == '__main__':
    # Allow the backtest to be run directly by double-clicking this file
    # PyQt4 is imported so that matplotlib uses PyQt4 instead of PySide, preventing initialization errors
from ctaBacktestMultiTF import *
from PyQt4 import QtCore, QtGui
import time
'''
    Create the backtesting engine
    Set the engine's backtest mode to bar data
    Set the start date of the backtest data range
    Load historical data into the engine
    Create the strategy instance in the engine
'''
engine = BacktestEngineMultiTF()
engine.setBacktestingMode(engine.BAR_MODE)
engine.setStartDate('20100101')
engine.setDatabase("TestData", "@GC_1M", info_symbol=[("TestData","@GC_30M")])
# Set parameters for strategy
d = {'atrLength': 11}
engine.initStrategy(Prototype, d)
    # Set product-related parameters
    engine.setSlippage(0.2)             # one tick of the index future
    engine.setCommission(0.3 / 10000)   # commission rate of 0.3 per 10,000
    engine.setSize(300)                 # contract multiplier of the index future
    # Run the backtest
start = time.time()
engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult()
print 'Time consumed:%s' % (time.time() - start)
|
mit
|
root-mirror/root
|
tutorials/tmva/tmva102_Testing.py
|
21
|
1138
|
## \file
## \ingroup tutorial_tmva
## \notebook -nodraw
## This tutorial illustrates how you can test a trained BDT model using the fast
## tree inference engine offered by TMVA and external tools such as scikit-learn.
##
## \macro_code
## \macro_output
##
## \date August 2019
## \author Stefan Wunsch
import ROOT
import pickle
from tmva100_DataPreparation import variables
from tmva101_Training import load_data
# Load data
x, y_true, w = load_data("test_signal.root", "test_background.root")
# Load trained model
bdt = ROOT.TMVA.Experimental.RBDT[""]("myBDT", "tmva101.root")
# Make prediction
y_pred = bdt.Compute(x)
# Compute ROC using sklearn
from sklearn.metrics import roc_curve, auc
fpr, tpr, _ = roc_curve(y_true, y_pred, sample_weight=w)
score = auc(fpr, tpr, reorder=True)
# Plot ROC
c = ROOT.TCanvas("roc", "", 600, 600)
g = ROOT.TGraph(len(fpr), fpr, tpr)
g.SetTitle("AUC = {:.2f}".format(score))
g.SetLineWidth(3)
g.SetLineColor(ROOT.kRed)
g.Draw("AC")
g.GetXaxis().SetRangeUser(0, 1)
g.GetYaxis().SetRangeUser(0, 1)
g.GetXaxis().SetTitle("False-positive rate")
g.GetYaxis().SetTitle("True-positive rate")
c.Draw()
|
lgpl-2.1
|
nomadcube/scikit-learn
|
examples/preprocessing/plot_function_transformer.py
|
161
|
1949
|
"""
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
|
bsd-3-clause
|
Funtimezzhou/TradeBuildTools
|
SAT eBook/chapter15/strategy.py
|
2
|
1077
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# strategy.py
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import datetime
try:
import Queue as queue
except ImportError:
import queue
import numpy as np
import pandas as pd
from event import SignalEvent
class Strategy(object):
# Strategy is an abstract base class providing an interface for
# all subsequent (inherited) strategy handling objects.
# The goal of a (derived) Strategy object is to generate Signal
# objects for particular symbols based on the inputs of Bars
# (OHLCV) generated by a DataHandler object.
# This is designed to work both with historic and live data as
# the Strategy object is agnostic to where the data came from,
# since it obtains the bar tuples from a queue object.
__metaclass__ = ABCMeta
@abstractmethod
def calculate_signals(self):
# Provides the mechanisms to calculate the list of signals.
raise NotImplementedError("Should implement calculate_signals()")
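# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete Strategy showing the intended usage. It assumes a
# DataHandler exposing `symbol_list` and an `events` queue, and that a
# SignalEvent can be built from (symbol, datetime, signal_type); the real
# constructor may take different arguments.
class ExampleBuyAndHoldStrategy(Strategy):
    # Goes long every symbol once and then holds.
    def __init__(self, bars, events):
        self.bars = bars
        self.events = events
        self.bought = {s: False for s in self.bars.symbol_list}

    def calculate_signals(self):
        # Emit a single LONG signal per symbol the first time it is seen.
        for symbol in self.bars.symbol_list:
            if not self.bought[symbol]:
                signal = SignalEvent(symbol, datetime.datetime.utcnow(),
                                     'LONG')  # hypothetical argument order
                self.events.put(signal)
                self.bought[symbol] = True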
|
gpl-3.0
|
LouisPlisso/pytomo
|
pytomo/cdfplot_new.py
|
2
|
11021
|
#!/usr/bin/env python
"Module to plot cdf from data or file. Can be called directly."
from __future__ import division, print_function
#from optparse import OptionParser
import sys
# AO 201221010 (due to error in win) =>
try:
import numpy as np
except ImportError:
np = None
# in case of non-interactive usage
#import matplotlib
#matplotlib.use('PDF')
try:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.colors import colorConverter
except ImportError:
plt = None
# <= AO 201221010 (due to error in win)
from itertools import cycle
_VERSION = '2.0'
# possibility to place legend outside graph:
#pylab.subfigure(111)
#pylab.subplots_adjust(right=0.8) or (top=0.8)
#pylab.legend(loc=(1.1, 0.5)
class CdfFigure(object):
"Hold the figure and its default properties"
def __init__(self, xlabel='x', ylabel=r'P(X$\leq$x)',
title='Empirical Distribution', fontsize='xx-large',
legend_fontsize='large', legend_ncol=1, subplot_top=None):
self._figure = plt.figure()
if subplot_top:
self._figure.subplotpars.top = subplot_top
self._axis = self._figure.add_subplot(111)
self._lines = {}
self.xlabel = xlabel
self.ylabel = ylabel
self.title = title
self.fontsize = fontsize
self.legend_fontsize = legend_fontsize
self.legend_ncol = legend_ncol
def savefig(self, *args, **kwargs):
"Saves the figure: interface to plt.Figure.savefig"
self._figure.savefig(*args, **kwargs)
def bar(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.bar"
self._axis.bar(*args, **kwargs)
def plot(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.plot"
self._axis.plot(*args, **kwargs)
def get_xlim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.get_xlim()"
return self._axis.get_xlim(*args, **kwargs)
def set_xlim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.set_xlim()"
self._axis.set_xlim(*args, **kwargs)
def set_ylim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.set_ylim()"
self._axis.set_ylim(*args, **kwargs)
def cdfplot(self, data_in, name='Data', finalize=False):
"""Plot the cdf of a data array
Wrapper to call the plot method of axes
"""
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
cdf = np.arange(data_len + 1) / data_len
# to have cdf up to 1
data.append(data[-1])
line = self._axis.plot(data, cdf, drawstyle='steps',
label=name + ': %d' % len(data))
self._lines[name] = line[0]
if finalize:
self.adjust_plot()
def ccdfplot(self, data_in, name='Data', finalize=False):
"""Plot the cdf of a data array
Wrapper to call the plot method of axes
"""
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
ccdf = 1 - np.arange(data_len + 1) / data_len
# to have cdf up to 1
data.append(data[-1])
line = self._axis.plot(data, ccdf, drawstyle='steps',
label=name + ': %d' % len(data))
self._lines[name] = line[0]
if finalize:
self.adjust_plot()
def show(self):
"Show the figure, and hold to do interactive drawing"
self._figure.show()
self._figure.hold(True)
@staticmethod
def generate_line_properties():
"Cycle through the lines properties"
colors = cycle('mgcb')
line_width = 2.5
dashes = cycle([(1, 0), (8, 5)]) #self.dash_generator()
linestyles = cycle(['-'])
#alphas = cycle([.3, 1.])
markers = cycle(' oxv*d')
while True:
dash = dashes.next()
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
dash = dashes.next()
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
def adjust_lines(self, dashes=True, leg_loc='best'):
"""Put correct styles in the axes lines
        Should be called once all lines are plotted
Optimised for up to 8 lines in the plot
"""
generator = self.generate_line_properties()
for key in sorted(self._lines):
(color, line_width, dash, linestyle, marker) = generator.next()
line = self._lines[key]
line.set_color(color)
line.set_lw(line_width)
line.set_linestyle(linestyle)
if dashes:
line.set_dashes(dash)
line.set_marker(marker)
line.set_markersize(12)
line.set_markeredgewidth(1.5)
line.set_markerfacecolor('1.')
line.set_markeredgecolor(color)
# we want at most 15 markers per line
markevery = 1 + len(line.get_xdata()) // 15
line.set_markevery(markevery)
self.adjust_plot(leg_loc=leg_loc)
def adjust_plot(self, leg_loc='best'):
"Adjust main plot properties (grid, ticks, legend)"
self.put_labels()
self.adjust_ticks()
self._axis.grid(True)
self._axis.legend(loc=leg_loc, ncol=self.legend_ncol)
def put_labels(self):
"Put labels for axes and title"
self._axis.set_xlabel(self.xlabel, size=self.fontsize)
self._axis.set_ylabel(self.ylabel, size=self.fontsize)
self._axis.set_title(self.title, size=self.fontsize)
def legend(self, loc='best'):
"Plot legend with correct font size"
font = FontProperties(size=self.legend_fontsize)
self._axis.legend(loc=loc, prop=font)
def adjust_ticks(self):
"""Adjusts ticks sizes
To call after a rescale (log...)
"""
self._axis.minorticks_on()
for tick in self._axis.xaxis.get_major_ticks():
tick.label1.set_fontsize(self.fontsize)
for tick in self._axis.yaxis.get_major_ticks():
tick.label1.set_fontsize(self.fontsize)
def setgraph_logx(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.semilogx(nonposy='clip', nonposx='clip')
def setgraph_logy(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.semilogy(nonposy='clip', nonposx='clip')
def setgraph_loglog(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.loglog(nonposy='clip', nonposx='clip')
def cdfplotdata(self, list_data_name, **kwargs):
"Method to be able to append data to the figure"
cdfplotdata(list_data_name, figure=self, **kwargs)
def ccdfplotdata(self, list_data_name, **kwargs):
"Method to be able to append data to the figure"
        cdfplotdata(list_data_name, figure=self, cdf=False, **kwargs)
def cdfplotdata(list_data_name, figure=None, xlabel='x', loc='best',
fs_legend='large', title = 'Empirical Distribution', logx=True,
logy=False, cdf=True, dashes=True, legend_ncol=1):
"Plot the cdf of a list of names and data arrays"
if not figure:
figure = CdfFigure(xlabel=xlabel, title=title,
legend_fontsize=fs_legend, legend_ncol=legend_ncol)
else:
figure.title = title
figure.xlabel = xlabel
figure.legend_fontsize = fs_legend
figure.legend_ncol = legend_ncol
if not list_data_name:
print("no data to plot", file=sys.stderr)
return figure
for name, data in list_data_name:
if cdf:
figure.cdfplot(data, name=name)
else:
figure.ccdfplot(data, name=name)
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_lines(dashes=dashes, leg_loc=loc)
return figure
def cdfplot(in_file, col=0):
"Plot the cdf of a column in file"
data = np.loadtxt(in_file, usecols = [col])
    cdfplotdata([('Data', data)])
def scatter_plot(data, title='Scatterplot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a scatter plot of data"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
x, y = zip(*data)
figure.plot(x, y, linestyle='', marker='^', markersize=8,
markeredgecolor='b', markerfacecolor='w')
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
def scatter_plot_multi(datas, title='Scatterplot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a scatter plot of dictionary"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
markers = cycle('^xo')
colors = cycle('brm')
transparent = colorConverter.to_rgba('w', alpha=1)
total_nb = len([x for y in datas.values() for x in y])
for label, data in sorted(datas.items()):
x, y = zip(*data)
figure.plot(x, y,
label=(r'%s: %d (\textbf{%d\%%})'
% (label, len(data), 100 *len(data) // total_nb)),
linestyle='', marker=markers.next(), markersize=8,
markeredgecolor=colors.next(), markerfacecolor=transparent)
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
def bin_plot(datas, title='Bin Plot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a bin plot of dictionary"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
# linestyles = cycle(('-', '--'))
# markers = cycle('^xo')
# colors = cycle('brm')
# for label, data in datas:
left, width, height, yerr = zip(*datas)
figure.bar(left, height, width, linewidth=0) #, yerr=yerr)
# linestyle=linestyles.next(), marker=markers.next(),
# markersize=6, markeredgecolor=colors.next(),
# markerfacecolor='w')
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
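# --- Illustrative usage sketch (not part of the original module) ---
# Builds a figure with two empirical CDFs using the module's own cdfplotdata
# and saves it; the sample arrays and the output filename are placeholders.
def _example_cdf_usage():
    if np is None or plt is None:
        return
    sample_a = np.random.exponential(scale=1.0, size=500)
    sample_b = np.random.exponential(scale=2.0, size=500)
    fig = cdfplotdata([('Sample A', sample_a), ('Sample B', sample_b)],
                      xlabel='value', logx=False)
    fig.savefig('example_cdf.pdf')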
|
gpl-2.0
|
lawrencejones/neuro
|
Exercise_3/Solutions/WattsStrogatzStatistics.py
|
3
|
1387
|
"""
Computational Neurodynamics
Exercise 3
Randomly generates networks according to the Watts-Strogatz procedure,
computes their small-world indices, and local and global efficiencies,
and plots them. N is the number of nodes, k is the neighbourhood size.
(C) Murray Shanahan et al, 2015
"""
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
import bct
from SmallWorldIndex import SmallWorldIndex
from NetworkWattsStrogatz import NetworkWattsStrogatz
# Set up parameter values
nb_trials = 100
N = 200
k = 4
# Pre-allocate arrays
prob = 10**(-3*np.random.random(nb_trials))
SWI = np.zeros(nb_trials)
Eglob = np.zeros(nb_trials)
Eloc = np.zeros(nb_trials)
# Calculate statistics
for i, p in enumerate(prob):
CIJ = NetworkWattsStrogatz(N, k, p)
SWI[i] = SmallWorldIndex(CIJ)
Eglob[i] = bct.efficiency_wei(CIJ, local=False)
Eloc[i] = np.mean(bct.efficiency_wei(CIJ, local=True))
# Plot figures
plt.figure(1)
plt.semilogx(prob, SWI, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Small World Index')
plt.figure(2)
plt.subplot(211)
plt.semilogx(prob, Eglob, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Global efficiency')
plt.subplot(212)
plt.semilogx(prob, Eloc, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Local efficiency')
plt.show()
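# --- Illustrative sketch (not part of the original exercise code) ---
# A generic Watts-Strogatz construction, shown only to make the procedure
# concrete: build a ring lattice in which every node links to k/2 neighbours
# on each side, then rewire each edge with probability p. The course's
# NetworkWattsStrogatz may differ in its details (e.g. weighted edges).
def watts_strogatz_sketch(N, k, p):
    CIJ = np.zeros((N, N))
    for i in range(N):
        for j in range(1, k // 2 + 1):
            CIJ[i, (i + j) % N] = 1
            CIJ[(i + j) % N, i] = 1
    for i in range(N):
        for j in range(1, k // 2 + 1):
            if np.random.rand() < p:
                old = (i + j) % N
                # candidate targets: no self-loops and no existing edges
                free = np.where((CIJ[i] == 0) & (np.arange(N) != i))[0]
                if free.size > 0:
                    new = np.random.choice(free)
                    CIJ[i, old] = CIJ[old, i] = 0
                    CIJ[i, new] = CIJ[new, i] = 1
    return CIJ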
|
gpl-3.0
|
arjunkhode/ASP
|
lectures/04-STFT/plots-code/time-freq-compromise.py
|
5
|
1247
|
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
(fs, x) = UF.wavread('../../../sounds/piano.wav')
plt.figure(1, figsize=(9.5, 6))
w = np.hamming(256)
N = 256
H = 128
mX1, pX1 = STFT.stftAnal(x, w, N, H)
plt.subplot(211)
numFrames = int(mX1[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX1[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX1))
plt.title('mX (piano.wav), M=256, N=256, H=128')
plt.autoscale(tight=True)
w = np.hamming(1024)
N = 1024
H = 128
mX2, pX2 = STFT.stftAnal(x, w, N, H)
plt.subplot(212)
numFrames = int(mX2[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX2[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2))
plt.title('mX (piano.wav), M=1024, N=1024, H=128')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('time-freq-compromise.png')
plt.show()
|
agpl-3.0
|
yavalvas/yav_com
|
build/matplotlib/doc/mpl_examples/pylab_examples/contourf_log.py
|
9
|
1350
|
'''
Demonstrate use of a log color scale in contourf
'''
from matplotlib import pyplot as P
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
z = (bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0)
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0))
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z<= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
cs = P.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = P.contourf(X, Y, z, levs, norm=colors.LogNorm())
#The 'extend' kwarg does not work yet with a log scale.
cbar = P.colorbar()
P.show()
|
mit
|
soylentdeen/cuddly-weasel
|
gfVerification/Twins.py
|
1
|
1910
|
import matplotlib.pyplot as pyplot
import numpy
import scipy
import sys
import MoogTools
import Moog960
import AstroUtils
fig1 = pyplot.figure(0)
fig1.clear()
ax1 = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
fig2 = pyplot.figure(1)
fig2.clear()
ax2 = fig2.add_axes([0.1, 0.1, 0.8, 0.8])
baseName = sys.argv[1]
arcConfig = baseName+".arcturus.cfg"
solConfig = baseName+".solar.cfg"
arcturus = MoogTools.MoogStokes(arcConfig, moogInstance = "ALPHA")
solar = MoogTools.MoogStokes(solConfig, moogInstance = "BRAVO")
solarSpectrum = Moog960.ObservedMelody.fromFile(filename=solar.config["inFile"])
arcturusSpectrum = Moog960.ObservedMelody.fromFile(filename=arcturus.config["inFile"])
solarSpectrum.loadData()
arcturusSpectrum.loadData()
wlRange = [solar.config["wlStart"], solar.config["wlStop"]]
solarSpectrum.selectPhrases(wlRange=wlRange)
obsSol, labels = solarSpectrum.perform()
arcturusSpectrum.selectPhrases(wlRange=wlRange)
obsArc, labels = arcturusSpectrum.perform()
solWlShift = solar.config["wlShift"]
obsSol = obsSol[0][0]
obsSol.wl += solWlShift
window = ((obsSol.wl > wlRange[0]) & (obsSol.wl < wlRange[1]))
obsSol.wl = obsSol.wl[window]
obsSol.flux_I = obsSol.flux_I[window]
arcWlShift = arcturus.config["wlShift"]
obsArc = obsArc[0][0]
obsArc.wl += arcWlShift
window = ((obsArc.wl > wlRange[0]) & (obsArc.wl < wlRange[1]))
obsArc.wl = obsArc.wl[window]
obsArc.flux_I = obsArc.flux_I[window]
arcturus.run()
solar.run()
obsSol.plot(ax = ax1, color = 'b', lw=2.0, label='Observed Solar')
solar.Spectra[-1].plot(ax=ax1, color = 'r', lw=2.0, label='Arcturus gf\'s')
ax1.legend(loc=3)
ax1.set_ybound(0.5, 1.05)
fig1.show()
fig1.savefig("ObservedSolar.png")
obsArc.plot(ax = ax2, color = 'b', lw=2.0, label='Observed Arcturus')
arcturus.Spectra[-1].plot(ax=ax2, color = 'r', lw=2.0, label='Solar gf\'s')
ax2.legend(loc=3)
ax2.set_ybound(0.45, 1.05)
fig2.show()
fig2.savefig("ObservedArcturus.png")
|
mit
|
joshbohde/scikit-learn
|
doc/conf.py
|
1
|
7181
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath',
]
try:
import numpy_ext.numpydoc
extensions.append('numpy_ext.numpydoc')
# With older versions of sphinx, this causes a crash
autosummary_generate=True
except:
# Older version of sphinx
extensions.append('numpy_ext_old.numpydoc')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-learn'
copyright = u'2010–2011, scikit-learn developers (BSD License)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
import sklearn
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'scikit-learn user guide',
u'scikit-learn developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = """
\usepackage{amsmath}\usepackage{amsfonts}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
|
bsd-3-clause
|
Pymatteo/QtNMR
|
build/exe.win32-3.4/scipy/stats/morestats.py
|
7
|
78330
|
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy.lib._numpy_compat import count_nonzero
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability `alpha`.
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
`alpha`.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
    Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
The return values from bayes_mvs(data) is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy.stats import mvsdist
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if (n < 2):
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C)
else:
nm1 = n-1
fac = n*C/2.
val = nm1/2.
mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1))
sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac))
vdist = distributions.invgamma(val,scale=fac)
return mdist, vdist, sdist
def kstat(data,n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_{n=0}^{inf} kappa_n (it)**n / n!
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n+1,'d')
data = ravel(data)
N = len(data)
for k in range(1,n+1):
S[k] = sum(data**k,axis=0)
if n == 1:
return S[1]*1.0/N
elif n == 2:
return (N*S[2]-S[1]**2.0)/(N*(N-1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0))
elif n == 4:
return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \
(N*(N-1.0)*(N-2.0)*(N-3.0))
else:
raise ValueError("Should not be here.")
def kstatvar(data,n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data,n=2)*1.0/N
elif n == 2:
k2 = kstat(data,n=2)
k4 = kstat(data,n=4)
return (2*k2*k2*N + (N-1)*k4)/(N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        quantiles = dist.ppf(val), for

                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample/2.,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1-r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
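# --- Illustrative usage sketch (not part of the original module) ---
# Recovering the shape parameter of a Tukey-Lambda sample; the true shape 0.5
# lies inside the default bracket (0, 1), so for a reasonably large sample the
# returned value should be close to it.
def _ppcc_max_example():  # illustration only, never called by the module
    np.random.seed(1234)
    x = distributions.tukeylambda.rvs(0.5, size=1000)
    return ppcc_max(x)    # expected to be near 0.5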
def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals*0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
        plot.xlabel('Shape Values')
        plot.ylabel('Prob Plot Corr. Coef.')
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
plot.set_xlabel('$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
plot.xlabel('$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N,'f')
init = 0
else:
if len(a) != N//2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0,2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
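# --- Illustrative usage sketch (not part of the original module) ---
# For data drawn from a normal distribution the W statistic is close to 1 and
# the p-value is typically large, so normality is not rejected.
def _shapiro_example():  # illustration only, never called by the module
    np.random.seed(12345678)
    x = distributions.norm.rvs(loc=5.0, scale=3.0, size=100)
    W, p_value = shapiro(x)
    return W, p_value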
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x,dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if dist not in ['norm','expon','gumbel','extreme1','logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y-xbar)/s
z = distributions.norm.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_expon / (1.0 + 0.6/N),3)
elif dist == 'logistic':
def rootfunc(ab,xj,N):
a,b = ab
tmp = (xj-a)/b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2),axis=0)-0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N]
return array(val)
sol0 = array([xbar,np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5)
w = (y-sol[0])/sol[1]
z = distributions.logistic.cdf(w)
sig = array([25,10,5,2.5,1,0.5])
critical = around(_Avals_logistic / (1.0+0.25/N),3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
#def fixedsolve(th,xj,N):
# val = stats.sum(xj)*1.0/N
# tmp = exp(-xj/th)
# term = sum(xj*tmp,axis=0)
# term /= sum(tmp,axis=0)
# return val - term
#s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
#xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0)
A2 = -N-S
return A2, critical, sig
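# --- Illustrative usage sketch (not part of the original module) ---
# The statistic A2 is compared against the returned critical values: every
# significance level whose critical value is exceeded allows the hypothesised
# distribution to be rejected at that level.
def _anderson_example():  # illustration only, never called by the module
    np.random.seed(0)
    x = distributions.expon.rvs(size=200)          # clearly non-normal data
    A2, critical, sig = anderson(x, dist='norm')
    rejected_at = [s for s, c in zip(sig, critical) if A2 > c]
    return A2, rejected_at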
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(np.float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
    Compute A2kN, equation 6 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
    A2kN : float
        The A2kN statistic of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
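# Editor's sketch (hypothetical helper, not part of the original module):
# the k-sample test applied to two synthetic samples drawn from shifted
# normal distributions.
def _example_anderson_ksamp_usage(seed=314159):
    rng = np.random.RandomState(seed)
    samples = [rng.normal(size=50), rng.normal(loc=0.5, size=30)]
    A2, critical, p = anderson_ksamp(samples, midrank=True)
    # A small p (or A2 above a tabulated critical value) argues against a
    # common parent distribution for the samples.
    return A2, critical, p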
def ansari(x,y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x,y = asarray(x),asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m+n
xy = r_[x,y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank,N-rank+1)),0)
AB = sum(symrank[:n],axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
ind = AB-astart
total = sum(a1,axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if (ind == cind):
pval = 2.0*sum(a1[:cind+1],axis=0)/total
else:
pval = 2.0*sum(a1[:cind],axis=0)/total
else:
find = int(floor(ind))
if (ind == floor(ind)):
pval = 2.0*sum(a1[find:],axis=0)/total
else:
pval = 2.0*sum(a1[find+1:],axis=0)/total
return AB, min(1.0,pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n*(N+1.0)**2 / 4.0 / N
varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2)
else:
mnAB = n*(N+2.0)/4.0
varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2,axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
z = (AB - mnAB)/sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
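# Editor's sketch (hypothetical helper, not part of the original module):
# the Ansari-Bradley test on two samples that differ in scale by a factor of
# three; a small p-value points at unequal scale parameters.
def _example_ansari_usage(seed=1):
    rng = np.random.RandomState(seed)
    x = rng.normal(scale=1.0, size=40)
    y = rng.normal(scale=3.0, size=40)
    AB, pval = ansari(x, y)
    return AB, pval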
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k,'d')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni,axis=0)
spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k))
numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
T = numer / denom
pval = distributions.chi2.sf(T,k-1) # 1 - cdf
return T, pval
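# Editor's sketch (hypothetical helper, not part of the original module):
# Bartlett's test across three synthetic groups drawn with equal population
# variances, where a large p-value is the expected outcome.
def _example_bartlett_usage(seed=2):
    rng = np.random.RandomState(seed)
    g1, g2, g3 = (rng.normal(size=n) for n in (25, 30, 35))
    T, pval = bartlett(g1, g2, g3)
    return T, pval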
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
    * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None]*k
for i in range(k):
Zij[i] = abs(asarray(args[i])-Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i]*Ni[i]
Zbar /= Ntot
numer = (Ntot-k) * sum(Ni*(Zbari-Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i]-Zbari[i])**2, axis=0)
denom = (k-1.0)*dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return W, pval
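# Editor's sketch (hypothetical helper, not part of the original module):
# Levene's test on three synthetic heavy-tailed groups, once with the default
# median centering and once with trimmed means.
def _example_levene_usage(seed=3):
    rng = np.random.RandomState(seed)
    g1, g2, g3 = (rng.standard_t(3, size=n) for n in (25, 30, 35))
    W_med, p_med = levene(g1, g2, g3, center='median')
    W_trim, p_trim = levene(g1, g2, g3, center='trimmed', proportiontocut=0.1)
    return (W_med, p_med), (W_trim, p_trim)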
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1]+x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1+1e-7
if (x == p*n):
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif (x < p*n):
i = np.arange(np.ceil(p*n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n-y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
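# Editor's sketch (hypothetical helper, not part of the original module):
# the exact two-sided test that a coin is fair after observing 14 heads in
# 20 flips, shown with both calling conventions accepted above.
def _example_binom_test_usage():
    pval = binom_test(14, n=20, p=0.5)
    # Equivalent call passing successes and failures directly.
    pval_xy = binom_test([14, 6], p=0.5)
    return pval, pval_xy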
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g)-1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean','median','trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks/(2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni*(asarray(Aibar) - anbar)**2.0, axis=0)/varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
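# Editor's sketch (hypothetical helper, not part of the original module):
# the Fligner-Killeen test on three synthetic groups; being rank-based it
# needs no normality assumption, unlike `bartlett` above.
def _example_fligner_usage(seed=4):
    rng = np.random.RandomState(seed)
    g1, g2, g3 = (rng.laplace(size=n) for n in (30, 30, 40))
    Xsq, pval = fligner(g1, g2, g3)
    return Xsq, pval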
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis: int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
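# Editor's sketch (hypothetical helper, not part of the original module):
# Mood's test applied along axis 1 of two 2-D arrays whose second sample has
# ten times the scale, mirroring the docstring example above.
def _example_mood_usage(seed=5):
    rng = np.random.RandomState(seed)
    x = rng.randn(2, 30)
    y = rng.randn(2, 35) * 10.0
    z, pval = mood(x, y, axis=1)
    return z, pval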
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' \
or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x-y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count*(count + 1.) * 0.25
se = count*(count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
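# Editor's sketch (hypothetical helper, not part of the original module):
# paired before/after measurements for 30 subjects; the test is run on the
# differences between the two arrays.
def _example_wilcoxon_usage(seed=6):
    rng = np.random.RandomState(seed)
    before = rng.normal(loc=10.0, size=30)
    after = before + rng.normal(loc=0.5, size=30)
    T, prob = wilcoxon(before, after, zero_method='wilcox')
    return T, prob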
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
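# Editor's sketch (hypothetical helper, not part of the original module):
# Mood's median test on three small synthetic samples, keeping the returned
# contingency table for follow-up analysis.
def _example_median_test_usage(seed=7):
    rng = np.random.RandomState(seed)
    g1 = rng.normal(loc=0.0, size=16)
    g2 = rng.normal(loc=1.0, size=15)
    g3 = rng.normal(loc=0.5, size=17)
    stat, p, med, tbl = median_test(g1, g2, g3, ties='below')
    return stat, p, med, tbl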
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3*g2*g2
sigsq = 1.0/g2
sig = sqrt(sigsq)
mu = g1*sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] = p12[k]/sig**k
# Add all of the terms to polynomial
totp = p12[0] - (g1/6.0*p12[3]) + \
(g2/24.0*p12[4] + g1*g1/72.0*p12[6]) - \
(g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \
(g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] +
g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi)/sig
def thefunc(x):
xn = (x-mu)/sig
return totp(xn)*exp(-xn*xn/2.0)
return thefunc
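# Editor's sketch (hypothetical helper, not part of the original module):
# build a density approximation with the parameterization used above (the
# implementation sets sigsq = 1/g2 and mu = g1*sig**3) and evaluate it on a
# small grid; the parameter values are arbitrary and purely illustrative.
def _example_pdf_fromgamma_usage():
    f = pdf_fromgamma(0.1, 1.0)
    grid = np.linspace(-4.0, 4.0, 9)
    return grid, f(grid)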
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high-low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j*ang), axis=axis))
mask = res < 0
if (mask.ndim > 0):
res[mask] += 2*pi
elif mask:
res = res + 2*pi
return res*(high-low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi) * sqrt(-2*log(R))
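# Editor's sketch (hypothetical helper, not part of the original module):
# the three circular statistics above applied to compass headings in degrees
# that straddle the 0/360 wrap point.
def _example_circular_usage():
    headings = np.array([355., 5., 2., 359., 10.])
    m = circmean(headings, high=360, low=0)
    v = circvar(headings, high=360, low=0)
    s = circstd(headings, high=360, low=0)
    return m, v, s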
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
gpl-3.0
|
quietcoolwu/myBuildingMachineLearningSystemsWithPython-second_edition
|
ch06/04_sent.py
|
6
|
10142
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import re
import nltk
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "04"
import json
poscache_filename = "poscache.json"
try:
poscache = json.load(open(poscache_filename, "r"))
except IOError:
poscache = {}
class LinguisticVectorizer(BaseEstimator):
def get_feature_names(self):
return np.array(['sent_neut', 'sent_pos', 'sent_neg',
'nouns', 'adjectives', 'verbs', 'adverbs',
'allcaps', 'exclamation', 'question'])
def fit(self, documents, y=None):
return self
def _get_sentiments(self, d):
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
sent = tuple(nltk.word_tokenize(d))
if poscache is not None:
if d in poscache:
tagged = poscache[d]
else:
poscache[d] = tagged = nltk.pos_tag(sent)
else:
tagged = nltk.pos_tag(sent)
pos_vals = []
neg_vals = []
nouns = 0.
adjectives = 0.
verbs = 0.
adverbs = 0.
for w, t in tagged:
p, n = 0, 0
sent_pos_type = None
if t.startswith("NN"):
sent_pos_type = "n"
nouns += 1
elif t.startswith("JJ"):
sent_pos_type = "a"
adjectives += 1
elif t.startswith("VB"):
sent_pos_type = "v"
verbs += 1
elif t.startswith("RB"):
sent_pos_type = "r"
adverbs += 1
if sent_pos_type is not None:
sent_word = "%s/%s" % (sent_pos_type, w)
if sent_word in sent_word_net:
p, n = sent_word_net[sent_word]
pos_vals.append(p)
neg_vals.append(n)
l = len(sent)
avg_pos_val = np.mean(pos_vals)
avg_neg_val = np.mean(neg_vals)
return [1 - avg_pos_val - avg_neg_val, avg_pos_val, avg_neg_val,
nouns / l, adjectives / l, verbs / l, adverbs / l]
def transform(self, documents):
obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs = np.array(
[self._get_sentiments(d) for d in documents]).T
allcaps = []
exclamation = []
question = []
for d in documents:
allcaps.append(
np.sum([t.isupper() for t in d.split() if len(t) > 2]))
exclamation.append(d.count("!"))
question.append(d.count("?"))
result = np.array(
[obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs, allcaps,
exclamation, question]).T
return result
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in emo_repl.keys()]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_union_model(params=None):
def preprocessor(tweet):
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.iteritems():
tweet = re.sub(r, repl, tweet)
return tweet.replace("-", " ").replace("_", " ")
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
ling_stats = LinguisticVectorizer()
all_features = FeatureUnion(
[('ling', ling_stats), ('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('ling', ling_stats)])
clf = MultinomialNB()
pipeline = Pipeline([('all', all_features), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
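# Editor's sketch (hypothetical, not part of the book's script): fitting the
# union pipeline above on two made-up tweets; a real run uses the Sanders
# data loaded in __main__ below, and the POS tagging needs the NLTK corpora.
def _example_union_model_usage():
    toy_tweets = np.asarray(["I love this :)", "this is soooo bad :("])
    toy_labels = np.asarray([1, 0])
    clf = create_union_model()
    clf.fit(toy_tweets, toy_labels)
    return clf.predict(toy_tweets)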
def __grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print clf
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in xrange(len(X_wrong)):
print "clf.predict('%s')=%i instead of %i" %\
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx])
def get_best_model():
best_params = dict(all__tfidf__ngram_range=(1, 2),
all__tfidf__min_df=1,
all__tfidf__stop_words=None,
all__tfidf__smooth_idf=False,
all__tfidf__use_idf=False,
all__tfidf__sublinear_tf=True,
all__tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_union_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
#from sklearn.utils import shuffle
# print "shuffle, sample"
#X_orig, Y_orig = shuffle(X_orig, Y_orig)
#X_orig = X_orig[:100,]
#Y_orig = Y_orig[:100,]
classes = np.unique(Y_orig)
for c in classes:
print "#%s: %i" % (c, sum(Y_orig == c))
print "== Pos vs. neg =="
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print "== Pos/neg vs. irrelevant/neutral =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print "== Pos vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print "== Neg vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print "time spent:", time.time() - start_time
json.dump(poscache, open(poscache_filename, "w"))
|
mit
|
akshayrangasai/akshayrangasai.github.io
|
Blog Post Content/Airport_Queue_Simulation.py
|
1
|
1887
|
#Imports for solution
import numpy as np
import scipy.stats as sp
from matplotlib.pyplot import *
#Setting Distribution variables
##All rates are in per Minute.
QUEUE_ARRIVAL_RATE = 6
QUEUE_BUFFER_TIME = 4 #Takes 15 seconds to cover queue
N_SCANNERS = 1
SCANNER_BAG_CHECKING_RATE = 3 #Takes 20 seconds to put your bag on Scanner
FRISK_MACHINES_PER_SCANNER = 1 #Number of people checking machine per scanner
N_FRISK_MACHINES = N_SCANNERS*FRISK_MACHINES_PER_SCANNER
FRISK_CHECKING_RATE = 2 #Half a minute per frisk
SCANNER_RATE = SCANNER_BAG_CHECKING_RATE*N_SCANNERS
FRISK_RATE = FRISK_CHECKING_RATE*N_FRISK_MACHINES
FRISK_ARRIVAL_RATE = SCANNER_RATE
#Modeling First Part of the Queue
ARRIVAL_PATTERN = sp.poisson.rvs(QUEUE_ARRIVAL_RATE,size = 60) #for an hour
ARRIVAL_LIST = []
for index, item in enumerate(ARRIVAL_PATTERN):
ARRIVAL_LIST += [index]*item
#print ARRIVAL_LIST
TIMEAXIS = np.linspace(1,60,60)
arrivalfig = figure()
arrivalplot = plot(TIMEAXIS,ARRIVAL_PATTERN,'o')
show()
SCAN_PATTERN = sp.poisson.rvs(SCANNER_RATE,size=60)
SCAN_LIST = []
for index, item in enumerate(SCAN_PATTERN):
SCAN_LIST += [index]*item
arrivalfig = figure()
arrivalplot = plot(TIMEAXIS,SCAN_PATTERN,'o')
show()
FRISK_PATTERN = sp.poisson.rvs(FRISK_RATE,size=60)
FRISK_LIST = []
for index, item in enumerate(FRISK_PATTERN):
FRISK_LIST += [index]*item
arrivalfig = figure()
arrivalplot = plot(TIMEAXIS,FRISK_PATTERN,'o')
show()
EXIT_NUMER = zip(FRISK_PATTERN,SCAN_PATTERN)
EXIT_NUMBER = [min(k) for k in EXIT_NUMER]
EXIT_PATTERN = []
for index, item in enumerate(EXIT_NUMBER):
EXIT_PATTERN += [index]*item
RESIDUAL_ARRIVAL_PATTERN = ARRIVAL_LIST[0:len(EXIT_PATTERN)]
WAIT_TIMES = [m-n for m,n in zip(EXIT_PATTERN,RESIDUAL_ARRIVAL_PATTERN)]
#print EXIT_PATTERN
'''
for i,val in EXIT_PATTERN:
WAIT_TIMES += [ARRIVAL_PATTERN(i) - val]
'''
plot(WAIT_TIMES,'r-')
show()
|
mit
|
florian-f/sklearn
|
sklearn/linear_model/bayes.py
|
4
|
15486
|
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_arrays
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (length)
Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_arrays(X, y, sparse_format='dense',
dtype=np.float)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (n_samples)
Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`sigma_` : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_arrays(X, y, sparse_format='dense',
dtype=np.float)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self._set_intercept(X_mean, y_mean, X_std)
return self
|
bsd-3-clause
|
olologin/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
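###############################################################################
# Editor's note: illustrative extension, not part of the original example.
# ``weights`` may also be a user-defined callable that maps an array of
# neighbor distances to an array of weights of the same shape; the Gaussian
# kernel below is one hypothetical choice.
def gaussian_weights(distances, bandwidth=0.5):
    # Smaller bandwidth -> more local (wigglier) fit.
    return np.exp(-(distances ** 2) / (2.0 * bandwidth ** 2))

knn_gauss = neighbors.KNeighborsRegressor(n_neighbors, weights=gaussian_weights)
y_gauss = knn_gauss.fit(X, y).predict(T)
plt.figure()
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_gauss, c='b', label='gaussian-weighted prediction')
plt.legend()
plt.show()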
|
bsd-3-clause
|
bmazin/ARCONS-pipeline
|
hotpix/removePixelsGui.py
|
1
|
2237
|
'''
Author: Matt Strader Date: January 13, 2015
'''
import sys, os
import numpy as np
from PyQt4 import QtCore
from PyQt4 import QtGui
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from functools import partial
from util.FileName import FileName
from util.ObsFile import ObsFile
from util.CalLookupFile import CalLookupFile
from quicklook.ObsFileViewer import ObsFileViewer
from hotpix.manuallyRemovePixels import removePixel
class HotPixGui(ObsFileViewer):
def __init__(self, **kwargs):
super(HotPixGui,self).__init__(**kwargs)
self.addClickFunc(self.removeBadPixel)
self.addClickFunc(self.printTimeMask)
def printTimeMask(self,row,col):
print self.obs.hotPixTimeMask.get_intervals(row,col)
def removeBadPixel(self,row,col,reason='unknown'):
reply = QtGui.QMessageBox.question(self, 'Confirm',
'Mark pixel (x,y)=({},{}) with tag \'{}\'?'.format(col,row,reason), QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
if not self.obs.hotPixFileName is None:
timeMaskPath = self.obs.hotPixFileName
else:
raise AttributeError('obs file does not have a timeMask loaded')
self.obs.hotPixFile.close()
removePixel(timeMaskPath=timeMaskPath,pixelRow=row,
pixelCol=col,reason=reason)
self.obsMethod('loadHotPixCalFile',timeMaskPath,reasons=['hot pixel','dead pixel','unknown'])
print 'pixel (x,y)=({},{}) tagged'.format(col,row)
if __name__ == "__main__":
kwargs = {}
if len(sys.argv) > 1:
if sys.argv[1] == '-h' or sys.argv[1] == '--help':
print 'Usage: {} obsFilePath/obsTstamp'.format(sys.argv[0])
elif os.path.exists(sys.argv[1]):
kwargs['obsPath'] = sys.argv[1]
else:
kwargs['obsTstamp'] = sys.argv[1]
else:
obsPath = None
form = HotPixGui(**kwargs)
form.show()
|
gpl-2.0
|
khkaminska/scikit-learn
|
sklearn/neural_network/rbm.py
|
206
|
12292
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
    binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
        visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
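# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It exercises only the public methods defined above on random binary data.
if __name__ == "__main__":
    demo_rng = np.random.RandomState(0)
    X_demo = (demo_rng.rand(200, 16) > 0.5).astype(np.float64)
    rbm = BernoulliRBM(n_components=8, learning_rate=0.05, n_iter=5,
                       random_state=0)
    rbm.fit(X_demo)
    H = rbm.transform(X_demo)                  # P(h=1|v) for each sample
    print(H.shape)                             # (200, 8)
    print(rbm.score_samples(X_demo).mean())    # pseudo-likelihood proxy
    V_new = rbm.gibbs(X_demo[:5])              # one Gibbs step from 5 samples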
|
bsd-3-clause
|
ywcui1990/htmresearch
|
projects/capybara/datasets/SyntheticData/generate_synthetic_data.py
|
9
|
5156
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate synthetic sequences using a pool of sequence motifs
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import decomposition
import os
plt.figure()
# Generate a set of sequence motifs
def generateSequenceMotifs(numMotif, motifLength, seed=None):
if seed is not None:
np.random.seed(seed)
sequenceMotifs = np.random.randn(motifLength, motifLength)
pca = decomposition.PCA(n_components=numMotif)
pca.fit(sequenceMotifs)
sequenceMotifs = pca.components_
for i in range(numMotif):
sequenceMotifs[i, :] = sequenceMotifs[i, :]-min(sequenceMotifs[i, :])
sequenceMotifs[i, :] = sequenceMotifs[i, :]/max(sequenceMotifs[i, :])
return sequenceMotifs
def generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs):
motifLength = sequenceMotifs.shape[1]
sequence = np.zeros((sequenceLength + 20,))
motifState = np.zeros((sequenceLength + 20,))
randomLengthList = np.linspace(1, 10, 10).astype('int')
t = 0
while t < sequenceLength:
randomLength = np.random.choice(randomLengthList)
sequence[t:t + randomLength] = np.random.rand(randomLength)
motifState[t:t + randomLength] = -1
t += randomLength
motifIdx = np.random.choice(useMotif[currentClass])
sequence[t:t + motifLength] = sequenceMotifs[motifIdx]
motifState[t:t + motifLength] = motifIdx
t += motifLength
sequence = sequence[:sequenceLength]
motifState = motifState[:sequenceLength]
return sequence, motifState
def generateSequences(numSeq, numClass, sequenceLength, useMotif, sequenceMotifs):
trainData = np.zeros((numSeq, sequenceLength+1))
numSeqPerClass = numSeq/numClass
classList = []
for classIdx in range(numClass):
classList += [classIdx] * numSeqPerClass
for seq in range(numSeq):
currentClass = classList[seq]
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
trainData[seq, 0] = currentClass
trainData[seq, 1:] = sequence
return trainData
numMotif = 5
motifLength = 5
sequenceMotifs = generateSequenceMotifs(numMotif, 5, seed=42)
numTrain = 100
numTest = 100
numClass = 2
motifPerClass = 2
np.random.seed(2)
useMotif = {}
motifList = set(range(numMotif))
for classIdx in range(numClass):
useMotifForClass = []
for _ in range(motifPerClass):
useMotifForClass.append(np.random.choice(list(motifList)))
motifList.remove(useMotifForClass[-1])
useMotif[classIdx] = useMotifForClass
sequenceLength = 100
currentClass = 0
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
MotifColor = {}
colorList = ['r','g','b','c','m','y']
i = 0
for c in useMotif.keys():
for v in useMotif[c]:
MotifColor[v] = colorList[i]
i += 1
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 3 * 4))
for plti in xrange(4):
currentClass = [0 if plti < 2 else 1][0]
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
ax[plti].plot(sequence, 'k-')
startPatch = False
for t in range(len(motifState)):
if motifState[t] >= 0 and startPatch is False:
startPatchAt = t
startPatch = True
currentMotif = motifState[t]
if startPatch and (motifState[t] < 0):
endPatchAt = t-1
ax[plti].add_patch(
patches.Rectangle(
(startPatchAt, 0),
endPatchAt-startPatchAt, 1, alpha=0.5,
color=MotifColor[currentMotif]
)
)
startPatch = False
ax[plti].set_xlim([0, 100])
ax[plti].set_ylabel('class {}'.format(currentClass))
outputDir = 'Test1'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
trainData = generateSequences(
numTrain, numClass, sequenceLength, useMotif, sequenceMotifs)
testData = generateSequences(
numTest, numClass, sequenceLength, useMotif, sequenceMotifs)
np.savetxt('Test1/Test1_TRAIN', trainData, delimiter=',', fmt='%.10f')
np.savetxt('Test1/Test1_TEST', testData, delimiter=',', fmt='%.10f')
plt.savefig('Test1/Test1.png')
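# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original script.
# Reading the generated files back: each row is one sequence, column 0 is the
# class label and the remaining `sequenceLength` columns hold the signal.
loaded = np.loadtxt('Test1/Test1_TRAIN', delimiter=',')
loadedLabels = loaded[:, 0].astype(int)
loadedSignals = loaded[:, 1:]
print(loadedLabels.shape)   # (numTrain,)
print(loadedSignals.shape)  # (numTrain, sequenceLength)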
|
agpl-3.0
|
nvoron23/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
yonglehou/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
dalejung/trtools
|
trtools/core/tests/test_select_translate.py
|
1
|
2877
|
from unittest import TestCase
import numpy as np
import pandas as pd
import trtools.util.testing as tm
import trtools.core.select_translate as sl
class TestSelectTranslate(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_select_translate_frame(self):
"""
Test the dictionary translate
"""
df = pd.DataFrame({111: [1,2], 123: [3,4]})
# setup the translation
sl.KEY_TRANS = {'dale': 111, 'bob': 123}
# non list columns should return a scalar
test = df.ix[0, 'dale']
assert np.isscalar(test)
assert test == 1
# list columns should return a series
test = df.ix[0, ['dale']]
assert isinstance(test, pd.Series)
# use values since we have int-index
assert test.values[0] == 1
# multiple selection
test = df.ix[0, ['dale', 'bob']]
assert isinstance(test, pd.Series)
# use values since we have int-index
assert test.values[0] == 1
assert test.values[1] == 3
# multi row
test = df.ix[:, ['dale', 'bob']]
assert isinstance(test, pd.DataFrame)
assert np.all(test.values[0] == [1,3])
assert np.all(test.values[1] == [2,4])
# test boolean index
test = df.ix[:, [True, False]]
col = df.columns[0] # not guaranteed column order so grab it this way
assert np.all(df.ix[:, [col]].values == test.values)
# unset the translation so future tests don't mess up
sl.KEY_TRANS = {}
def test_select_translate_panel(self):
"""
Test the dictionary translate
"""
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
df3 = tm.makeTimeDataFrame()
panel = pd.Panel({111: df1, 123: df2, 666:df3})
# setup the translation
sl.KEY_TRANS = {'dale': 111, 'bob': 123}
test = panel.ix["dale"]
tm.assert_frame_equal(test, df1)
tm.assert_frame_equal(panel.ix[123], df2)
tm.assert_frame_equal(panel.ix['bob'], df2)
tm.assert_frame_equal(panel.ix['bob', :10], df2.ix[:10])
tm.assert_frame_equal(panel.ix['bob', :10, :3], df2.ix[:10, :3])
# grab sub panel
test = panel.ix[["dale", "bob"]]
assert np.all(test.items == [111, 123])
tm.assert_frame_equal(test.ix['dale'], df1)
tm.assert_frame_equal(test.ix['bob'], df2)
sl.KEY_TRANS = None
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
|
mit
|
google/cluster-scheduler-simulator
|
src/main/python/graphing-scripts/utils.py
|
4
|
3270
|
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from matplotlib import use, rc
use('Agg')
import matplotlib.pyplot as plt
# plot saving utility function
def writeout(filename_base, formats = ['pdf']):
for fmt in formats:
plt.savefig("%s.%s" % (filename_base, fmt), format=fmt, bbox_inches='tight')
# plt.savefig("%s.%s" % (filename_base, fmt), format=fmt)
def set_leg_fontsize(size):
rc('legend', fontsize=size)
def set_paper_rcs():
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'],
'serif':['Helvetica'],'size':8})
rc('text', usetex=True)
rc('legend', fontsize=7)
rc('figure', figsize=(3.33,2.22))
# rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
rc('axes', linewidth=0.5)
rc('lines', linewidth=0.5)
def set_rcs():
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'],
'serif':['Times'],'size':12})
rc('text', usetex=True)
rc('legend', fontsize=7)
rc('figure', figsize=(6,4))
rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
rc('axes', linewidth=0.5)
rc('lines', linewidth=0.5)
def append_or_create(d, i, e):
if not i in d:
d[i] = [e]
else:
d[i].append(e)
# Append e to the array at position (i,k).
# d - a dictionary of dictionaries of arrays, essentially a 2d dictionary.
# i, k - essentially a 2 element tuple to use as the key into this 2d dict.
# e - the value to add to the array indexed by key (i,k).
def append_or_create_2d(d, i, k, e):
if not i in d:
d[i] = {k : [e]}
elif k not in d[i]:
d[i][k] = [e]
else:
d[i][k].append(e)
def cell_to_anon(cell):
if cell == 'A':
return 'A'
elif cell == 'B':
return 'B'
elif cell == 'C':
return 'C'
else:
return 'SYNTH'
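# Editor's note: illustrative sketch, not part of the original module. It shows
# how the helpers above are meant to be used: accumulate per-(i, k) samples
# with append_or_create_2d, then plot and write the figure out with writeout.
# The output filename 'demo_utilization' is hypothetical.
if __name__ == '__main__':
    samples = {}
    append_or_create_2d(samples, 'A', 10, 0.5)   # creates samples['A'][10] = [0.5]
    append_or_create_2d(samples, 'A', 10, 0.7)   # appends -> [0.5, 0.7]
    append_or_create_2d(samples, 'B', 10, 0.1)
    print(samples)
    plt.figure()
    plt.plot(samples['A'][10])
    writeout('demo_utilization', formats=['pdf', 'png'])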
|
bsd-3-clause
|
Solid-Mechanics/matplotlib-4-abaqus
|
matplotlib/animation.py
|
4
|
41616
|
# TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
import sys
import itertools
import contextlib
from matplotlib.cbook import iterable, is_string_like
from matplotlib.compat import subprocess
from matplotlib import verbose
from matplotlib import rcParams
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
def __init__(self):
self.avail = dict()
# Returns a decorator that can be used on classes to register them under
# a name. As in:
# @register('foo')
# class Foo:
# pass
def register(self, name):
def wrapper(writerClass):
if writerClass.isAvailable():
self.avail[name] = writerClass
return writerClass
return wrapper
def list(self):
''' Get a list of available MovieWriters.'''
return self.avail.keys()
def is_available(self, name):
return name in self.avail
def __getitem__(self, name):
if not self.avail:
raise RuntimeError("No MovieWriters available!")
return self.avail[name]
writers = MovieWriterRegistry()
class MovieWriter(object):
'''
Base class for writing movies. Fundamentally, what a MovieWriter does
    is provide a way to grab frames by calling grab_frame(). setup()
is called to start the process and finish() is called afterwards.
This class is set up to provide for writing movie frame data to a pipe.
saving() is provided as a context manager to facilitate this process as::
with moviewriter.saving('myfile.mp4'):
# Iterate over frames
moviewriter.grab_frame()
The use of the context manager ensures that setup and cleanup are
performed as necessary.
frame_format: string
The format used in writing frame data, defaults to 'rgba'
'''
def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
metadata=None):
'''
Construct a new MovieWriter object.
fps: int
Framerate for movie.
codec: string or None, optional
The codec to use. If None (the default) the setting in the
rcParam `animation.codec` is used.
bitrate: int or None, optional
The bitrate for the saved movie file, which is one way to control
the output file size and quality. The default value is None,
which uses the value stored in the rcParam `animation.bitrate`.
A value of -1 implies that the bitrate should be determined
automatically by the underlying utility.
extra_args: list of strings or None
A list of extra string arguments to be passed to the underlying
            movie utility. The default is None, which passes the additional
            arguments in the 'animation.extra_args' rcParam.
metadata: dict of string:string or None
A dictionary of keys and values for metadata to include in the
output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
'''
self.fps = fps
self.frame_format = 'rgba'
if codec is None:
self.codec = rcParams['animation.codec']
else:
self.codec = codec
if bitrate is None:
self.bitrate = rcParams['animation.bitrate']
else:
self.bitrate = bitrate
if extra_args is None:
self.extra_args = list(rcParams[self.args_key])
else:
self.extra_args = extra_args
if metadata is None:
self.metadata = dict()
else:
self.metadata = metadata
@property
def frame_size(self):
'A tuple (width,height) in pixels of a movie frame.'
width_inches, height_inches = self.fig.get_size_inches()
return width_inches * self.dpi, height_inches * self.dpi
def setup(self, fig, outfile, dpi, *args):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
'''
self.outfile = outfile
self.fig = fig
self.dpi = dpi
# Run here so that grab_frame() can write the data to a pipe. This
# eliminates the need for temp files.
self._run()
@contextlib.contextmanager
def saving(self, *args):
'''
Context manager to facilitate writing the movie file.
``*args`` are any parameters that should be passed to `setup`.
'''
# This particular sequence is what contextlib.contextmanager wants
self.setup(*args)
yield
self.finish()
def _run(self):
# Uses subprocess to call the program for assembling frames into a
# movie file. *args* returns the sequence of command line arguments
# from a few configuration options.
command = self._args()
if verbose.ge('debug'):
output = sys.stdout
else:
output = subprocess.PIPE
verbose.report('MovieWriter.run: running command: %s' %
' '.join(command))
self._proc = subprocess.Popen(command, shell=False,
stdout=output, stderr=output,
stdin=subprocess.PIPE)
def finish(self):
'Finish any processing for writing the movie.'
self.cleanup()
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the 'savefig'
command that saves the figure.
'''
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
self.fig.savefig(self._frame_sink(), format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
except RuntimeError:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out,
err), level='helpful')
raise
def _frame_sink(self):
'Returns the place to which frames should be written.'
return self._proc.stdin
def _args(self):
'Assemble list of utility-specific command-line arguments.'
        raise NotImplementedError("args needs to be implemented by subclass.")
def cleanup(self):
'Clean-up and collect the process used to write the movie file.'
out, err = self._proc.communicate()
verbose.report('MovieWriter -- '
'Command stdout:\n%s' % out, level='debug')
verbose.report('MovieWriter -- '
'Command stderr:\n%s' % err, level='debug')
@classmethod
def bin_path(cls):
'''
Returns the binary path to the commandline tool used by a specific
subclass. This is a class method so that the tool can be looked for
before making a particular MovieWriter subclass available.
'''
return rcParams[cls.exec_key]
@classmethod
def isAvailable(cls):
'''
Check to see if a MovieWriter subclass is actually available by
running the commandline tool.
'''
try:
subprocess.Popen(cls.bin_path(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except OSError:
return False
class FileMovieWriter(MovieWriter):
'`MovieWriter` subclass that handles writing to a file.'
def __init__(self, *args, **kwargs):
MovieWriter.__init__(self, *args, **kwargs)
self.frame_format = rcParams['animation.frame_format']
def setup(self, fig, outfile, dpi, frame_prefix='_tmp', clear_temp=True):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
frame_prefix: string, optional
The filename prefix to use for the temporary files. Defaults
to '_tmp'
clear_temp: bool
Specifies whether the temporary files should be deleted after
the movie is written. (Useful for debugging.) Defaults to True.
'''
self.fig = fig
self.outfile = outfile
self.dpi = dpi
self.clear_temp = clear_temp
self.temp_prefix = frame_prefix
self._frame_counter = 0 # used for generating sequential file names
self._temp_names = list()
self.fname_format_str = '%s%%07d.%s'
@property
def frame_format(self):
'''
Format (png, jpeg, etc.) to use for saving the frames, which can be
decided by the individual subclasses.
'''
return self._frame_format
@frame_format.setter
def frame_format(self, frame_format):
if frame_format in self.supported_formats:
self._frame_format = frame_format
else:
self._frame_format = self.supported_formats[0]
def _base_temp_name(self):
# Generates a template name (without number) given the frame format
# for extension and the prefix.
return self.fname_format_str % (self.temp_prefix, self.frame_format)
def _frame_sink(self):
# Creates a filename for saving using the basename and the current
# counter.
fname = self._base_temp_name() % self._frame_counter
# Save the filename so we can delete it later if necessary
self._temp_names.append(fname)
verbose.report(
'FileMovieWriter.frame_sink: saving frame %d to fname=%s' %
(self._frame_counter, fname),
level='debug')
self._frame_counter += 1 # Ensures each created name is 'unique'
# This file returned here will be closed once it's used by savefig()
# because it will no longer be referenced and will be gc-ed.
return open(fname, 'wb')
def finish(self):
# Call run here now that all frame grabbing is done. All temp files
# are available to be assembled.
self._run()
MovieWriter.finish(self) # Will call clean-up
# Check error code for creating file here, since we just run
# the process here, rather than having an open pipe.
if self._proc.returncode:
raise RuntimeError('Error creating movie, return code: '
+ str(self._proc.returncode)
+ ' Try running with --verbose-debug')
def cleanup(self):
MovieWriter.cleanup(self)
#Delete temporary files
if self.clear_temp:
import os
verbose.report(
'MovieWriter: clearing temporary fnames=%s' %
str(self._temp_names),
level='debug')
for fname in self._temp_names:
os.remove(fname)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase:
exec_key = 'animation.ffmpeg_path'
args_key = 'animation.ffmpeg_args'
@property
def output_args(self):
# The %dk adds 'k' as a suffix so that ffmpeg treats our bitrate as in
# kbps
args = ['-vcodec', self.codec]
if self.bitrate > 0:
args.extend(['-b', '%dk' % self.bitrate])
if self.extra_args:
args.extend(self.extra_args)
for k, v in self.metadata.items():
args.extend(['-metadata', '%s=%s' % (k, v)])
return args + ['-y', self.outfile]
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(MovieWriter, FFMpegBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a pipe.
args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
'-r', str(self.fps)]
# Logging is quieted because subprocess.PIPE has limited buffer size.
if not verbose.ge('debug'):
args += ['-loglevel', 'quiet']
args += ['-i', 'pipe:'] + self.output_args
return args
#Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FileMovieWriter, FFMpegBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a collection of temp images
return [self.bin_path(), '-vframes', str(self._frame_counter),
'-r', str(self.fps), '-i',
self._base_temp_name()] + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
exec_key = 'animation.avconv_path'
args_key = 'animation.avconv_args'
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
pass
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
pass
# Base class of mencoder information. Contains configuration key information
# as well as arguments for controlling *output*
class MencoderBase:
exec_key = 'animation.mencoder_path'
args_key = 'animation.mencoder_args'
# Mencoder only allows certain keys, other ones cause the program
# to fail.
allowed_metadata = ['name', 'artist', 'genre', 'subject', 'copyright',
'srcform', 'comment']
# Mencoder mandates using name, but 'title' works better with ffmpeg.
    # If we find it, just put its value into name
def _remap_metadata(self):
if 'title' in self.metadata:
self.metadata['name'] = self.metadata['title']
@property
def output_args(self):
self._remap_metadata()
args = ['-o', self.outfile, '-ovc', 'lavc', '-lavcopts',
'vcodec=%s' % self.codec]
if self.bitrate > 0:
args.append('vbitrate=%d' % self.bitrate)
if self.extra_args:
args.extend(self.extra_args)
if self.metadata:
args.extend(['-info', ':'.join('%s=%s' % (k, v)
for k, v in self.metadata.items()
if k in self.allowed_metadata)])
return args
# Combine Mencoder options with pipe-based writing
@writers.register('mencoder')
class MencoderWriter(MovieWriter, MencoderBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(), '-', '-demuxer', 'rawvideo', '-rawvideo',
('w=%i:h=%i:' % self.frame_size +
'fps=%i:format=%s' % (self.fps,
self.frame_format))] + self.output_args
# Combine Mencoder options with temp file-based writing
@writers.register('mencoder_file')
class MencoderFileWriter(FileMovieWriter, MencoderBase):
supported_formats = ['png', 'jpeg', 'tga', 'sgi']
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(),
'mf://%s*.%s' % (self.temp_prefix, self.frame_format),
'-frames', str(self._frame_counter), '-mf',
'type=%s:fps=%d' % (self.frame_format,
self.fps)] + self.output_args
# Base class for animated GIFs with convert utility
class ImageMagickBase:
exec_key = 'animation.convert_path'
args_key = 'animation.convert_args'
@property
def delay(self):
return 100. / self.fps
@property
def output_args(self):
return [self.outfile]
@writers.register('imagemagick')
class ImageMagickWriter(MovieWriter, ImageMagickBase):
def _args(self):
return ([self.bin_path(),
'-size', '%ix%i' % self.frame_size, '-depth', '8',
'-delay', str(self.delay), '-loop', '0',
'%s:-' % self.frame_format]
+ self.output_args)
@writers.register('imagemagick_file')
class ImageMagickFileWriter(FileMovieWriter, ImageMagickBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
'%s*.%s' % (self.temp_prefix, self.frame_format)]
+ self.output_args)
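# Editor's note: illustrative sketch, not part of the original module. It shows
# how the MovieWriterRegistry defined above is queried; a writer only appears
# in the registry if its command-line tool was found on this machine.
if __name__ == '__main__':
    print(writers.list())                    # e.g. ['ffmpeg', 'imagemagick', ...]
    if writers.is_available('ffmpeg'):
        FFWriter = writers['ffmpeg']
        demo_writer = FFWriter(fps=15, bitrate=1800,
                               metadata={'title': 'demo'})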
class Animation(object):
'''
This class wraps the creation of an animation using matplotlib. It is
only a base class which should be subclassed to provide needed behavior.
*fig* is the figure object that is used to get draw, resize, and any
other needed events.
*event_source* is a class that can run a callback when desired events
are generated, as well as be stopped and started. Examples include timers
(see :class:`TimedAnimation`) and file system notifications.
*blit* is a boolean that controls whether blitting is used to optimize
drawing.
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
# Disables blitting for backends that don't support it. This
# allows users to request it if available, but still have a
# fallback that works if it is not.
self._blit = blit and fig.canvas.supports_blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Clear the initial frame
self._init_draw()
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event',
self._stop)
if self._blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# On start, we add our callback for stepping the animation and
# actually start the event_source. We also disconnect _start
# from the draw_events
self.event_source.add_callback(self._step)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
def _stop(self, *args):
# On stop we disconnect all of our events.
if self._blit:
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source.remove_callback(self._step)
self.event_source = None
def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
bitrate=None, extra_args=None, metadata=None, extra_anim=None,
savefig_kwargs=None):
'''
Saves a movie file by drawing every frame.
*filename* is the output filename, e.g., :file:`mymovie.mp4`
*writer* is either an instance of :class:`MovieWriter` or a string
key that identifies a class to use, such as 'ffmpeg' or 'mencoder'.
If nothing is passed, the value of the rcparam `animation.writer` is
used.
*fps* is the frames per second in the movie. Defaults to None,
which will use the animation's specified interval to set the frames
per second.
*dpi* controls the dots per inch for the movie frames. This combined
with the figure's size in inches controls the size of the movie.
*codec* is the video codec to be used. Not all codecs are supported
by a given :class:`MovieWriter`. If none is given, this defaults to the
value specified by the rcparam `animation.codec`.
*bitrate* specifies the amount of bits used per second in the
compressed movie, in kilobits per second. A higher number means a
higher quality movie, but at the cost of increased file size. If no
value is given, this defaults to the value given by the rcparam
`animation.bitrate`.
*extra_args* is a list of extra string arguments to be passed to the
        underlying movie utility. The default is None, which passes the
        additional arguments in the 'animation.extra_args' rcParam.
*metadata* is a dictionary of keys and values for metadata to include
in the output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
*extra_anim* is a list of additional `Animation` objects that should
be included in the saved movie file. These need to be from the same
`matplotlib.Figure` instance. Also, animation frames will just be
simply combined, so there should be a 1:1 correspondence between
the frames from the different animations.
*savefig_kwargs* is a dictionary containing keyword arguments to be
passed on to the 'savefig' command which is called repeatedly to save
the individual frames. This can be used to set tight bounding boxes,
for example.
'''
if savefig_kwargs is None:
savefig_kwargs = {}
# FIXME: Using 'bbox_inches' doesn't currently work with
# writers that pipe the data to the command because this
# requires a fixed frame size (see Ryan May's reply in this
# thread: [1]). Thus we drop the 'bbox_inches' argument if it
# exists in savefig_kwargs.
#
# [1] (http://matplotlib.1069221.n5.nabble.com/
# Animation-class-let-save-accept-kwargs-which-
# are-passed-on-to-savefig-td39627.html)
#
if 'bbox_inches' in savefig_kwargs:
if not (writer in ['ffmpeg_file', 'mencoder_file'] or
isinstance(writer,
(FFMpegFileWriter, MencoderFileWriter))):
print("Warning: discarding the 'bbox_inches' argument in "
"'savefig_kwargs' as it is only currently supported "
"with the writers 'ffmpeg_file' and 'mencoder_file' "
"(writer used: "
"'{}').".format(writer if isinstance(writer, str)
else writer.__class__.__name__))
savefig_kwargs.pop('bbox_inches')
# Need to disconnect the first draw callback, since we'll be doing
# draws. Otherwise, we'll end up starting the animation.
if self._first_draw_id is not None:
self._fig.canvas.mpl_disconnect(self._first_draw_id)
reconnect_first_draw = True
else:
reconnect_first_draw = False
if fps is None and hasattr(self, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / self._interval
# If the writer is None, use the rc param to find the name of the one
# to use
if writer is None:
writer = rcParams['animation.writer']
# Re-use the savefig DPI for ours if none is given
if dpi is None:
dpi = rcParams['savefig.dpi']
if codec is None:
codec = rcParams['animation.codec']
if bitrate is None:
bitrate = rcParams['animation.bitrate']
all_anim = [self]
        if extra_anim is not None:
all_anim.extend(anim
for anim
in extra_anim if anim._fig is self._fig)
# If we have the name of a writer, instantiate an instance of the
# registered class.
if is_string_like(writer):
if writer in writers.avail:
writer = writers[writer](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
else:
import warnings
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers.list()[0]
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install mencoder or "
"ffmpeg to save animations.")
verbose.report('Animation.save using %s' % type(writer),
level='helpful')
# Create a new sequence of frames for saved data. This is different
# from new_frame_seq() to give the ability to save 'live' generated
# frame information to be saved later.
# TODO: Right now, after closing the figure, saving a movie won't work
# since GUI widgets are gone. Either need to remove extra code to
        # allow for this non-existent use case or find a way to make it work.
with writer.saving(self._fig, filename, dpi):
for data in itertools.izip(*[a.new_saved_frame_seq()
for a in all_anim]):
for anim, d in zip(all_anim, data):
#TODO: Need to see if turning off blit is really necessary
anim._draw_next_frame(d, blit=False)
writer.grab_frame(**savefig_kwargs)
# Reconnect signal for first draw if necessary
if reconnect_first_draw:
self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
self._start)
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
def new_frame_seq(self):
'Creates a new sequence of frame information.'
# Default implementation is just an iterator over self._framedata
return iter(self._framedata)
def new_saved_frame_seq(self):
'Creates a new sequence of saved/cached frame information.'
# Default is the same as the regular frame sequence
return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
# Breaks down the drawing of the next frame into steps of pre- and
# post- draw, as well as the drawing of the frame itself.
self._pre_draw(framedata, blit)
self._draw_frame(framedata)
self._post_draw(framedata, blit)
def _init_draw(self):
# Initial draw to clear the frame. Also used by the blitting code
# when a clean base is required.
pass
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists, self._blit_cache)
def _draw_frame(self, framedata):
# Performs actual drawing of the frame.
raise NotImplementedError('Needs to be implemented by subclasses to'
' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists, self._blit_cache)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
a.figure.canvas.restore_region(bg_cache[a])
def _setup_blit(self):
# Setting up the blit requires: a cache of the background for the
# axes
self._blit_cache = dict()
self._drawn_artists = []
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
self._post_draw(None, self._blit)
def _handle_resize(self, *args):
# On resize, we need to disable the resize event handling so we don't
# get too many events. Also stop the animation events, so that
# we're paused. Reset the cache and re-init. Set up an event handler
# to catch once the draw has actually taken place.
self._fig.canvas.mpl_disconnect(self._resize_id)
self.event_source.stop()
self._blit_cache.clear()
self._init_draw()
self._resize_id = self._fig.canvas.mpl_connect('draw_event',
self._end_redraw)
def _end_redraw(self, evt):
# Now that the redraw has happened, do the post draw flushing and
# blit handling. Then re-enable all of the original events.
self._post_draw(None, self._blit)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._resize_id)
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
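# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the `save`
# API documented above might be invoked on a finished animation.  It assumes
# an ffmpeg binary is installed so the 'ffmpeg' writer can be resolved from
# the `writers` registry; the filename and metadata values are arbitrary.
def _example_save_usage(anim):
    # Each keyword maps onto a parameter described in Animation.save:
    # frame rate, resolution, encoder bitrate and output metadata.
    anim.save('example.mp4', writer='ffmpeg', fps=30, dpi=100,
              bitrate=1800, metadata={'artist': 'demo'})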
class TimedAnimation(Animation):
'''
:class:`Animation` subclass that supports time-based animation, drawing
a new frame every *interval* milliseconds.
*repeat* controls whether the animation should repeat when the sequence
of frames is completed.
*repeat_delay* optionally adds a delay in milliseconds before repeating
the animation.
'''
def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
event_source=None, *args, **kwargs):
# Store the timing information
self._interval = interval
self._repeat_delay = repeat_delay
self.repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer()
event_source.interval = self._interval
Animation.__init__(self, fig, event_source=event_source,
*args, **kwargs)
def _step(self, *args):
'''
Handler for getting events.
'''
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to
# repeat, we refresh the frame sequence and return True. If
# _repeat_delay is set, change the event_source's interval to our loop
# delay and set the callback to one which will then set the interval
# back.
still_going = Animation._step(self, *args)
if not still_going and self.repeat:
self.frame_seq = self.new_frame_seq()
if self._repeat_delay:
self.event_source.remove_callback(self._step)
self.event_source.add_callback(self._loop_delay)
self.event_source.interval = self._repeat_delay
return True
else:
return Animation._step(self, *args)
else:
return still_going
def _stop(self, *args):
        # If we stop in the middle of a loop delay (which is relatively
        # likely given the potential pause there), remove the loop_delay
        # callback as well.
self.event_source.remove_callback(self._loop_delay)
Animation._stop(self)
def _loop_delay(self, *args):
# Reset the interval and change callbacks after the delay.
self.event_source.remove_callback(self._loop_delay)
self.event_source.interval = self._interval
self.event_source.add_callback(self._step)
Animation._step(self)
class ArtistAnimation(TimedAnimation):
'''
    Animation that displays a pre-computed sequence of artists.
    Before creating an instance, all plotting should have taken place
    and the relevant artists saved.
    *artists* is a list, with each entry a collection of artists that
    should be enabled on the corresponding frame. They will be disabled
    for all other frames.
'''
def __init__(self, fig, artists, *args, **kwargs):
# Internal list of artists drawn in the most recent frame.
self._drawn_artists = []
# Use the list of artists as the framedata, which will be iterated
# over by the machinery.
self._framedata = artists
TimedAnimation.__init__(self, fig, *args, **kwargs)
def _init_draw(self):
# Make all the artists involved in *any* frame invisible
axes = []
for f in self.new_frame_seq():
for artist in f:
artist.set_visible(False)
# Assemble a list of unique axes that need flushing
if artist.axes not in axes:
axes.append(artist.axes)
# Flush the needed axes
for ax in axes:
ax.figure.canvas.draw()
def _pre_draw(self, framedata, blit):
'''
Clears artists from the last frame.
'''
if blit:
# Let blit handle clearing
self._blit_clear(self._drawn_artists, self._blit_cache)
else:
# Otherwise, make all the artists from the previous frame invisible
for artist in self._drawn_artists:
artist.set_visible(False)
def _draw_frame(self, artists):
# Save the artists that were passed in as framedata for the other
# steps (esp. blitting) to use.
self._drawn_artists = artists
# Make all the artists from the current frame visible
for artist in artists:
artist.set_visible(True)
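# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building the
# per-frame artist lists that ArtistAnimation expects.  All artists are
# plotted up front on a caller-supplied figure and axes; each entry of
# `frames` holds the artists to show for one frame.
def _example_artist_animation(fig, ax):
    import numpy as np
    x = np.linspace(0, 2 * np.pi, 100)
    frames = []
    for i in range(30):
        # One line artist per frame; ArtistAnimation toggles visibility.
        line, = ax.plot(x, np.sin(x + i / 5.0), color='b')
        frames.append([line])
    return ArtistAnimation(fig, frames, interval=50, blit=True)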
class FuncAnimation(TimedAnimation):
'''
Makes an animation by repeatedly calling a function *func*, passing in
(optional) arguments in *fargs*.
*frames* can be a generator, an iterable, or a number of frames.
*init_func* is a function used to draw a clear frame. If not given, the
results of drawing from the first item in the frames sequence will be
used. This function will be called once before the first frame.
If blit=True, *func* and *init_func* should return an iterable of
drawables to clear.
'''
def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
save_count=None, **kwargs):
if fargs:
self._args = fargs
else:
self._args = ()
self._func = func
# Amount of framedata to keep around for saving movies. This is only
# used if we don't know how many frames there will be: in the case
# of no generator or in the case of a callable.
self.save_count = save_count
# Set up a function that creates a new iterable when needed. If nothing
# is passed in for frames, just use itertools.count, which will just
# keep counting from 0. A callable passed in for frames is assumed to
# be a generator. An iterable will be used as is, and anything else
# will be treated as a number of frames.
if frames is None:
self._iter_gen = itertools.count
elif callable(frames):
self._iter_gen = frames
elif iterable(frames):
self._iter_gen = lambda: iter(frames)
self.save_count = len(frames)
else:
self._iter_gen = lambda: iter(range(frames))
self.save_count = frames
        # If save_count was not passed in explicitly, default to 100 frames.
if self.save_count is None:
self.save_count = 100
self._init_func = init_func
# Needs to be initialized so the draw functions work without checking
self._save_seq = []
TimedAnimation.__init__(self, fig, **kwargs)
# Need to reset the saved seq, since right now it will contain data
# for a single frame from init, which is not what we want.
self._save_seq = []
def new_frame_seq(self):
# Use the generating function to generate a new frame sequence
return self._iter_gen()
def new_saved_frame_seq(self):
# Generate an iterator for the sequence of saved data. If there are
# no saved frames, generate a new frame sequence and take the first
# save_count entries in it.
if self._save_seq:
return iter(self._save_seq)
else:
return itertools.islice(self.new_frame_seq(), self.save_count)
def _init_draw(self):
# Initialize the drawing either using the given init_func or by
# calling the draw function with the first item of the frame sequence.
# For blitting, the init_func should return a sequence of modified
# artists.
if self._init_func is None:
self._draw_frame(next(self.new_frame_seq()))
else:
self._drawn_artists = self._init_func()
def _draw_frame(self, framedata):
# Save the data for potential saving of movies.
self._save_seq.append(framedata)
# Make sure to respect save_count (keep only the last save_count
# around)
self._save_seq = self._save_seq[-self.save_count:]
# Call the func with framedata and args. If blitting is desired,
# func needs to return a sequence of any artists that were modified.
self._drawn_artists = self._func(framedata, *self._args)
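# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal driver
# for the FuncAnimation API documented above.  Guarded so it only runs when
# this file is executed directly; assumes an interactive backend is set up.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 2 * np.pi, 200)
    line, = ax.plot(x, np.sin(x))
    def _update(frame):
        # Shift the wave each frame and return the modified artists so the
        # blitting machinery knows what needs redrawing.
        line.set_ydata(np.sin(x + frame / 10.0))
        return line,
    anim = FuncAnimation(fig, _update, frames=200, interval=30, blit=True)
    plt.show()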
|
mit
|
qifeigit/scikit-learn
|
benchmarks/bench_plot_ward.py
|
290
|
1260
|
"""
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
        X = np.random.normal(size=(int(n), int(p)))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
bsd-3-clause
|
nickgentoo/scikit-learn-graph
|
scripts/WLKernel_calculate_matrix.py
|
1
|
2984
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from skgraph.kernel.WLGraphKernel import WLGraphKernel
from skgraph.datasets import load_graph_datasets
import numpy as np
if __name__=='__main__':
    if len(sys.argv) < 6:
sys.exit("python ODDKernel_example.py dataset r l filename kernel")
dataset=sys.argv[1]
max_radius=int(sys.argv[2])
la=float(sys.argv[3])
#hashs=int(sys.argv[3])
njobs=1
name=str(sys.argv[4])
kernel=sys.argv[5]
if dataset=="CAS":
print "Loading bursi(CAS) dataset"
g_it=load_graph_datasets.load_graphs_bursi()
elif dataset=="GDD":
print "Loading GDD dataset"
g_it=load_graph_datasets.load_graphs_GDD()
elif dataset=="CPDB":
print "Loading CPDB dataset"
g_it=load_graph_datasets.load_graphs_CPDB()
elif dataset=="AIDS":
print "Loading AIDS dataset"
g_it=load_graph_datasets.load_graphs_AIDS()
elif dataset=="NCI1":
print "Loading NCI1 dataset"
g_it=load_graph_datasets.load_graphs_NCI1()
    else:
        print "Unknown dataset name"
        sys.exit(1)
if kernel=="WL":
print "Lambda ignored"
print "Using WL fast subtree kernel"
ODDkernel=WLGraphKernel(r=max_radius)
    else:
        print "Unrecognized kernel"
        sys.exit(1)
GM=ODDkernel.computeKernelMatrixTrain(g_it.graphs) #Parallel ,njobs
#print GM
# GMsvm=[]
# for i in xrange(len(GM)):
# GMsvm.append([])
# GMsvm[i]=[i+1]
# GMsvm[i].extend(GM[i])
# #print GMsvm
# from sklearn import datasets
# print "Saving Gram matrix"
# #datasets.dump_svmlight_file(GMsvm,g_it.target, name+".svmlight")
# datasets.dump_svmlight_file(np.array(GMsvm),g_it.target, name+".svmlight")
# #Test manual dump
print "Saving Gram matrix"
output=open(name+".svmlight","w")
for i in xrange(len(GM)):
output.write(str(g_it.target[i])+" 0:"+str(i+1)+" ")
for j in range(len(GM[i])):
output.write(str(j+1)+":"+str(GM[i][j])+" ")
output.write("\n")
output.close()
#print GMsvm
from sklearn import datasets
#datasets.dump_svmlight_file(GMsvm,g_it.target, name+".svmlight")
#print GM
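# Illustrative sketch (not part of the original script): one way the dumped
# Gram matrix could be consumed later.  It assumes the svmlight-style file
# written above, where column 0 holds the 1-based sample index and the
# remaining columns hold kernel values, and it uses scikit-learn's
# precomputed-kernel SVM; `path` is a hypothetical filename argument.
def _example_use_saved_gram(path):
    from sklearn.datasets import load_svmlight_file
    from sklearn.svm import SVC
    K, y = load_svmlight_file(path)
    K = K.toarray()[:, 1:]  # drop the leading sample-index column
    clf = SVC(kernel='precomputed')
    clf.fit(K, y)
    return clf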
|
gpl-3.0
|
tomkooij/sapphire
|
scripts/kascade/plot_matching_events.py
|
1
|
4766
|
import datetime
import calendar
import tables
from pylab import *
from scipy.optimize import curve_fit
from sapphire.kascade import KascadeCoincidences
from artist import GraphArtist, MultiPlot
import utils
USE_TEX = True
# For matplotlib plots
if USE_TEX:
rcParams['font.serif'] = 'Computer Modern'
rcParams['font.sans-serif'] = 'Computer Modern'
rcParams['font.family'] = 'sans-serif'
rcParams['figure.figsize'] = [4 * x for x in (1, 2. / 3)]
rcParams['figure.subplot.left'] = 0.175
rcParams['figure.subplot.bottom'] = 0.175
rcParams['font.size'] = 10
rcParams['legend.fontsize'] = 'small'
rcParams['text.usetex'] = True
def do_matching_plots(data):
plot_nearest_neighbors(data)
plot_residual_time_differences(data)
def plot_nearest_neighbors(data, limit=None):
global coincidences
hisparc_group = data.root.hisparc.cluster_kascade.station_601
kascade_group = data.root.kascade
coincidences = KascadeCoincidences(data, hisparc_group, kascade_group,
ignore_existing=True)
#dt_opt = find_optimum_dt(coincidences, p0=-13, limit=1000)
#print dt_opt
graph = GraphArtist(axis='semilogy')
styles = iter(['solid', 'dashed', 'dashdotted'])
uncorrelated = None
figure()
#for shift in -12, -13, dt_opt, -14:
for shift in -12, -13, -14:
print "Shifting", shift
coincidences.search_coincidences(shift, dtlimit=1, limit=limit)
print "."
dts = coincidences.coincidences['dt']
n, bins, p = hist(abs(dts) / 1e9, bins=linspace(0, 1, 101),
histtype='step', label='%.3f s' % shift)
n = [u if u else 1e-99 for u in n]
graph.histogram(n, bins, linestyle=styles.next() + ',gray')
if uncorrelated is None:
uncorrelated = n, bins
y, bins = uncorrelated
x = (bins[:-1] + bins[1:]) / 2
f = lambda x, N, a: N * exp(-a * x)
popt, pcov = curve_fit(f, x, y)
plot(x, f(x, *popt), label=r"$\lambda = %.2f$ Hz" % popt[1])
graph.plot(x, f(x, *popt), mark=None)
yscale('log')
xlabel("Time difference [s]")
graph.set_xlabel(r"Time difference [\si{\second}]")
ylabel("Counts")
graph.set_ylabel("Counts")
legend()
graph.set_ylimits(min=10)
utils.saveplot()
graph.save('plots/MAT-nearest-neighbors')
def find_optimum_dt(coincidences, p0, delta=1e-3, limit=None):
dt_new = p0
dt_old = Inf
while abs(dt_new - dt_old) > delta:
print "Trying:", dt_new
coincidences.search_coincidences(dt_new, dtlimit=1, limit=limit)
dts = coincidences.coincidences['dt'] / 1e9
dt_old = dt_new
if median(dts) > 0:
dt_new -= median(dts.compress(dts > 0))
else:
dt_new -= median(dts.compress(dts < 0))
return dt_new
def plot_residual_time_differences(data):
global idxes, dts
events = data.root.kascade.events
c_index = data.root.kascade.c_index
t0 = make_timestamp(2008, 7, 2)
t1 = make_timestamp(2008, 7, 3)
idxes = events.get_where_list('(t0 <= timestamp) & (timestamp < t1)')
t0_idx = min(idxes)
t1_idx = max(idxes)
dts = c_index.read_where('(t0_idx <= k_idx) & (k_idx < t1_idx)',
field='dt')
all_dts = c_index.col('dt')
figure()
subplot(121)
hist(all_dts / 1e3, bins=arange(-10, 2, .01), histtype='step')
title("July 1 - Aug 6, 2008")
xlabel("Time difference [us]")
ylabel("Counts")
subplot(122)
hist(dts / 1e3, bins=arange(-8, -6, .01), histtype='step')
title("July 2, 2008")
xlabel("Time difference [us]")
utils.saveplot()
graph = MultiPlot(1, 2, width=r'.45\linewidth')
n, bins = histogram(all_dts / 1e3, bins=arange(-10, 2, .01))
graph.histogram(0, 1, n, bins)
graph.set_title(0, 1, "Jul 1 - Aug 6, 2008")
n, bins = histogram(dts / 1e3, bins=arange(-8, -6, .01))
graph.histogram(0, 0, n, bins)
graph.set_title(0, 0, "Jul 2, 2008")
graph.set_xlabel(r"Time difference [\si{\micro\second}]")
graph.set_ylabel("Counts")
graph.set_ylimits(min=0)
graph.show_xticklabels_for_all([(0, 0), (0, 1)])
graph.show_yticklabels_for_all([(0, 0), (0, 1)])
graph.save('plots/MAT-residual-time-differences')
graph.save_as_pdf('preview')
def make_timestamp(year, month, day, hour=0, minutes=0, seconds=0):
dt = datetime.datetime(year, month, day, hour, minutes, seconds)
timetuple = dt.utctimetuple()
timestamp = calendar.timegm(timetuple)
return timestamp
if __name__ == '__main__':
try:
data
except NameError:
data = tables.open_file('kascade.h5', 'r')
utils.set_prefix('MAT-')
do_matching_plots(data)
|
gpl-3.0
|
rrohan/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
11
|
48140
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
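def _example_assert_tree_equal_usage():
    # Illustrative sketch (not part of the original test suite): two trees
    # grown from the same data with the same random_state should be judged
    # identical by the helper above.  The leading underscore keeps the nose
    # collector from picking this function up as a test.
    d = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
    s = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
    assert_tree_equal(d.tree_, s.tree_, "identically grown trees differ")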
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
        # using fewer features reduces the learning ability of this tree,
        # but also reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output 'balanced' which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample the larger datasets to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# Set n_samples equal to n_features to ease the simultaneous construction
# of the csr and csc matrices below
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y )
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
|
bsd-3-clause
|
nickvandewiele/RMG-Py
|
rmgpy/tools/diff_models.py
|
7
|
15173
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script can be used to compare two RMG-generated kinetics models. To use,
pass the Chemkin input file and species dictionary of each model on the
command line (see parseCommandLineArguments below for the optional thermo
and --web arguments).
"""
import os
import math
import numpy
import os.path
import logging
import argparse
from rmgpy.chemkin import loadChemkinFile
from rmgpy.rmg.model import ReactionModel
from rmgpy.rmg.output import saveDiffHTML
################################################################################
def compareModelKinetics(model1, model2):
"""
Compare the kinetics of :class:`ReactionModel` objects `model1` and
`model2`, printing the results to stdout.
"""
from matplotlib import pylab
# Determine reactions that both models have in common
commonReactions = {}
for rxn1 in model1.reactions:
for rxn2 in model2.reactions:
if rxn1.isIsomorphic(rxn2):
commonReactions[rxn1] = rxn2
model2.reactions.remove(rxn2)
break
uniqueReactions1 = [rxn for rxn in model1.reactions if rxn not in commonReactions.keys()]
uniqueReactions2 = model2.reactions
logging.info('{0:d} reactions were found in both models:'.format(len(commonReactions)))
for rxn in commonReactions:
logging.info(' {0!s}'.format(rxn))
logging.info('{0:d} reactions were only found in the first model:'.format(len(uniqueReactions1)))
for rxn in uniqueReactions1:
logging.info(' {0!s}'.format(rxn))
logging.info('{0:d} reactions were only found in the second model:'.format(len(uniqueReactions2)))
for rxn in uniqueReactions2:
logging.info(' {0!s}'.format(rxn))
from rmgpy.kinetics import Chebyshev
T = 1000; P = 1e5
kinetics1 = []; kinetics2 = []
for rxn1, rxn2 in commonReactions.iteritems():
kinetics1.append(rxn1.getRateCoefficient(T,P))
if rxn1.isIsomorphic(rxn2, eitherDirection=False):
kinetics2.append(rxn2.getRateCoefficient(T,P))
else:
kinetics2.append(rxn2.getRateCoefficient(T,P) / rxn2.getEquilibriumConstant(T))
fig = pylab.figure(figsize=(8,6))
ax = pylab.subplot(1,1,1)
pylab.loglog(kinetics1, kinetics2, 'o', picker=5)
xlim = pylab.xlim()
ylim = pylab.ylim()
lim = (min(xlim[0], ylim[0]), max(xlim[1], ylim[1]))
ax.loglog(lim, lim, '-k')
pylab.xlabel('Model 1 rate coefficient (SI units)')
pylab.ylabel('Model 2 rate coefficient (SI units)')
pylab.title('T = {0:g} K, P = {1:g} bar'.format(T, P/1e5))
pylab.xlim(lim)
pylab.ylim(lim)
def onpick(event):
xdata = event.artist.get_xdata()
ydata = event.artist.get_ydata()
for ind in event.ind:
logging.info(commonReactions.keys()[ind])
logging.info('k(T,P) = {0:9.2e} from model 1'.format(xdata[ind]))
logging.info('k(T,P) = {0:9.2e} from model 2'.format(ydata[ind]))
logging.info('ratio = 10**{0:.2f}'.format(math.log10(xdata[ind] / ydata[ind])))
connection_id = fig.canvas.mpl_connect('pick_event', onpick)
pylab.show()
def compareModelSpecies(model1, model2):
"""
This function compares the species of two RMG models and returns a list of
common species (as [spec1, spec2] pairs), as well as a list of the species
unique to each model.
"""
commonSpecies = []
uniqueSpecies1 = model1.species[:]
uniqueSpecies2 = []
for spec2 in model2.species:
for spec1 in uniqueSpecies1[:]: # make a copy so you don't remove from the list you are iterating over
if spec1.isIsomorphic(spec2):
commonSpecies.append([spec1, spec2])
uniqueSpecies1.remove(spec1)
break
else:
uniqueSpecies2.append(spec2)
# Remove species in the mechanism that aren't identified (includes those called out as species
# but not used)
for spec in uniqueSpecies1[:]: # make a copy so you don't remove from the list you are iterating over
if not len(spec.molecule):
uniqueSpecies1.remove(spec)
logging.warning("Removing species {!r} from model 1 because it has no molecule info".format(spec))
for spec in uniqueSpecies2[:]: # make a copy so you don't remove from the list you are iterating over
if not spec.molecule:
uniqueSpecies2.remove(spec)
logging.warning("Removing species {!r} from model 2 because it has no molecule info".format(spec))
return commonSpecies, uniqueSpecies1, uniqueSpecies2
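# Usage sketch (illustrative only; model1 and model2 are ReactionModel objects
# loaded via loadChemkinFile, as in execute() further below):
#   commonSpecies, uniqueSpecies1, uniqueSpecies2 = compareModelSpecies(model1, model2)
#   for spec1, spec2 in commonSpecies:
#       logging.info('{0!s} is matched by {1!s}'.format(spec1, spec2))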
def compareModelReactions(model1, model2):
"""
This function compares the reactions of two RMG models and returns a list of
common reactions (as [rxn1, rxn2] pairs), as well as a list of the reactions
unique to each model.
"""
reactionList1 = model1.reactions[:]
reactionList2 = model2.reactions[:]
# remove reactions that have an unidentified species
to_remove = []
for reactionList in (reactionList1, reactionList2):
for reaction in reactionList:
for side in (reaction.products, reaction.reactants):
for species in side:
if not species.molecule:
to_remove.append((reactionList,reaction))
logging.warning("Removing reaction {!r} that had unidentified species {!r}".format(reaction, species))
break
for reactionList, reaction in to_remove:
reactionList.remove(reaction)
commonReactions = []; uniqueReactions1 = []; uniqueReactions2 = []
for rxn1 in reactionList1:
for rxn2 in reactionList2[:]: # make a copy so you don't remove from the list you are iterating over
if rxn1.isIsomorphic(rxn2):
commonReactions.append([rxn1, rxn2])
# Remove reaction 2 from being chosen a second time.
# Let each reaction appear only once in the diff comparison.
# Otherwise this miscounts the number of reactions in model 2.
reactionList2.remove(rxn2)
break
for rxn1 in reactionList1:
for r1, r2 in commonReactions:
if rxn1 is r1:
break
else:
uniqueReactions1.append(rxn1)
for rxn2 in reactionList2:
for r1, r2 in commonReactions:
if rxn2 is r2:
break
else:
uniqueReactions2.append(rxn2)
return commonReactions, uniqueReactions1, uniqueReactions2
def saveCompareHTML(outputDir,chemkinPath1,speciesDictPath1,chemkinPath2,speciesDictPath2,readComments1=True,readComments2=True):
"""
Saves a model comparison HTML file based on two sets of chemkin and species dictionary
files.
"""
model1 = ReactionModel()
model1.species, model1.reactions = loadChemkinFile(chemkinPath1, speciesDictPath1, readComments = readComments1)
model2 = ReactionModel()
model2.species, model2.reactions = loadChemkinFile(chemkinPath2, speciesDictPath2, readComments = readComments2)
commonReactions, uniqueReactions1, uniqueReactions2 = compareModelReactions(model1, model2)
commonSpecies, uniqueSpecies1, uniqueSpecies2 = compareModelSpecies(model1, model2)
outputPath = outputDir + 'diff.html'
saveDiffHTML(outputPath, commonSpecies, uniqueSpecies1, uniqueSpecies2, commonReactions, uniqueReactions1, uniqueReactions2)
def enthalpyDiff(species):
"""
Returns the enthalpy discrepancy between the same species in the two models
"""
thermo0 = species[0].thermo
thermo1 = species[1].thermo
if thermo0 and thermo1:
diff = species[0].thermo.discrepancy(species[1].thermo)
else:
diff = 99999999
return -1*diff
def kineticsDiff(reaction):
"""
Returns some measure of the discrepancy between the kinetics of the same reaction in the two models
"""
kinetics0 = reaction[0].kinetics
kinetics1 = reaction[1].kinetics
if kinetics0 and kinetics1:
diff = reaction[0].kinetics.discrepancy(reaction[1].kinetics)
else:
diff = 9999999
return -1*diff
################################################################################
def parseCommandLineArguments():
parser = argparse.ArgumentParser()
parser.add_argument('chemkin1', metavar='CHEMKIN1', type=str, nargs=1,
help='the Chemkin file of the first model')
parser.add_argument('speciesDict1', metavar='SPECIESDICT1', type=str, nargs=1,
help='the species dictionary file of the first model')
parser.add_argument('--thermo1', metavar = 'THERMO1', type=str, nargs = 1,
help = 'the thermo file of the first model')
parser.add_argument('chemkin2', metavar='CHEMKIN2', type=str, nargs=1,
help='the Chemkin file of the second model')
parser.add_argument('speciesDict2', metavar='SPECIESDICT2', type=str, nargs=1,
help='the species dictionary file of the second model')
parser.add_argument('--thermo2', metavar = 'THERMO2', type=str, nargs = 1,
help = 'the thermo file of the second model')
parser.add_argument('--web', action='store_true', help='Running diff models through the RMG-website')
args = parser.parse_args()
return args
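# Example invocation (a sketch; the file names below are hypothetical):
#   python diff_models.py chem1.inp species_dictionary1.txt \
#       chem2.inp species_dictionary2.txt --thermo1 thermo1.dat --thermo2 thermo2.dat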
def main():
"""
Driver function that parses command line arguments and passes them to the execute function.
"""
args = parseCommandLineArguments()
chemkin1 = args.chemkin1[0]
speciesDict1 = args.speciesDict1[0]
if args.thermo1:
thermo1 = args.thermo1[0]
else:
thermo1 = None
chemkin2 = args.chemkin2[0]
speciesDict2 = args.speciesDict2[0]
if args.thermo2:
thermo2 = args.thermo2[0]
else:
thermo2 = None
kwargs = {
'web': args.web,
'wd': os.getcwd()
}
execute(chemkin1, speciesDict1, thermo1, chemkin2, speciesDict2, thermo2, **kwargs)
def execute(chemkin1, speciesDict1, thermo1, chemkin2, speciesDict2, thermo2, **kwargs):
model1 = ReactionModel()
model1.species, model1.reactions = loadChemkinFile(chemkin1, speciesDict1, thermoPath = thermo1)
model2 = ReactionModel()
model2.species, model2.reactions = loadChemkinFile(chemkin2, speciesDict2, thermoPath = thermo2)
commonSpecies, uniqueSpecies1, uniqueSpecies2 = compareModelSpecies(model1, model2)
commonReactions, uniqueReactions1, uniqueReactions2 = compareModelReactions(model1, model2)
try:
web = kwargs['web']
except KeyError:
web = False
if not web:
logging.info('{0:d} species were found in both models:'.format(len(commonSpecies)))
for spec1, spec2 in commonSpecies:
logging.info(' {0!s}'.format(spec1))
if spec1.thermo and spec2.thermo:
spec1.molecule[0].getSymmetryNumber()
logging.info(' {0:7.2f} {1:7.2f} {2:7.2f} {3:7.2f} {4:7.2f} {5:7.2f} {6:7.2f} {7:7.2f} {8:7.2f}'.format(
spec1.thermo.getEnthalpy(300) / 4184.,
spec1.thermo.getEntropy(300) / 4.184,
spec1.thermo.getHeatCapacity(300) / 4.184,
spec1.thermo.getHeatCapacity(400) / 4.184,
spec1.thermo.getHeatCapacity(500) / 4.184,
spec1.thermo.getHeatCapacity(600) / 4.184,
spec1.thermo.getHeatCapacity(800) / 4.184,
spec1.thermo.getHeatCapacity(1000) / 4.184,
spec1.thermo.getHeatCapacity(1500) / 4.184,
))
logging.info(' {0:7.2f} {1:7.2f} {2:7.2f} {3:7.2f} {4:7.2f} {5:7.2f} {6:7.2f} {7:7.2f} {8:7.2f}'.format(
spec2.thermo.getEnthalpy(300) / 4184.,
spec2.thermo.getEntropy(300) / 4.184,
spec2.thermo.getHeatCapacity(300) / 4.184,
spec2.thermo.getHeatCapacity(400) / 4.184,
spec2.thermo.getHeatCapacity(500) / 4.184,
spec2.thermo.getHeatCapacity(600) / 4.184,
spec2.thermo.getHeatCapacity(800) / 4.184,
spec2.thermo.getHeatCapacity(1000) / 4.184,
spec2.thermo.getHeatCapacity(1500) / 4.184,
))
logging.info('{0:d} species were only found in the first model:'.format(len(uniqueSpecies1)))
for spec in uniqueSpecies1:
logging.info(' {0!s}'.format(spec))
logging.info('{0:d} species were only found in the second model:'.format(len(uniqueSpecies2)))
for spec in uniqueSpecies2:
logging.info(' {0!s}'.format(spec))
logging.info('{0:d} reactions were found in both models:'.format(len(commonReactions)))
for rxn1, rxn2 in commonReactions:
logging.info(' {0!s}'.format(rxn1))
if rxn1.kinetics and rxn2.kinetics:
logging.info(' {0:7.2f} {1:7.2f} {2:7.2f} {3:7.2f} {4:7.2f} {5:7.2f} {6:7.2f} {7:7.2f}'.format(
math.log10(rxn1.kinetics.getRateCoefficient(300, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(400, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(500, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(600, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(800, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(1000, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(1500, 1e5)),
math.log10(rxn1.kinetics.getRateCoefficient(2000, 1e5)),
))
logging.info(' {0:7.2f} {1:7.2f} {2:7.2f} {3:7.2f} {4:7.2f} {5:7.2f} {6:7.2f} {7:7.2f}'.format(
math.log10(rxn2.kinetics.getRateCoefficient(300, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(400, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(500, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(600, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(800, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(1000, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(1500, 1e5)),
math.log10(rxn2.kinetics.getRateCoefficient(2000, 1e5)),
))
logging.info('{0:d} reactions were only found in the first model:'.format(len(uniqueReactions1)))
for rxn in uniqueReactions1:
logging.info(' {0!s}'.format(rxn))
logging.info('{0:d} reactions were only found in the second model:'.format(len(uniqueReactions2)))
for rxn in uniqueReactions2:
logging.info(' {0!s}'.format(rxn))
logging.info("Saving output in diff.html")
try:
wd = kwargs['wd']
except KeyError:
wd = os.getcwd()
outputPath = os.path.join(wd, 'diff.html')
saveDiffHTML(outputPath, commonSpecies, uniqueSpecies1, uniqueSpecies2, commonReactions, uniqueReactions1, uniqueReactions2)
logging.info("Finished!")
return commonSpecies, uniqueSpecies1, uniqueSpecies2, commonReactions, uniqueReactions1, uniqueReactions2
|
mit
|
jniediek/mne-python
|
examples/stats/plot_linear_regression_raw.py
|
9
|
2249
|
"""
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s - obtained by regressing the continuous data -
are a generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True).pick_types(
meg='grad', stim=True, eeg=False).filter(1, None, method='iir')
# Set up events
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# regular epoching
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results, and their difference
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)))
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
(evokeds[cond] - epochs[cond].average()).plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
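# ---------------------------------------------------------------------------
# A minimal numpy sketch (not part of the original example) of why regression
# reduces to averaging when events do not overlap and all predictors are
# binary: the lagged 0/1 design matrix then has orthogonal columns, so each
# regression coefficient is simply the mean of the signal at that lag.
# All names and numbers below are illustrative only.
import numpy as np

rng = np.random.RandomState(0)
n_times, n_lags = 1000, 5
onsets = np.arange(50, 950, 50)              # well-separated events, one condition
true_response = np.array([0., 1., 2., 1., 0.])
signal = np.zeros(n_times)
for onset in onsets:
    signal[onset:onset + n_lags] += true_response
signal += 0.1 * rng.randn(n_times)

# lagged binary design matrix: column j is the event train shifted by j samples
X = np.zeros((n_times, n_lags))
for j in range(n_lags):
    X[onsets + j, j] = 1.

beta = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, signal))        # "rERF" estimate
average = np.mean([signal[o:o + n_lags] for o in onsets], axis=0)  # classical average
print(np.allclose(beta, average))  # True: identical when epochs do not overlap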
|
bsd-3-clause
|
thientu/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source - Copy/Lib/site-packages/dask/bytes/tests/test_hdfs.py
|
4
|
10079
|
from __future__ import print_function, division, absolute_import
import os
import posixpath
from distutils.version import LooseVersion
import pytest
from toolz import concat
import dask
from dask.bytes.core import read_bytes, open_files, get_fs
from dask.compatibility import unicode, PY2
try:
import distributed
from distributed import Client
from distributed.utils_test import cluster, loop # noqa: F401
except ImportError:
distributed = None
try:
import hdfs3
except ImportError:
hdfs3 = None
try:
import pyarrow
from dask.bytes.pyarrow import _MIN_PYARROW_VERSION_SUPPORTED
PYARROW_DRIVER = LooseVersion(pyarrow.__version__) >= _MIN_PYARROW_VERSION_SUPPORTED
except ImportError:
PYARROW_DRIVER = False
pyarrow = None
if not os.environ.get('DASK_RUN_HDFS_TESTS', ''):
pytestmark = pytest.mark.skip(reason="HDFS tests not configured to run")
basedir = '/tmp/test-dask'
# This fixture checks for a minimum pyarrow version
@pytest.fixture(params=[pytest.mark.skipif(not hdfs3, 'hdfs3',
reason='hdfs3 not found'),
pytest.mark.skipif(not PYARROW_DRIVER, 'pyarrow',
reason='required pyarrow version not found')])
def hdfs(request):
if request.param == 'hdfs3':
hdfs = hdfs3.HDFileSystem(host='localhost', port=8020)
else:
hdfs = pyarrow.hdfs.connect(host='localhost', port=8020)
if hdfs.exists(basedir):
hdfs.rm(basedir, recursive=True)
hdfs.mkdir(basedir)
with dask.config.set(hdfs_driver=request.param):
yield hdfs
if hdfs.exists(basedir):
hdfs.rm(basedir, recursive=True)
# This mark doesn't check the minimum pyarrow version.
require_pyarrow = pytest.mark.skipif(not pyarrow, reason="pyarrow not installed")
require_hdfs3 = pytest.mark.skipif(not hdfs3, reason="hdfs3 not installed")
@require_pyarrow
@require_hdfs3
def test_fs_driver_backends():
from dask.bytes.hdfs3 import HDFS3HadoopFileSystem
from dask.bytes.pyarrow import PyArrowHadoopFileSystem
fs1, token1 = get_fs('hdfs')
assert isinstance(fs1, HDFS3HadoopFileSystem)
with dask.config.set(hdfs_driver='pyarrow'):
fs2, token2 = get_fs('hdfs')
assert isinstance(fs2, PyArrowHadoopFileSystem)
assert token1 != token2
with pytest.raises(ValueError):
with dask.config.set(hdfs_driver='not-a-valid-driver'):
get_fs('hdfs')
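# Usage sketch outside the test-suite (assumes a reachable HDFS namenode on
# localhost:8020, as configured for these tests; paths are illustrative):
#   with dask.config.set(hdfs_driver='pyarrow'):
#       sample, values = read_bytes('hdfs://localhost:8020/tmp/test-dask/file.*')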
def test_read_bytes(hdfs):
nfiles = 10
data = b'a' * int(1e3)
for fn in ['%s/file.%d' % (basedir, i) for i in range(nfiles)]:
with hdfs.open(fn, 'wb', replication=1) as f:
f.write(data)
sample, values = read_bytes('hdfs://%s/file.*' % basedir)
(results,) = dask.compute(values)
assert [b''.join(r) for r in results] == nfiles * [data]
def test_read_bytes_URL(hdfs):
nfiles = 10
data = b'a' * int(1e3)
for fn in ['%s/file.%d' % (basedir, i) for i in range(nfiles)]:
with hdfs.open(fn, 'wb', replication=1) as f:
f.write(data)
path = 'hdfs://localhost:8020%s/file.*' % basedir
sample, values = read_bytes(path)
(results,) = dask.compute(values)
assert [b''.join(r) for r in results] == nfiles * [data]
def test_read_bytes_big_file(hdfs):
fn = '%s/file' % basedir
# Write 100 MB file
nblocks = int(1e3)
blocksize = int(1e5)
data = b'a' * blocksize
with hdfs.open(fn, 'wb', replication=1) as f:
for i in range(nblocks):
f.write(data)
sample, values = read_bytes('hdfs://' + fn, blocksize=blocksize)
assert sample[:5] == b'aaaaa'
assert len(values[0]) == nblocks
(results,) = dask.compute(values[0])
assert sum(map(len, results)) == nblocks * blocksize
for r in results:
assert set(r.decode('utf-8')) == {'a'}
def test_deterministic_key_names(hdfs):
data = b'abc\n' * int(1e3)
fn = '%s/file' % basedir
with hdfs.open(fn, 'wb', replication=1) as fil:
fil.write(data)
_, x = read_bytes('hdfs://%s/*' % basedir, delimiter=b'\n', sample=False)
_, y = read_bytes('hdfs://%s/*' % basedir, delimiter=b'\n', sample=False)
_, z = read_bytes('hdfs://%s/*' % basedir, delimiter=b'c', sample=False)
assert [f.key for f in concat(x)] == [f.key for f in concat(y)]
assert [f.key for f in concat(x)] != [f.key for f in concat(z)]
def test_open_files_write(hdfs):
path = 'hdfs://%s/' % basedir
data = [b'test data %i' % i for i in range(5)]
files = open_files(path, num=len(data), mode='wb')
for fil, b in zip(files, data):
with fil as f:
f.write(b)
sample, vals = read_bytes('hdfs://%s/*.part' % basedir)
(results,) = dask.compute(list(concat(vals)))
assert data == results
def test_read_csv(hdfs):
dd = pytest.importorskip('dask.dataframe')
with hdfs.open('%s/1.csv' % basedir, 'wb') as f:
f.write(b'name,amount,id\nAlice,100,1\nBob,200,2')
with hdfs.open('%s/2.csv' % basedir, 'wb') as f:
f.write(b'name,amount,id\nCharlie,300,3\nDennis,400,4')
df = dd.read_csv('hdfs://%s/*.csv' % basedir)
assert isinstance(df, dd.DataFrame)
assert df.id.sum().compute() == 1 + 2 + 3 + 4
@pytest.mark.skipif(PY2, reason=("pyarrow's hdfs isn't fork-safe, requires "
"multiprocessing `spawn` start method"))
def test_read_text(hdfs):
db = pytest.importorskip('dask.bag')
import multiprocessing as mp
pool = mp.get_context('spawn').Pool(2)
with hdfs.open('%s/text.1.txt' % basedir, 'wb') as f:
f.write('Alice 100\nBob 200\nCharlie 300'.encode())
with hdfs.open('%s/text.2.txt' % basedir, 'wb') as f:
f.write('Dan 400\nEdith 500\nFrank 600'.encode())
with hdfs.open('%s/other.txt' % basedir, 'wb') as f:
f.write('a b\nc d'.encode())
b = db.read_text('hdfs://%s/text.*.txt' % basedir)
with dask.config.set(pool=pool):
result = b.str.strip().str.split().map(len).compute()
assert result == [2, 2, 2, 2, 2, 2]
b = db.read_text('hdfs://%s/other.txt' % basedir)
with dask.config.set(pool=pool):
result = b.str.split().flatten().compute()
assert result == ['a', 'b', 'c', 'd']
def test_read_text_unicode(hdfs):
db = pytest.importorskip('dask.bag')
data = b'abcd\xc3\xa9'
fn = '%s/data.txt' % basedir
with hdfs.open(fn, 'wb') as f:
f.write(b'\n'.join([data, data]))
f = db.read_text('hdfs://' + fn, collection=False)
result = f[0].compute()
assert len(result) == 2
assert list(map(unicode.strip, result)) == [data.decode('utf-8')] * 2
assert len(result[0].strip()) == 5
@require_pyarrow
@require_hdfs3
def test_pyarrow_compat():
from dask.bytes.hdfs3 import HDFS3HadoopFileSystem
dhdfs = HDFS3HadoopFileSystem()
pa_hdfs = dhdfs._get_pyarrow_filesystem()
assert isinstance(pa_hdfs, pyarrow.filesystem.FileSystem)
@require_pyarrow
def test_parquet_pyarrow(hdfs):
dd = pytest.importorskip('dask.dataframe')
import pandas as pd
import numpy as np
fn = '%s/test.parquet' % basedir
hdfs_fn = 'hdfs://%s' % fn
df = pd.DataFrame(np.random.normal(size=(1000, 4)),
columns=list('abcd'))
ddf = dd.from_pandas(df, npartitions=4)
ddf.to_parquet(hdfs_fn, engine='pyarrow')
assert len(hdfs.ls(fn)) # Files are written
ddf2 = dd.read_parquet(hdfs_fn, engine='pyarrow')
assert len(ddf2) == 1000 # smoke test on read
def test_glob(hdfs):
if type(hdfs).__module__.startswith('hdfs3'):
from dask.bytes.hdfs3 import HDFS3HadoopFileSystem
hdfs = HDFS3HadoopFileSystem.from_hdfs3(hdfs)
else:
from dask.bytes.pyarrow import PyArrowHadoopFileSystem
hdfs = PyArrowHadoopFileSystem.from_pyarrow(hdfs)
tree = {basedir: (['c', 'c2'], ['a', 'a1', 'a2', 'a3', 'b1']),
basedir + '/c': (['d'], ['x1', 'x2']),
basedir + '/c2': (['d'], ['x1', 'x2']),
basedir + '/c/d': ([], ['x3'])}
hdfs.mkdirs(basedir + '/c/d/')
hdfs.mkdirs(basedir + '/c2/d/')
for fn in (posixpath.join(dirname, f)
for (dirname, (_, fils)) in tree.items()
for f in fils):
with hdfs.open(fn, mode='wb') as f2:
f2.write(b'000')
assert (set(hdfs.glob(basedir + '/a*')) ==
{basedir + p for p in ['/a', '/a1', '/a2', '/a3']})
assert (set(hdfs.glob(basedir + '/c/*')) ==
{basedir + p for p in ['/c/x1', '/c/x2', '/c/d']})
assert (set(hdfs.glob(basedir + '/*/x*')) ==
{basedir + p for p in ['/c/x1', '/c/x2', '/c2/x1', '/c2/x2']})
assert (set(hdfs.glob(basedir + '/*/x1')) ==
{basedir + p for p in ['/c/x1', '/c2/x1']})
assert hdfs.glob(basedir + '/c') == [basedir + '/c']
assert hdfs.glob(basedir + '/c/') == [basedir + '/c/']
assert hdfs.glob(basedir + '/a') == [basedir + '/a']
assert hdfs.glob('/this-path-doesnt-exist') == []
assert hdfs.glob(basedir + '/missing/') == []
assert hdfs.glob(basedir + '/missing/x1') == []
assert hdfs.glob(basedir + '/missing/*') == []
assert hdfs.glob(basedir + '/*/missing') == []
assert (set(hdfs.glob(basedir + '/*')) ==
{basedir + p for p in ['/a', '/a1', '/a2', '/a3', '/b1', '/c', '/c2']})
@pytest.mark.skipif(not distributed, # noqa: F811
reason="Skipped as distributed is not installed.") # noqa: F811
def test_distributed(hdfs, loop): # noqa: F811
dd = pytest.importorskip('dask.dataframe')
with hdfs.open('%s/1.csv' % basedir, 'wb') as f:
f.write(b'name,amount,id\nAlice,100,1\nBob,200,2')
with hdfs.open('%s/2.csv' % basedir, 'wb') as f:
f.write(b'name,amount,id\nCharlie,300,3\nDennis,400,4')
with cluster() as (s, [a, b]):
with Client(s['address'], loop=loop): # noqa: F811
df = dd.read_csv('hdfs://%s/*.csv' % basedir)
assert df.id.sum().compute() == 1 + 2 + 3 + 4
|
gpl-3.0
|
YinongLong/scikit-learn
|
examples/cluster/plot_agglomerative_clustering.py
|
343
|
2931
|
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
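# The connectivity constraint is just a sparse adjacency matrix, so any such
# matrix (e.g. a grid graph for image data) could be passed instead. A quick
# sanity check of what we built (a sketch, not part of the original example):
print("connectivity matrix: %s with %d stored edges" % (knn_graph.shape, knn_graph.nnz))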
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
|
bsd-3-clause
|
h-mayorquin/camp_india_2016
|
tutorials/LTPinnetworks2/Step3d_Zenke_etal_2014.py
|
1
|
8978
|
#!/usr/bin/env python
'''
Based on:
Zenke, Friedemann, Everton J. Agnes, and Wulfram Gerstner.
"Diverse Synaptic Plasticity Mechanisms Orchestrated to Form and Retrieve Memories in Spiking Neural Networks."
Nature Communications 6 (April 21, 2015).
Part of Zenke's rule embedded in modified Brunel 2000 / Ostojic 2014 network
author: Aditya Gilra, Jun 2016.
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
# 'from pylab import *' which imports:
# matplotlib-style plotting commands into the namespace; further,
# np. can be used for numpy and mpl. for matplotlib
from data_utils import *
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
#prefs.codegen.target = 'numpy'
#prefs.codegen.target = 'weave'
prefs.codegen.target = 'cython'
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 10*second
defaultclock.dt = simdt # set Brian's sim time step
dt = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
vt = 20.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = 10.*mV # Reset potential
muext0 = 24*mV # external input to each neuron
taur = 0.5*ms # Refractory period
taudelay = 0.75*ms # synaptic delay
eqs_neurons='''
muext : volt
dv/dt=-v/taum + muext/taum : volt
'''
# ###########################################
# Network parameters: numbers
# ###########################################
N = 4096+1024 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
rescale = 2 # rescale C and J to maintain total input
C = 1000/rescale # Number of incoming connections on each neuron (exc or inh)
J = 0.01*mV*rescale # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 10000, C = 1000
g = 5.0 # -gJ is the inh strength. For exc-inh balance g >~ f/(1-f) = 4
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
wmax = 10. # hard bound on synaptic weight
Apre_tau = 20*ms # STDP Apre LTP time constant; tauplus
Apost_tau = 20*ms # STDP Apost LTD time constant; tauminus
Apre0 = 1.0 # incr in Apre, on pre-spikes; Aplus for LTP
# at spike coincidence, delta w = -Apre0*eta
Apost0 = 1.0 # incr in Apost on post-spikes; Aminus for LTD
eta = 5e-2 # learning rate
Apostslow0 = 1.0 # incr in Apostslow on post spike
Apostslow_tau = 100*ms
stdp_eqns = ''' wsyn : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
dApostslow/dt=-Apostslow/Apostslow_tau : 1 (event-driven) '''
rate0 = 50*Hz
Apost0 = Apre0*Apre_tau*(1+Apostslow0*Apostslow_tau*rate0)/Apost_tau
# incr in Apost on post spike for triplet rule
pre_eqns = '''Apre += Apre0
wsyn += -Apost*eta
wsyn=clip(wsyn,0,wmax)
v+=wsyn*J'''
post_eqns = '''Apost += Apost0
wsyn += eta*Apre*(1 + Apostslow)
Apostslow+=Apostslow0
wsyn=clip(wsyn,0,wmax)'''
def dwbydt(r):
return eta*(Apre0*Apre_tau/second - Apost0*Apost_tau/second)*r**2 + \
eta*Apre0*Apre_tau/second * Apostslow0*Apostslow_tau/second*r**3
figure()
rrange = arange(0,90,0.1)
plot(rrange,dwbydt(rrange))
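# The choice of Apost0 above places the LTP/LTD threshold of the expected
# weight drift at r = rate0: substituting it into dwbydt gives
# dw/dt ~ r**2 * (r - rate0), so rates below rate0 yield net depression and
# rates above rate0 net potentiation. Quick numerical check (rates in Hz;
# a sketch, not part of the original script):
print 'dw/dt at 25, 50, 75 Hz:', dwbydt(25.), dwbydt(50.), dwbydt(75.)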
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
P=NeuronGroup(N,model=eqs_neurons,\
threshold='v>=vt',reset='v=vr',refractory=taur,method='euler')
P.v = uniform(0.,vt/mV,N)*mV
PE = P[:NE]
PI = P[NE:]
# ###########################################
# Connecting the network
# ###########################################
sparseness = C/float(N)
# E to E connections
#conEE = Synapses(PE,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEE = Synapses(PE,PE,stdp_eqns,\
on_pre=pre_eqns,on_post=post_eqns,\
method='euler')
#conEE.connect(condition='i!=j',p=sparseness)
# need exact connection indices for weight monitor in standalone mode
conEE_idxs_pre = []
conEE_idxs_post = []
Ce = int(fexc*C)
for k in range(NE):
conEE_idxs_pre.extend(Ce*[k])
idxs = range(NE)
idxs.remove(k) # no autapses i.e. no self-connections
l = np.random.permutation(idxs)[:Ce]
conEE_idxs_post.extend(l)
conEE_idxs_assembly = where(array(conEE_idxs_post)[:Ce*400]<400)[0]
conEE_idxs_cross = where(array(conEE_idxs_post)[:Ce*400]>400)[0]
conEE_idxs_bgnd = where(array(conEE_idxs_post)[Ce*400:]>400)[0]
conEE.connect(i=conEE_idxs_pre,j=conEE_idxs_post)
conEE.delay = taudelay
conEE.wsyn = 1.
# E to I connections
conIE = Synapses(PE,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conIE.connect(p=sparseness)
conIE.delay = taudelay
conIE.wsyn = 1
# I to E connections
conEI = Synapses(PI,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEI.connect(p=sparseness)
conEI.delay = taudelay
conEI.wsyn = -g
# I to I connections
conII = Synapses(PI,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conII.connect(condition='i!=j',p=sparseness)
conII.delay = taudelay
conII.wsyn = -g
# ###########################################
# Stimuli
# ###########################################
P.muext = muext0
# 400 neurons (~10%) receive stimulus current to increase firing
Pstim = P[:400]
Pstim.muext = muext0 + 7*mV
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
sm = SpikeMonitor(P)
# Population monitor
popm = PopulationRateMonitor(P)
# voltage monitor
sm_vm = StateMonitor(P,'v',record=range(10)+range(NE,NE+10))
# weights monitor
wm = StateMonitor(conEE,'wsyn', record=range(Ce*NE), dt=simtime/20.)
# ###########################################
# Simulate
# ###########################################
print "Setup complete, running for",simtime,"at dt =",dt,"s."
t1 = time.time()
run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
print 'inittime + runtime, t = ', time.time() - t1
#print "For g,J =",g,J,"mean exc rate =",\
# sm_e.num_spikes/float(NE)/(simtime/second),'Hz.'
#print "For g,J =",g,J,"mean inh rate =",\
# sm_i.num_spikes/float(NI)/(simtime/second),'Hz.'
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
# raster plot
subplot(231)
plot(sm.t,sm.i,',')
title(str(N)+" exc & inh neurons")
xlim([simtime/second-1,simtime/second])
xlabel("")
print "plotting firing rates"
subplot(232)
tau=50e-3
sigma = tau/2.
# firing rates
timeseries = arange(0,simtime/second+dt,dt)
rate = np.zeros(int(simtime/simdt))
for nrni in range(400):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'r')
rate = np.zeros(int(simtime/simdt))
for nrni in range(400,800):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'b')
title("exc rates: assembly (r), bgnd (b)")
ylabel("Hz")
ylim(0,300)
subplot(233)
hist(wm.wsyn[:,-1],bins=500,edgecolor='none')
xlabel('weight')
ylabel('count')
subplot(235)
num_to_plot = 10
for nrni in range(NE,NE+num_to_plot):
rate = rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_i[nrni])
#rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("time (s)")
ylabel("Hz")
print "plotting weights"
subplot(236)
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_assembly,:],axis=0),color='r')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_cross,:],axis=0),color='m')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_bgnd,:],axis=0),color='b')
title("assembly weights (cross=m)")
ylabel("arb")
xlabel("time (s)")
print conEE.wsyn
fig.tight_layout()
show()
|
mit
|
wanggang3333/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
image of size 512 x 512, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
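# Back-of-the-envelope storage cost of the quantization above (a sketch, not
# part of the original example): each pixel now only needs an index into the
# n_clusters-entry codebook instead of a full 8-bit value.
import math
bits_per_pixel = int(math.ceil(math.log(n_clusters, 2)))  # 3 bits for 5 clusters
print("approx. pixel-storage reduction: 8 bits -> %d bits (%.1fx smaller)"
      % (bits_per_pixel, 8. / bits_per_pixel))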
|
bsd-3-clause
|
laura-dietz/taia-stream-eval
|
code/perf-over-time.py
|
1
|
8722
|
"""
Plot system performance over time for days, weeks, and all.
"""
import os.path
import numpy as np
from utils import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from truthutil import *
from argparse import ArgumentParser
from utils import correctWeighting
evalDir = '~/kba-evaluation/taia/data/intervals/'
plotDir = 'perf-over-time/'
parser = ArgumentParser()
parser.add_argument('--plot-teams', action='store_true',help='Generate plots for each team',default=False)
parser.add_argument('--judgmentLevel', type=int, help='Judgement level', default=1)
parser.add_argument('--subplot', action='store_true', help='Plot week,day,all on one figure', default=False)
parser.add_argument('--weighted', action='store_true', help='Use weighted aggregation', default=False)
parser.add_argument('-d', '--dir', metavar='DIR', default=evalDir)
args = parser.parse_args()
judgmentLevel = args.judgmentLevel
plot_teams = args.plot_teams
CORRECTED = args.weighted
evalDir = os.path.expanduser(args.dir)
if not os.path.exists(evalDir+plotDir):
os.makedirs(evalDir+plotDir)
print "writing plots to ",(evalDir+plotDir)
metrics = ['correctedAUC','MAP','nDCG@R','Prec@R','numPosPredictions']
weekRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('week.tsv')]
dayRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('day.tsv')]
allRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('all.tsv')]
testEntityList = [ 'Alex_Kapranos' ,'Darren_Rowse', 'Satoshi_Ishii', 'Bill_Coen']
fullEntityList = [
'Aharon_Barak',
'Alexander_McCall_Smith',
'Alex_Kapranos',
'Annie_Laurie_Gaylor',
'Basic_Element_(company)',
'Basic_Element_(music_group)',
'Bill_Coen',
'Boris_Berezovsky_(businessman)',
'Boris_Berezovsky_(pianist)',
'Charlie_Savage',
'Darren_Rowse',
'Douglas_Carswell',
'Frederick_M._Lawrence',
'Ikuhisa_Minowa',
'James_McCartney',
'Jim_Steyer',
'Lisa_Bloom',
'Lovebug_Starski',
'Mario_Garnero',
'Masaru_Emoto',
'Nassim_Nicholas_Taleb',
'Rodrigo_Pimentel',
'Roustam_Tariko',
'Ruth_Rendell',
'Satoshi_Ishii',
'Vladimir_Potanin',
'William_Cohen',
'William_D._Cohan',
'William_H._Gates,_Sr',
]
entityList = fullEntityList
eval_dtype = np.dtype([('team','50a'),('runname','50a'),('query','50a'),('intervalLow','d4'),('intervalUp','d4'),('unjudged','50a'),('judgmentLevel','d4'),('metric','50a'),('value','f4')])
def judgmentLevelToStr(judgementLevel):
return 'central' if judgementLevel==2 else 'relevant+central'
def correctedToStr():
return 'WEIGHTED' if CORRECTED else 'UNIFORM'
stats_dtype = np.dtype([('team','50a'),('runname','50a'),('intervalLow','f4'),('unjudged','50a'),('judgmentLevel','d4'),('metric','50a'),('mean','f4'),('stdev','f4'),('intervalType','50a')])
def createStatsRecord(team, runname, intervalLow, unjudgedAs, judgmentLevel, metricname, mean, stdev, intervalType):
return np.array([(team, runname, intervalLow, unjudgedAs,judgmentLevel,metricname, mean, stdev, intervalType)], dtype=stats_dtype)
records = []
allIntervalRunfiles = {'all':allRunfiles, 'week':weekRunfiles, 'day':dayRunfiles}
fig = plt.figure()
allteams = ['CWI', 'LSIS', 'PRIS', 'SCIAITeam', 'UMass_CIIR', 'UvA', 'helsinki', 'hltcoe', 'igpi2012', 'udel_fang', 'uiucGSLIS']
teamColors={team:cm.hsv(1. * i/len(allteams),1) for i,team in enumerate(np.unique(allteams))}
print teamColors
def teamColor(team):
return teamColors[team]
teamss=[]
def createPlot(prefix,intervalRunfiles, metric,entityList):
fig = plt.figure(figsize=(8.0, 4.0))
for idx,(intervalType, runfiles) in enumerate(intervalRunfiles.items()[:]):
if args.subplot : fig.add_subplot(3,2,(idx*2+1))
else: fig.add_subplot(1,2,1)
plt.locator_params(axis='both', nbins=5)
plt.title(intervalType)
for runIdx, evalFile in enumerate(sorted(runfiles[:])):
print ' processing evalFile',evalFile
df = np.genfromtxt(evalFile, dtype=eval_dtype, missing_values='', autostrip=False, delimiter='\t')
team = df[0]['team']
teamss.append(team)
runname = df[0]['runname']
ys = []
xs = []
seriesLabel=team+' '+runname
for (intervalLow, intervalUp) in intervalBounds[judgmentLevel][intervalType]:
data = df[np.logical_and(df['intervalLow']==intervalLow,
np.logical_and(df['metric']==metric, df['judgmentLevel']==judgmentLevel))]
if len(data)>0:
values = [data[data['query']==entity]['value'][0]
if np.count_nonzero(data['query']==entity)>0 else 0.0
for entity in entityList
if isPosIntervalForEntity(judgmentLevel, entity, intervalLow, intervalUp)
]
#compute numPos / totalPos * numScoredIntervals * values
correctedValues = [ correctWeighting( data[data['query']==entity]['value'][0]
, posTruthsInterval(judgmentLevel, entity, intervalLow, intervalUp)
, posTruths(judgmentLevel, entity)
,numPosIntervals(judgmentLevel, entity, intervalType))
if np.count_nonzero(data['query']==entity)>0 else 0.0
for entity in entityList
if isPosIntervalForEntity(judgmentLevel, entity, intervalLow, intervalUp)
]
team = data[0]['team']
runname = data[0]['runname']
unjudgedAs = data[0]['unjudged']
uniformWeighting = values
correctedWeighting = correctedValues
weightedValues = uniformWeighting if not CORRECTED else correctedWeighting
#print 'uniform =',np.mean(uniformWeighting), ' corrected=',np.mean(correctedWeighting),' choosing ',np.mean(weightedValues)
records.append(createStatsRecord(team, runname, intervalLow, unjudgedAs, judgmentLevel, metric, np.mean(weightedValues), np.std(weightedValues), intervalType))
ys.append(np.mean(weightedValues))
xs.append(intervalLow)
if(ys) and intervalType=='all':
xs.append(evalTRend)
ys.append(ys[-1])
plotcolor = teamColors[team]
plt.plot(epochsToDate(np.array(xs)), ys, label=seriesLabel, color=plotcolor, alpha=0.5, ls='-',marker='.')
if args.subplot and idx==0:
plt.legend(loc='center left', bbox_to_anchor=(1.+(idx*0.5), 0.5),fontsize='small')
if not args.subplot:
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize='small')
plt.ylabel(renameMetric(metric))
plt.xlabel('ETR days')
plt.xlim(0, 110)
if not args.subplot:
plt.savefig("%s%s_%s_teams_over_time_%s_%s.pdf"%(prefix,intervalType,metric, judgmentLevelToStr(judgmentLevel), correctedToStr()), bbox_inches='tight')
plt.clf()
if args.subplot: fig.subplots_adjust(hspace=0.5, wspace=0.5)
if args.subplot: plt.savefig("%s%s_teams_over_time_%s_%s.pdf"%(prefix,metric, judgmentLevelToStr(judgmentLevel),correctedToStr()), bbox_inches='tight')
plt.clf()
def plotAll(prefix):
for metric in metrics:
createPlot(evalDir+plotDir+prefix+'_',allIntervalRunfiles, metric,entityList)
def plotTeams():
if plot_teams:
for team in allteams[:]:
print 'processing team',team
tweekRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('week.tsv') and team in file]
tdayRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('day.tsv') and team in file]
tallRunfiles = [(evalDir)+file for file in os.listdir(os.path.expanduser(evalDir)) if file.endswith('all.tsv') and team in file]
intervalRunfiles = {'all':tallRunfiles[:], 'week':tweekRunfiles[:], 'day':tdayRunfiles[:]}
for metric in metrics:
createPlot(evalDir+plotDir+team+'_', intervalRunfiles, metric,entityList)
plotAll('overview')
plotTeams()
|
apache-2.0
|
CforED/Machine-Learning
|
sklearn/metrics/cluster/supervised.py
|
22
|
30444
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by contingency_matrix.
This helps to avoid a MemoryError with regression targets
for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
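# Minimal usage sketch (not part of the original module): for
# labels_true=[0, 0, 1, 1] and labels_pred=[0, 1, 1, 1] the matrix is
#   [[1, 1],
#    [0, 2]]
# i.e. the two samples of true class 0 are split across both predicted
# clusters, while both samples of true class 1 fall in predicted cluster 1.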
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even with permuted labels
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters are
    complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
    ami : float (upper-limited by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
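# A minimal usage sketch (not part of the library module; it assumes
# scikit-learn is installed and exposes these metrics under sklearn.metrics).
# It exercises the functions defined above on a toy pair of labelings.
if __name__ == "__main__":
    from sklearn.metrics import (adjusted_rand_score, mutual_info_score,
                                 normalized_mutual_info_score, v_measure_score)

    truth = [0, 0, 1, 1, 2, 2]
    pred = [1, 1, 0, 0, 2, 2]   # same partition as `truth`, labels permuted
    # All of these scores are invariant to a permutation of the label values,
    # so each one reports a perfect match here.
    print(adjusted_rand_score(truth, pred))            # 1.0
    print(normalized_mutual_info_score(truth, pred))   # 1.0
    print(v_measure_score(truth, pred))                # 1.0
    # Mutual information itself is not normalized; for identical partitions it
    # equals the entropy of the labeling (here ln(3) ~= 1.0986).
    print(mutual_info_score(truth, pred))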
|
bsd-3-clause
|
adamgreenhall/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
286
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
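# A short follow-up sketch (added for illustration, not part of the original
# example): LabelSpreading also infers labels for the points it was never
# given; ``transduction_`` exposes them, so we can check how well the 30%
# model recovers the hidden labels.
unlabeled_30 = (y_30 == -1)
recovered = ls30[0].transduction_[unlabeled_30]
print("Accuracy on points unlabeled in the 30%% run: %.3f"
      % np.mean(recovered == y[unlabeled_30]))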
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
sklearn/decomposition/_lda.py
|
5
|
31298
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: https://github.com/blei-lab/onlineldavb
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln, logsumexp
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, gen_batches, gen_even_slices
from ..utils.validation import check_non_negative
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ._online_lda_fast import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
exp_topic_word_distr : ndarray of shape (n_topics, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
    cal_sstats : bool
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set `cal_sstats` to `True` when we need to run the M-step.
    random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None will initialize the
        document topic distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in range(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(TransformerMixin, BaseEstimator):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_components : int, default=10
Number of topics.
.. versionchanged:: 0.19
``n_topics`` was renamed to ``n_components``
doc_topic_prior : float, default=None
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_components`.
In [1]_, this is called `alpha`.
topic_word_prior : float, default=None
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_components`.
In [1]_, this is called `eta`.
learning_method : {'batch', 'online'}, default='batch'
        Method used to update ``components_``. Only used in the :meth:`fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
.. versionchanged:: 0.20
The default learning method is now ``"batch"``.
learning_decay : float, default=0.7
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, default=10.
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : int, default=10
The maximum number of iterations.
batch_size : int, default=128
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int, default=0
How often to evaluate perplexity. Only used in `fit` method.
set it to 0 or negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
total_samples : int, default=1e6
Total number of documents. Only used in the :meth:`partial_fit` method.
perp_tol : float, default=1e-1
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, default=1e-3
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int, default=100
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, default=None
The number of jobs to use in the E-step.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbosity level.
random_state : int, RandomState instance or None, default=None
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Variational parameters for topic word distribution. Since the complete
conditional for topic word distribution is a Dirichlet,
``components_[i, j]`` can be viewed as pseudocount that represents the
number of times word `j` was assigned to topic `i`.
It can also be viewed as distribution over the words for each topic
after normalization:
``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
exp_dirichlet_component_ : ndarray of shape (n_components, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
bound_ : float
Final perplexity score on training set.
doc_topic_prior_ : float
Prior of document topic distribution `theta`. If the value is None,
it is `1 / n_components`.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
number generator or by `np.random`.
topic_word_prior_ : float
Prior of topic word distribution `beta`. If the value is None, it is
`1 / n_components`.
Examples
--------
>>> from sklearn.decomposition import LatentDirichletAllocation
>>> from sklearn.datasets import make_multilabel_classification
>>> # This produces a feature matrix of token counts, similar to what
>>> # CountVectorizer would produce on text.
>>> X, _ = make_multilabel_classification(random_state=0)
>>> lda = LatentDirichletAllocation(n_components=5,
... random_state=0)
>>> lda.fit(X)
LatentDirichletAllocation(...)
>>> # get topics for some given samples:
>>> lda.transform(X[-2:])
array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],
[0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])
References
----------
.. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D.
Hoffman, David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
https://github.com/blei-lab/onlineldavb
"""
@_deprecate_positional_args
def __init__(self, n_components=10, *, doc_topic_prior=None,
topic_word_prior=None, learning_method='batch',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=None, verbose=0, random_state=None):
self.n_components = n_components
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_components <= 0:
raise ValueError("Invalid 'n_components' parameter: %r"
% self.n_components)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_components, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
cal_sstats : bool
            Parameter that indicates whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run the M-step.
        random_init : bool
            Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0,
self.verbose - 1))
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
        Update ``components_`` by batch VB or online VB.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
total_samples : int
Total number of documents. It is only used when
batch_update is `False`.
batch_update : bool
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
        # update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _more_tags(self):
return {'requires_positive_X': True}
def _check_non_neg_array(self, X, reset_n_features, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = self._validate_data(X, reset=reset_n_features,
accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
self
"""
self._check_params()
first_time = not hasattr(self, 'components_')
X = self._check_non_neg_array(
X, reset_n_features=first_time,
whom="LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if first_time:
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs,
verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, reset_n_features=True,
whom="LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs,
verbose=max(0, self.verbose - 1)) as parallel:
for i in range(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d of max_iter: %d, perplexity: %.4f'
% (i + 1, max_iter, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
self.n_iter_ += 1
# calculate final perplexity value on train set
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
return self
def _unnormalized_transform(self, X):
"""Transform data X according to fitted model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(
X, reset_n_features=True,
whom="LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False,
whom="LatentDirichletAllocation.transform")
doc_topic_distr = self._unnormalized_transform(X)
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : bool, default=False
Compensate for subsampling of documents.
            It is used to calculate the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_components = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_components)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(X, reset_n_features=False,
whom="LatentDirichletAllocation.score")
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def _perplexity_precomp_distr(self, X, doc_topic_distr=None,
sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), \
default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=True,
whom="LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_components != self.n_components:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
def perplexity(self, X, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because user no longer has access to unnormalized distribution
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
"""
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
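# A minimal usage sketch (not part of the library module; it assumes
# scikit-learn is installed). It exercises the online path through
# ``partial_fit`` on mini-batches, mirroring the ``learning_method='online'``
# branch of ``fit`` above; the fake count matrix below merely stands in for
# CountVectorizer-style output.
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import LatentDirichletAllocation

    rng = np.random.RandomState(0)
    X_demo = rng.poisson(1.0, size=(40, 25))     # 40 documents, 25 terms
    lda = LatentDirichletAllocation(n_components=3, total_samples=40,
                                    random_state=0)
    for start in range(0, X_demo.shape[0], 10):  # mini-batches of 10 documents
        lda.partial_fit(X_demo[start:start + 10])
    print(lda.transform(X_demo[:2]))             # per-document topic weights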
|
bsd-3-clause
|
bert9bert/statsmodels
|
examples/python/robust_models_1.py
|
5
|
8670
|
## M-Estimators for Robust Linear Modeling
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.compat.pandas import sort_values
# * An M-estimator minimizes the function
#
# $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$
#
# where $\rho$ is a symmetric function of the residuals
#
# * The effect of $\rho$ is to reduce the influence of outliers
# * $s$ is an estimate of scale.
# * The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm
# * We have several choices available for the weighting functions to be used
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(support, weights_func(support))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, fontsize=16)
ax.set_ylim(-.1, 1.1)
return ax
#### Andrew's Wave
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
#### Hampel's 17A
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Huber's t
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
#### Least Squares
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
#### Ramsay's Ea
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
#### Trimmed Mean
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Tukey's Biweight
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Scale Estimators
# * Robust estimates of the location
x = np.array([1, 2, 3, 4, 500])
# * The mean is not a robust estimator of location
x.mean()
# * The median, on the other hand, is a robust estimator with a breakdown point of 50%
np.median(x)
# * Analogously for the scale
# * The standard deviation is not robust
x.std()
# Median Absolute Deviation
#
# $$ median_i |X_i - median_j(X_j)| $$
# Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$
#
# $$\hat{\sigma}=K \cdot MAD$$
#
# where $K$ depends on the distribution. For the normal distribution for example,
#
# $$K = \Phi^{-1}(.75)$$
stats.norm.ppf(.75)
print(x)
sm.robust.scale.stand_mad(x)
np.array([1,2,3,4,5.]).std()
# * The default for Robust Linear Models is MAD
# * another popular choice is Huber's proposal 2
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDE(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.stand_mad(fat_tails)
sm.robust.stand_mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
#### Duncan's Occupational Prestige data - M-estimation for outliers
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.ix['minister'][['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.ix[np.abs(student) > 2])
print(infl.summary_frame().ix['minister'])
sidak = ols_model.outlier_test('sidak')
sort_values(sidak, 'unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
sort_values(fdr, 'unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
#### Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
# * Data is on the luminosity and temperature of 47 stars in the direction of Cygnus.
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.ix[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
# * Why? Because M-estimators are not robust to leverage points.
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.ix[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sort_values(sidak2, 'unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
sort_values(fdr2, 'unadj_p', inplace=True)
print(fdr2)
# * Let's delete that line
del ax.lines[-1]
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
# * MM-estimators are good for this type of problem; unfortunately, we don't have these yet.
# * It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook.
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
get_ipython().magic(u'load_ext rmagic')
get_ipython().magic(u'R library(robustbase)')
get_ipython().magic(u'Rpush yy xx')
get_ipython().magic(u'R mod <- lmrob(yy ~ xx);')
get_ipython().magic(u'R params <- mod$coefficients;')
get_ipython().magic(u'Rpull params')
get_ipython().magic(u'R print(mod)')
print(params)
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='green')
#### Exercise: Breakdown points of M-estimator
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
y[random_idx] = np.random.uniform(-750, 750)
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = list(map(se_loss, all_betas - beta_true))
##### Squared error loss
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
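# Hedged extension (added for illustration; not part of the original notebook):
# repeating the same contamination experiment with plain OLS shows how much
# faster least squares breaks down than the default Huber M-estimator above.
all_betas_ols = []
for i in range(mc_iter):
    y = np.dot(X, beta_true) + np.random.normal(size=nobs)
    random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
    y[random_idx] = np.random.uniform(-750, 750)
    all_betas_ols.append(sm.OLS(y, X).fit().params)
all_betas_ols = np.asarray(all_betas_ols)
np.array([se_loss(b - beta_true) for b in all_betas_ols]).mean()  # compare with the RLM loss above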
|
bsd-3-clause
|
jameshicks/basketballdatabase
|
basketballdatabase/common.py
|
1
|
2943
|
import cPickle as pickle
import gzip
import re
from urlparse import urljoin
from time import sleep as delay
from functools import wraps
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
def load(filename):
with gzip.open(filename) as f:
return pickle.load(f)
searchurl = "http://www.basketball-reference.com/search/search.fcgi"
time_between_requests = .5
def throttled(f):
    ''' Decorator that throttles HTTP requests by sleeping between calls. '''
@wraps(f)
def wrapper(*args, **kwargs):
print 'Throttling request: {}:'.format(args)
delay(time_between_requests)
return f(*args, **kwargs)
return wrapper
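# Hedged usage sketch (added; not part of the original module): any request
# helper can be rate limited simply by decorating it.
@throttled
def _example_fetch(url):
    return requests.get(url)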
baseurl = r'http://www.basketball-reference.com/'
def relativeurl_to_absolute(url):
return urljoin(baseurl, url)
def consolidate_dfs(dfs):
return pd.concat(dfs, axis=0)
def get_backtobacks(iterable):
    ''' Return a list of booleans: True where a game was played within 36 hours of the previous one. '''
l = pd.to_datetime(iterable)
# 36 hours in seconds
thirtysixhours = 129600
return [i > 0 and (gamedate - l[i-1]).total_seconds() <= thirtysixhours
for i, gamedate in enumerate(l)]
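def _backtoback_example():
    # Hedged illustration (added; not in the original module): only the second
    # game falls within 36 hours of the one before it.
    return get_backtobacks(['2015-01-01', '2015-01-02', '2015-01-05'])  # [False, True, False]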
@throttled
def search_player(playername):
''' Returns the player data URL '''
pg = requests.get(searchurl, params={'search': playername})
pg.raise_for_status()
if 'player' in pg.url:
# We've been redirected to the player page because it was the only
# result in professional basketball
return pg.url
soup = BeautifulSoup(pg.text)
search_results = soup.find_all(class_='search-item')
for result in search_results:
for tag in result.find_all(lambda x: x.name=='a' and
'players' in x.get('href')
and 'nbdl' not in x.get('href')
and 'euro' not in x.get('href')):
if tag.string.lower().startswith(playername.lower()):
return relativeurl_to_absolute(tag.get('href'))
else:
raise ValueError('No player or non-unique player: {}'.format(playername))
@throttled
def search_team(teamname):
pg = requests.get(searchurl, params={'search': teamname})
pg.raise_for_status()
soup = BeautifulSoup(pg.text)
def is_team_url(l):
if l.name != 'a':
return False
url = l.get('href')
return bool(re.match(r'^http://.*/teams/.*/', url))
return relativeurl_to_absolute(soup.find(name='a', href=re.compile(r'^/teams/.*/')).get('href'))
def streak(iterable, predicate):
iterable = list(int(x) for x in iterable)
strk = np.zeros(len(iterable), dtype=np.uint8)
prev = False
for i, x in enumerate(iterable):
v = 1 if predicate(x) else 0
if v and i > 0:
v += strk[i-1]
strk[i] = v
return strk
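def _streak_example():
    # Hedged illustration (added; not in the original module): a win/loss
    # sequence becomes a running win-streak counter.
    wins = [1, 1, 0, 1, 1, 1]
    return streak(wins, lambda w: w == 1)  # -> array([1, 2, 0, 1, 2, 3], dtype=uint8)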
class ParseError(Exception):
pass
|
mit
|
plissonf/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual maximum likelihood estimate of the covariance is very
sensitive to the presence of outliers in the data set, and therefore so are
the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is resistant to
"erroneous" observations in the data set and that the associated Mahalanobis
distances accurately reflect the true organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
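# Hedged sanity check (added to this example): the Mahalanobis distance
# formula quoted in the module docstring can be reproduced by hand and
# compared with EmpiricalCovariance.mahalanobis on a single observation.
diff = X[:1] - emp_cov.location_
manual_d2 = diff.dot(np.linalg.inv(emp_cov.covariance_)).dot(diff.T)[0, 0]
assert np.isclose(manual_d2, emp_cov.mahalanobis(X[:1])[0])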
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
albz/Architect
|
utils/python_utils/general_plot_utilities/plot_integrated_parameters.py
|
1
|
2546
|
#!/usr/bin/python
######################################################################
# Name: plot_integrated_parameters
# Author: A. Marocchino
# Date: 20-09-2016
# Purpose: plot main bunch integrated parameters and export them in a figure
# Source: python
# --- --- --- #
#--- Architect-PYTHON-paths! ---#
#PATH="~/Codes/Code_Architect/Architect/utils/python_utils:$PATH"
#export PATH
#####################################################################
### loading shell commands
import os, os.path, glob, sys, shutil
import time, datetime
import scipy
import numpy as np
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import pylab as pyl
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use(os.path.join(os.path.expanduser('~'),'Codes','Python_general_controllers','python_plot','plot_style_ppth.mplstyle'))
### --- ###
# --- inputs --- #
if(len(sys.argv)<2):
    print('not enough input arguments')
    print('select bunch')
    sys.exit()
bunch_select = int(sys.argv[1])
# --- path --- #
path = os.getcwd()
# --- plot --- #
for root_dir, sub_dirs, files in os.walk(path):
if '==started==' in files or '==completed==' in files:
if(bunch_select==1): bunch = np.loadtxt(os.path.join(root_dir,'out','integrated_diagnostics','bunch_integrated_quantity_1_dcut.dat'))
if(bunch_select==2): bunch = np.loadtxt(os.path.join(root_dir,'out','integrated_diagnostics','bunch_integrated_quantity_2_dcut.dat'))
fig = pyl.figure(1)
fig.set_size_inches(3.25,3.6,forward=True)
ax1 = pyl.subplot(111)
#--- sigma ---#
ax1.plot(bunch[:,0]/1e4,bunch[:,7], lw=1, label=r"$\sigma_x$ ($\mu$m)")
ax1.plot(bunch[:,0]/1e4,bunch[:,8], lw=1, label=r"$\sigma_y$ ($\mu$m)")
#--- emittance ---#
ax1.plot(bunch[:,0]/1e4,bunch[:,13], lw=1, label=r'$\epsilon_x$ ($\mu$m)' )
ax1.plot(bunch[:,0]/1e4,bunch[:,14], lw=1, label=r'$\epsilon_y$ ($\mu$m)' )
#--- energy spread ---#
ax1.plot(bunch[:,0]/1e4,bunch[:,16]*100, lw=1, label=r'$\sigma_E$%' )
#--- labels ---#
ax1.set_xlabel('Z (cm)')
# ax1.legend(loc=9, ncol=2, prop={'size':7.5})
ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.17),fancybox=True, shadow=False, ncol=3)
pyl.subplots_adjust(bottom=0.30)
# ax1.xaxis.set_major_locator(MultipleLocator(1.))
# ax1.xaxis.set_minor_locator(MultipleLocator(.5))
# pyl.savefig(os.path.join(path,'im_witness_integrated_parameter.pdf'), format='pdf')
plt.show()
|
gpl-3.0
|
PrashntS/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
hitszxp/scikit-learn
|
sklearn/utils/tests/test_validation.py
|
12
|
7588
|
"""Tests for input validation functions"""
from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from itertools import product
from sklearn.utils import as_float_array, check_array
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.utils.validation import has_fit_parameter
def test_as_float_array():
"""Test function for as_float_array"""
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
"""Confirm that input validation code does not return np.matrix"""
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
"""Confirm that input validation code doesn't copy memory mapped arrays"""
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
"""Check that ordering is enforced correctly by validation utilities.
We need to check each validation utility, because a 'copy' without
'order=K' will kill the ordering.
"""
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
for copy in (True, False):
Y = check_array(X, accept_sparse='csr', copy=copy, order='C')
assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
X_checked = check_array(X, dtype=dtype, accept_sparse=accept_sparse,
copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
|
bsd-3-clause
|
PatrickOReilly/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/metrics/ranking.py
|
1
|
32074
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array, check_X_y
from ..utils.multiclass import type_of_target
from ..utils.extmath import stable_cumsum
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import LabelBinarizer
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
way to summarize a precision-recall curve, see
:func:`average_precision_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Compute the area under the ROC curve
average_precision_score : Compute average precision from prediction scores
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
AP summarizes a precision-recall curve as the weighted mean of precisions
achieved at each threshold, with the increase in recall from the previous
threshold used as the weight:
.. math::
\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
where :math:`P_n` and :math:`R_n` are the precision and recall at the nth
threshold [1]_. This implementation is not interpolated and is different
from computing the area under the precision-recall curve with the
trapezoidal rule, which uses linear interpolation and can be too
optimistic.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels (either {0, 1} or {-1, 1}).
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/w/index.php?title=Information_retrieval&
oldid=793358396#Average_precision>`_
See also
--------
roc_auc_score : Compute the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.83...
"""
def _binary_uninterpolated_average_precision(
y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
return _average_binary_score(_binary_uninterpolated_average_precision,
y_true, y_score, average,
sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)
from prediction scores.
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels (either {0, 1} or {-1, 1}).
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
# Check to make sure y_true is valid
y_type = type_of_target(y_true)
if not (y_type == "binary" or
(y_type == "multiclass" and pos_label is not None)):
raise ValueError("{0} format is not supported".format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
# express fps as a cumsum to ensure fps is increasing even in
        # the presence of floating point errors
fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
See also
--------
average_precision_score : Compute average precision from prediction scores
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute the area under the ROC curve
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is 1: the
            # label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by assigning the maximal rank that would
    have been given to all tied values.
Note: Our implementation's score is 1 greater than the one given in
Tsoumakas et al., 2010. This extends it to handle the degenerate case
in which an instance has 0 true labels.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
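    Examples
    --------
    A minimal illustration (added here; the output follows from the
    implementation below):
    >>> import numpy as np
    >>> from sklearn.metrics import coverage_error
    >>> y_true = np.array([[1, 0, 0], [0, 1, 1]])
    >>> y_score = np.array([[0.7, 0.8, 0.1], [0.3, 0.9, 0.4]])
    >>> coverage_error(y_true, y_score)
    2.0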
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
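    Examples
    --------
    A minimal illustration (added here; the output follows from the
    implementation below):
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_loss
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_loss(y_true, y_score)
    0.75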
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = np.bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = np.bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # If the scores are ordered, the number of incorrectly ordered pairs
        # can be counted in linear time by cumulatively counting how many false
        # labels of a given score have a score higher than the accumulated true
        # labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
def dcg_score(y_true, y_score, k=5):
"""Discounted cumulative gain (DCG) at rank K.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
score : float
References
----------
.. [1] `Wikipedia entry for the Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
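    Examples
    --------
    A minimal illustration (added here; the output follows from the
    implementation below):
    >>> y_true = [1, 0, 2]
    >>> y_score = [0.1, 0.2, 0.9]
    >>> dcg_score(y_true, y_score, k=5)
    3.5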
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gain = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gain / discounts)
def ndcg_score(y_true, y_score, k=5):
"""Normalized discounted cumulative gain (NDCG) at rank K.
Normalized Discounted Cumulative Gain (NDCG) measures the performance of a
recommendation system based on the graded relevance of the recommended
entities. It varies from 0.0 to 1.0, with 1.0 representing the ideal
ranking of the entities.
Parameters
----------
y_true : array, shape = [n_samples]
        Ground truth (true labels represented as integers).
y_score : array, shape = [n_samples, n_classes]
Predicted probabilities.
k : int
Rank.
Returns
-------
score : float
Examples
--------
>>> y_true = [1, 0, 2]
>>> y_score = [[0.15, 0.55, 0.2], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
>>> ndcg_score(y_true, y_score, k=2)
1.0
>>> y_score = [[0.9, 0.5, 0.8], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
>>> ndcg_score(y_true, y_score, k=2)
0.66666666666666663
References
----------
.. [1] `Kaggle entry for the Normalized Discounted Cumulative Gain
<https://www.kaggle.com/wiki/NormalizedDiscountedCumulativeGain>`_
"""
y_score, y_true = check_X_y(y_score, y_true)
    # Make sure we use all the labels (max between the length and the highest
    # number in the array)
lb = LabelBinarizer()
lb.fit(np.arange(max(np.max(y_true) + 1, len(y_true))))
binarized_y_true = lb.transform(y_true)
if binarized_y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different value ranges")
scores = []
# Iterate over each y_value_true and compute the DCG score
for y_value_true, y_value_score in zip(binarized_y_true, y_score):
actual = dcg_score(y_value_true, y_value_score, k)
best = dcg_score(y_value_true, y_value_true, k)
scores.append(actual / best)
return np.mean(scores)
|
bsd-3-clause
|
ch3ll0v3k/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
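# Hedged aside (added; not part of the original example): the docstring's
# remark about building a purer class-B subset can be made concrete by
# keeping only samples whose decision score exceeds a chosen cut-off.
score_cut = 0.0  # assumed threshold, purely illustrative
high_purity_B = X[twoclass_output > score_cut]
print("Samples with decision score > %.1f (predicted class B): %d"
      % (score_cut, len(high_purity_B)))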
plt.show()
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
sklearn/model_selection/_validation.py
|
6
|
67265
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
import warnings
import numbers
import time
from traceback import format_exc
from contextlib import suppress
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, logger
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, _safe_indexing
from ..utils.validation import _check_fit_params
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..utils.metaestimators import _safe_split
from ..metrics import check_scoring
from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from ..exceptions import FitFailedWarning, NotFittedError
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict',
'permutation_test_score', 'learning_curve', 'validation_curve']
@_deprecate_positional_args
def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None,
n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=False,
return_estimator=False, error_score=np.nan):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str, callable, list/tuple, or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
return_train_score : bool, default=False
Whether to include train scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
return_estimator : bool, default=False
Whether to return the estimators fitted on each split.
.. versionadded:: 0.20
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : dict of float arrays of shape (n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
Suffix ``_score`` in ``test_score`` changes to a specific
metric like ``test_r2`` or ``test_auc`` if there are
multiple scoring metrics in the scoring parameter.
``train_score``
The score array for train scores on each cv split.
Suffix ``_score`` in ``train_score`` changes to a specific
metric like ``train_r2`` or ``train_auc`` if there are
multiple scoring metrics in the scoring parameter.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
            cv split. (Note that time for scoring on the train set is not
            included even if ``return_train_score`` is set to ``True``.)
``estimator``
The estimator objects for each cv split.
This is available only if ``return_estimator`` parameter
is set to ``True``.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, cv=3)
>>> sorted(cv_results.keys())
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score']
array([0.33150734, 0.08022311, 0.03531764])
Multiple metric evaluation using ``cross_validate``
(please refer the ``scoring`` parameter doc for more information)
>>> scores = cross_validate(lasso, X, y, cv=3,
... scoring=('r2', 'neg_mean_squared_error'),
... return_train_score=True)
>>> print(scores['test_neg_mean_squared_error'])
[-3635.5... -3573.3... -6114.7...]
>>> print(scores['train_r2'])
[0.28010158 0.39088426 0.22784852]
See Also
---------
cross_val_score : Run cross-validation for single metric evaluation.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
if callable(scoring):
scorers = scoring
elif scoring is None or isinstance(scoring, str):
scorers = check_scoring(estimator, scoring)
else:
scorers = _check_multimetric_scoring(estimator, scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
results = parallel(
delayed(_fit_and_score)(
clone(estimator), X, y, scorers, train, test, verbose, None,
fit_params, return_train_score=return_train_score,
return_times=True, return_estimator=return_estimator,
error_score=error_score)
for train, test in cv.split(X, y, groups))
    # For callable scoring, the return type is only known after calling. If the
# return type is a dictionary, the error scores can now be inserted with
# the correct key.
if callable(scoring):
_insert_error_scores(results, error_score)
results = _aggregate_score_dicts(results)
ret = {}
ret['fit_time'] = results["fit_time"]
ret['score_time'] = results["score_time"]
if return_estimator:
ret['estimator'] = results["estimator"]
test_scores_dict = _normalize_score_results(results["test_scores"])
if return_train_score:
train_scores_dict = _normalize_score_results(results["train_scores"])
for name in test_scores_dict:
ret['test_%s' % name] = test_scores_dict[name]
if return_train_score:
key = 'train_%s' % name
ret[key] = train_scores_dict[name]
return ret
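# A minimal illustrative sketch (not part of the upstream source): how the
# multimetric return format documented above looks in practice, including the
# per-split estimators returned when ``return_estimator=True``. The name
# `_demo_cross_validate_multimetric` and the dataset/estimator choice are
# assumptions for illustration only; the function is never called at import.
def _demo_cross_validate_multimetric():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    X, y = load_diabetes(return_X_y=True)
    cv_results = cross_validate(
        Ridge(), X, y, cv=5,
        scoring=('r2', 'neg_mean_absolute_error'),
        return_train_score=True, return_estimator=True)
    # Keys follow the 'test_<metric>' / 'train_<metric>' naming built above,
    # plus 'fit_time', 'score_time' and 'estimator'.
    return sorted(cv_results)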
def _insert_error_scores(results, error_score):
"""Insert error in `results` by replacing them inplace with `error_score`.
This only applies to multimetric scores because `_fit_and_score` will
handle the single metric case.
"""
successful_score = None
failed_indices = []
for i, result in enumerate(results):
if result["fit_failed"]:
failed_indices.append(i)
elif successful_score is None:
successful_score = result["test_scores"]
if successful_score is None:
raise NotFittedError("All estimators failed to fit")
if isinstance(successful_score, dict):
formatted_error = {name: error_score for name in successful_score}
for i in failed_indices:
results[i]["test_scores"] = formatted_error.copy()
if "train_scores" in results[i]:
results[i]["train_scores"] = formatted_error.copy()
def _normalize_score_results(scores, scaler_score_key='score'):
"""Creates a scoring dictionary based on the type of `scores`"""
if isinstance(scores[0], dict):
# multimetric scoring
return _aggregate_score_dicts(scores)
    # scalar
return {scaler_score_key: scores}
@_deprecate_positional_args
def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None,
cv=None, n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', error_score=np.nan):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)`` which should return only
a single value.
Similar to :func:`cross_validate`
but only a single metric is permitted.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : ndarray of float of shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y, cv=3))
[0.33150734 0.08022311 0.03531764]
See Also
---------
cross_validate : To run cross-validation on multiple metrics and also to
return train scores, fit times and score times.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
"""
    # Ensure that a multimetric format is not used; only a single metric is supported here
scorer = check_scoring(estimator, scoring=scoring)
cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
scoring={'score': scorer}, cv=cv,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch,
error_score=error_score)
return cv_results['test_score']
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
        Return parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_failed : bool
            Whether the estimator failed to fit.
"""
if not isinstance(error_score, numbers.Number) and error_score != 'raise':
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += (f"; {candidate_progress[0]+1}/"
f"{candidate_progress[1]}")
if verbose > 1:
if parameters is None:
params_msg = ''
else:
            sorted_keys = sorted(parameters)  # Ensure deterministic output
params_msg = (', '.join(f'{k}={parameters[k]}'
for k in sorted_keys))
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
result["fit_failed"] = True
else:
result["fit_failed"] = False
fit_time = time.time() - start_time
test_scores = _score(estimator, X_test, y_test, scorer, error_score)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2 and isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
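# A minimal illustrative sketch (not part of the upstream source): a direct
# call to the private `_fit_and_score` helper on a single shuffled 80/20
# split, showing the result-dict keys described in its docstring. The name
# `_demo_fit_and_score_single_split` and the dataset choice are assumptions
# for illustration only.
def _demo_fit_and_score_single_split():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    estimator = LogisticRegression(max_iter=1000)
    scorer = check_scoring(estimator, scoring='accuracy')
    rng = np.random.RandomState(0)
    n = _num_samples(X)
    permutation = rng.permutation(n)
    train, test = permutation[:int(0.8 * n)], permutation[int(0.8 * n):]
    result = _fit_and_score(
        estimator, X, y, scorer, train, test, verbose=0,
        parameters=None, fit_params=None,
        return_train_score=True, return_times=True)
    # Expected keys: 'fit_failed', 'test_scores', 'train_scores',
    # 'fit_time' and 'score_time'.
    return sorted(result)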
def _score(estimator, X_test, y_test, scorer, error_score="raise"):
"""Compute the score(s) of an estimator on a given test set.
Will return a dict of floats if `scorer` is a dict, otherwise a single
float is returned.
"""
if isinstance(scorer, dict):
# will cache method calls if needed. scorer() returns a dict
scorer = _MultimetricScorer(**scorer)
try:
if y_test is None:
scores = scorer(estimator, X_test)
else:
scores = scorer(estimator, X_test, y_test)
except Exception:
if error_score == 'raise':
raise
else:
if isinstance(scorer, _MultimetricScorer):
scores = {name: error_score for name in scorer._scorers}
else:
scores = error_score
warnings.warn(
f"Scoring failed. The score on this train-test partition for "
f"these parameters will be set to {error_score}. Details: \n"
f"{format_exc()}",
UserWarning,
)
error_msg = (
"scoring must return a number, got %s (%s) instead. (scorer=%s)"
)
if isinstance(scores, dict):
for name, score in scores.items():
if hasattr(score, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), name))
scores[name] = score
else: # scalar
if hasattr(scores, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
scores = scores.item()
if not isinstance(scores, numbers.Number):
raise ValueError(error_msg % (scores, type(scores), scorer))
return scores
@_deprecate_positional_args
def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None,
n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', method='predict'):
"""Generate cross-validated estimates for each input data point
The data is split according to the cv parameter. Each sample belongs
to exactly one test set, and its prediction is computed with an
estimator fitted on the corresponding training set.
Passing these predictions into an evaluation metric may not be a valid
way to measure generalization performance. Results can differ from
:func:`cross_validate` and :func:`cross_val_score` unless all tests sets
have equal size and the metric decomposes over samples.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
        The data to fit. Can be, for example, a list or an array of at least 2d.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
predicting are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
    fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : {'predict', 'predict_proba', 'predict_log_proba', \
'decision_function'}, default='predict'
The method to be invoked by `estimator`.
Returns
-------
predictions : ndarray
This is the result of calling `method`. Shape:
- When `method` is 'predict' and in special case where `method` is
'decision_function' and the target is binary: (n_samples,)
- When `method` is one of {'predict_proba', 'predict_log_proba',
'decision_function'} (unless special case above):
(n_samples, n_classes)
- If `estimator` is :term:`multioutput`, an extra dimension
'n_outputs' is added to the end of each shape above.
See Also
--------
cross_val_score : Calculate score for each CV split.
cross_validate : Calculate one or more scores and timings for each CV
split.
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y, cv=3)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
splits = list(cv.split(X, y, groups))
test_indices = np.concatenate([test for _, test in splits])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
# If classification methods produce multiple columns of output,
# we need to manually encode classes to ensure consistent column ordering.
encode = method in ['decision_function', 'predict_proba',
'predict_log_proba'] and y is not None
if encode:
y = np.asarray(y)
if y.ndim == 1:
le = LabelEncoder()
y = le.fit_transform(y)
elif y.ndim == 2:
y_enc = np.zeros_like(y, dtype=int)
for i_label in range(y.shape[1]):
y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
y = y_enc
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
predictions = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in splits)
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
elif encode and isinstance(predictions[0], list):
# `predictions` is a list of method outputs from each fold.
# If each of those is also a list, then treat this as a
# multioutput-multiclass task. We need to separately concatenate
# the method outputs for each label into an `n_labels` long list.
n_labels = y.shape[1]
concat_pred = []
for i_label in range(n_labels):
label_preds = np.concatenate([p[i_label] for p in predictions])
concat_pred.append(label_preds)
predictions = concat_pred
else:
predictions = np.concatenate(predictions)
if isinstance(predictions, list):
return [p[inv_test_indices] for p in predictions]
else:
return predictions[inv_test_indices]
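# A minimal illustrative sketch (not part of the upstream source):
# out-of-fold class probabilities, showing the (n_samples, n_classes) output
# shape discussed in the docstring above. The name
# `_demo_cross_val_predict_proba` is an assumption for illustration only.
def _demo_cross_val_predict_proba():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    proba = cross_val_predict(LogisticRegression(max_iter=1000), X, y,
                              cv=5, method='predict_proba')
    # One row per sample and one column per class, with columns ordered by
    # the LabelEncoder-encoded classes (see the `encode` branch above).
    return proba.shape  # (150, 3)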
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
.. versionchanged:: 0.20
X is only required to be an object with finite length or shape now
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : str
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
encode = method in ['decision_function', 'predict_proba',
'predict_log_proba'] and y is not None
if encode:
if isinstance(predictions, list):
predictions = [_enforce_prediction_order(
estimator.classes_[i_label], predictions[i_label],
n_classes=len(set(y[:, i_label])), method=method)
for i_label in range(len(predictions))]
else:
# A 2D y array should be a binary label indicator matrix
n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
predictions = _enforce_prediction_order(
estimator.classes_, predictions, n_classes, method)
return predictions
def _enforce_prediction_order(classes, predictions, n_classes, method):
"""Ensure that prediction arrays have correct column order
When doing cross-validation, if one or more classes are
not present in the subset of data used for training,
then the output prediction array might not have the same
columns as other folds. Use the list of class names
(assumed to be ints) to enforce the correct column order.
Note that `classes` is the list of classes in this fold
(a subset of the classes in the full training set)
and `n_classes` is the number of classes in the full training set.
"""
if n_classes != len(classes):
recommendation = (
'To fix this, use a cross-validation '
'technique resulting in properly '
'stratified folds')
warnings.warn('Number of classes in training fold ({}) does '
'not match total number of classes ({}). '
'Results may not be appropriate for your use case. '
'{}'.format(len(classes), n_classes, recommendation),
RuntimeWarning)
if method == 'decision_function':
if (predictions.ndim == 2 and
predictions.shape[1] != len(classes)):
# This handles the case when the shape of predictions
# does not match the number of classes used to train
# it with. This case is found when sklearn.svm.SVC is
# set to `decision_function_shape='ovo'`.
raise ValueError('Output shape {} of {} does not match '
'number of classes ({}) in fold. '
'Irregular decision_function outputs '
'are not currently supported by '
'cross_val_predict'.format(
predictions.shape, method, len(classes)))
if len(classes) <= 2:
# In this special case, `predictions` contains a 1D array.
raise ValueError('Only {} class/es in training fold, but {} '
'in overall dataset. This '
'is not supported for decision_function '
'with imbalanced folds. {}'.format(
len(classes), n_classes, recommendation))
float_min = np.finfo(predictions.dtype).min
default_values = {'decision_function': float_min,
'predict_log_proba': float_min,
'predict_proba': 0}
predictions_for_all_classes = np.full((_num_samples(predictions),
n_classes),
default_values[method],
dtype=predictions.dtype)
predictions_for_all_classes[:, classes] = predictions
predictions = predictions_for_all_classes
return predictions
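# A minimal illustrative sketch (not part of the upstream source): if a fold
# was trained on classes [0, 2] out of 3 total classes,
# `_enforce_prediction_order` pads the missing class column with the default
# value (0 for 'predict_proba') and emits a RuntimeWarning. The name
# `_demo_enforce_prediction_order` is an assumption for illustration only.
def _demo_enforce_prediction_order():
    classes = np.array([0, 2])
    predictions = np.array([[0.9, 0.1],
                            [0.4, 0.6]])
    padded = _enforce_prediction_order(classes, predictions, n_classes=3,
                                       method='predict_proba')
    # padded == [[0.9, 0.0, 0.1],
    #            [0.4, 0.0, 0.6]]  (column for the missing class 1 is 0)
    return padded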
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
int array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
@_deprecate_positional_args
def permutation_test_score(estimator, X, y, *, groups=None, cv=None,
n_permutations=100, n_jobs=None, random_state=0,
verbose=0, scoring=None, fit_params=None):
"""Evaluate the significance of a cross-validated score with permutations
Permutes targets to generate 'randomized data' and compute the empirical
p-value against the null hypothesis that features and targets are
independent.
The p-value represents the fraction of randomized data sets where the
estimator performed as well or better than in the original data. A small
p-value suggests that there is a real dependency between features and
targets which has been used by the estimator to give good predictions.
A large p-value may be due to lack of real dependency between features
and targets or the estimator was not able to use the dependency to
give good predictions.
Read more in the :ref:`User Guide <permutation_test_score>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : str or callable, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None the estimator's score method is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the cross-validated score are parallelized over the permutations.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
Pass an int for reproducible output for permutation of
``y`` values among samples. See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array of shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. `Permutation Tests for Studying Classifier
Performance
<http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
Journal of Machine Learning Research (2010) vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer,
fit_params=fit_params)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer, fit_params=fit_params)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
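# A minimal illustrative sketch (not part of the upstream source): the
# p-value returned above is (C + 1) / (n_permutations + 1); for instance, if
# 4 of 100 permuted-target scores are >= the true score,
# p = (4 + 1) / (100 + 1) ~= 0.0495. The name `_demo_permutation_test_score`
# and the dataset/estimator choice are assumptions for illustration only.
def _demo_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    X, y = load_iris(return_X_y=True)
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), X, y, cv=5, n_permutations=100, random_state=0)
    # The smallest achievable p-value with 100 permutations is 1 / 101.
    return score, pvalue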
def _permutation_test_score(estimator, X, y, groups, cv, scorer,
fit_params):
"""Auxiliary function for permutation_test_score"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
fit_params = _check_fit_params(X, fit_params, train)
estimator.fit(X_train, y_train, **fit_params)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return _safe_indexing(y, indices)
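# A minimal illustrative sketch (not part of the upstream source): with
# `groups` given, `_shuffle` only permutes y values among samples that share
# a group label, so each group keeps its own multiset of targets. The name
# `_demo_shuffle_within_groups` is an assumption for illustration only.
def _demo_shuffle_within_groups():
    y = np.array([1, 2, 3, 4, 5, 6])
    groups = np.array([0, 0, 0, 1, 1, 1])
    shuffled = _shuffle(y, groups, check_random_state(0))
    # The first three entries are a permutation of {1, 2, 3} and the last
    # three a permutation of {4, 5, 6}.
    return shuffled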
@_deprecate_positional_args
def learning_curve(estimator, X, y, *, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
scoring=None, exploit_incremental_learning=False,
n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False,
random_state=None, error_score=np.nan, return_times=False,
fit_params=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose, return_times, error_score=error_score,
fit_params=fit_params)
for train, test in cv_iter
)
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters=None, fit_params=fit_params, return_train_score=True,
error_score=error_score, return_times=return_times)
for train, test in train_test_proportions
)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
score_times = results["score_time"].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = train_sizes_abs, out[0], out[1]
if return_times:
ret = ret + (out[2], out[3])
return ret
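# A minimal illustrative sketch (not part of the upstream source): fractional
# `train_sizes` are translated into absolute subset sizes (see
# `_translate_train_sizes` below), and the returned score arrays have one row
# per tick and one column per CV fold. The name `_demo_learning_curve` and
# the dataset/estimator choice are assumptions for illustration only.
def _demo_learning_curve():
    from sklearn.datasets import load_digits
    from sklearn.naive_bayes import GaussianNB
    X, y = load_digits(return_X_y=True)
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))
    # sizes has shape (5,); train_scores and test_scores have shape (5, 5).
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)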
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose,
return_times, error_score, fit_params):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores, fit_times, score_times = [], [], [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
if fit_params is None:
fit_params = {}
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
start_fit = time.time()
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes,
**fit_params)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes, **fit_params)
fit_time = time.time() - start_fit
fit_times.append(fit_time)
start_score = time.time()
test_scores.append(
_score(estimator, X_test, y_test, scorer, error_score)
)
train_scores.append(
_score(estimator, X_train, y_train, scorer, error_score)
)
score_time = time.time() - start_score
score_times.append(score_time)
ret = ((train_scores, test_scores, fit_times, score_times)
if return_times else (train_scores, test_scores))
return np.array(ret).T
@_deprecate_positional_args
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
verbose=0, error_score=np.nan, fit_params=None):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
results = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=fit_params,
return_train_score=True, error_score=error_score)
        # NOTE: do not change the order of iteration, to allow one-time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
n_params = len(param_range)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_params).T
test_scores = results["test_scores"].reshape(-1, n_params).T
return train_scores, test_scores
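# A minimal illustrative sketch (not part of the upstream source):
# `param_name` and `param_range` are keyword-only in the signature above, and
# the returned arrays have one row per parameter value and one column per CV
# fold. The name `_demo_validation_curve` and the dataset/estimator choice
# are assumptions for illustration only.
def _demo_validation_curve():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    X, y = load_diabetes(return_X_y=True)
    param_range = np.logspace(-3, 3, 7)
    train_scores, test_scores = validation_curve(
        Ridge(), X, y, param_name='alpha', param_range=param_range, cv=5)
    # Both arrays have shape (7, 5): 7 alpha values x 5 CV folds.
    return train_scores.mean(axis=1), test_scores.mean(axis=1)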
def _aggregate_score_dicts(scores):
"""Aggregate the list of dict to dict of np ndarray
The aggregated output of _aggregate_score_dicts will be a list of dict
of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]
Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
"""
return {
key: np.asarray([score[key] for score in scores])
if isinstance(scores[0][key], numbers.Number)
else [score[key] for score in scores]
for key in scores[0]
}
|
bsd-3-clause
|
decvalts/cartopy
|
lib/cartopy/examples/wmts.py
|
4
|
1105
|
"""
Interactive WMTS (Web Map Tile Service)
---------------------------------------
This example demonstrates the interactive pan and zoom capability
supported by an OGC Web Map Tile Service (WMTS) aware axes.
The example WMTS layer is a single composite of data sampled over nine days
in April 2012 and thirteen days in October 2012 showing the Earth at night.
It does not vary over time.
The imagery was collected by the Suomi National Polar-orbiting Partnership
(Suomi NPP) weather satellite operated by the United States National Oceanic
and Atmospheric Administration (NOAA).
"""
__tags__ = ['Web services']
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def main():
url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer = 'VIIRS_CityLights_2012'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.add_wmts(url, layer)
ax.set_extent([-15, 25, 35, 60], crs=ccrs.PlateCarree())
ax.set_title('Suomi NPP Earth at night April/October 2012')
plt.show()
if __name__ == '__main__':
main()
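# An illustrative variant (not part of the original example): the same WMTS
# layer can be drawn on a different projection, with the WMTS-aware axes
# re-projecting the tiles on the fly. The function name `main_reprojected` is
# an assumption for illustration only and is not called by the script.
def main_reprojected():
    url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
    layer = 'VIIRS_CityLights_2012'
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1,
                         projection=ccrs.InterruptedGoodeHomolosine())
    ax.add_wmts(url, layer)
    ax.set_title('Suomi NPP Earth at night, reprojected')
    plt.show()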
|
gpl-3.0
|
yavalvas/yav_com
|
build/matplotlib/lib/matplotlib/tests/test_delaunay.py
|
1
|
6833
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
def make_test(func):
filenames = [
'%s-%s' % (func.__name__, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'],
freetype_version=('2.4.5', '2.4.9'),
remove_text=True)
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = str('test_%s' % func.__name__)
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.__name__] = make_test(func)
make_all_2d_testfuncs()
# 1d and 0d grid tests
ref_interpolator = Triangulation([0,10,10,0],
[0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
res = ref_interpolator[3:6:2j,1:1:1j]
assert np.allclose(res, [[1.6],[1.9]], rtol=0)
def test_0d_grid():
res = ref_interpolator[3:3:1j,1:1:1j]
assert np.allclose(res, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
x_range = slice(0.25,9.75,20j)
x = np.mgrid[x_range]
ax = plt.gca()
for y in xrange(2,10,2):
plt.plot(x, ref_interpolator[x_range,y:y:1j])
ax.set_xticks([])
ax.set_yticks([])
|
mit
|
raghavrv/scikit-learn
|
examples/linear_model/plot_omp.py
|
385
|
2263
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
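# Illustrative addition (not part of the upstream example): the CV estimator
# exposes the sparsity level it selected, which can be compared with the
# n_nonzero_coefs value used to generate the signal above.
print("CV-selected number of non-zero coefficients: %d"
      % omp_cv.n_nonzero_coefs_)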
|
bsd-3-clause
|
BhallaLab/moose-full
|
moose-gui/plugins/test_plotwidget.py
|
3
|
2438
|
# test_plotwidget.py ---
#
# Filename: test_plotwidget.py
# Description:
# Author:
# Maintainer:
# Created: Tue Mar 12 12:17:23 2013 (+0530)
# Version:
# Last-Updated: Tue Mar 12 12:26:42 2013 (+0530)
# By: subha
# Update #: 31
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
import os
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
from matplotlib import mlab
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
import unittest
sys.path.append('..')
from default import PlotWidget
import moose
import config
class PlotWidgetTests(unittest.TestCase):
def setUp(self):
self.app = QtGui.QApplication([])
QtGui.qApp = self.app
icon = QtGui.QIcon(os.path.join(config.KEY_ICON_DIR,'moose_icon.png'))
self.model = moose.loadModel('../../Demos/Genesis_files/acc68.g', '/acc68')
self.app.setWindowIcon(icon)
self.window = QtGui.QMainWindow()
self.window.setWindowTitle('Test plot widget')
self.mdiArea = QtGui.QMdiArea()
self.window.setCentralWidget(self.mdiArea)
self.widget = PlotWidget()
self.widget.setDataRoot(self.model.path)
self.mdiArea.addSubWindow(self.widget)
self.window.show()
def testPlotAllData(self):
"""Test plot function"""
self.widget.plotAllData()
def tearDown(self):
self.app.exec_()
if __name__ == '__main__':
unittest.main()
#
# test_plotwidget.py ends here
|
gpl-2.0
|
trevorstephens/gplearn
|
gplearn/tests/test_functions.py
|
1
|
6173
|
"""Testing the Genetic Programming functions module."""
# Author: Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
import pickle
import numpy as np
from numpy import maximum
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.utils._testing import assert_equal, assert_raises
from sklearn.utils.validation import check_random_state
from gplearn.functions import _protected_sqrt, make_function
from gplearn.genetic import SymbolicRegressor, SymbolicTransformer
from gplearn.genetic import SymbolicClassifier
# load the boston dataset and randomly permute it
rng = check_random_state(0)
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# load the breast cancer dataset and randomly permute it
cancer = load_breast_cancer()
perm = check_random_state(0).permutation(cancer.target.size)
cancer.data = cancer.data[perm]
cancer.target = cancer.target[perm]
def test_validate_function():
"""Check that valid functions are accepted & invalid ones raise error"""
# Check arity tests
_ = make_function(function=_protected_sqrt, name='sqrt', arity=1)
# non-integer arity
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', '1')
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 1.0)
# non-bool wrap
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 1, 'f')
# non-matching arity
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 2)
assert_raises(ValueError, make_function, maximum, 'max', 1)
# Check name test
assert_raises(ValueError, make_function, _protected_sqrt, 2, 1)
# Check return type tests
def bad_fun1(x1, x2):
return 'ni'
assert_raises(ValueError, make_function, bad_fun1, 'ni', 2)
# Check return shape tests
def bad_fun2(x1):
return np.ones((2, 1))
assert_raises(ValueError, make_function, bad_fun2, 'ni', 1)
# Check closure for negatives test
def _unprotected_sqrt(x1):
with np.errstate(divide='ignore', invalid='ignore'):
return np.sqrt(x1)
assert_raises(ValueError, make_function, _unprotected_sqrt, 'sqrt', 1)
# Check closure for zeros test
def _unprotected_div(x1, x2):
with np.errstate(divide='ignore', invalid='ignore'):
return np.divide(x1, x2)
assert_raises(ValueError, make_function, _unprotected_div, 'div', 2)
def test_function_in_program():
"""Check that using a custom function in a program works"""
def logic(x1, x2, x3, x4):
return np.where(x1 > x2, x3, x4)
logical = make_function(function=logic,
name='logical',
arity=4)
function_set = ['add', 'sub', 'mul', 'div', logical]
est = SymbolicTransformer(generations=2, population_size=2000,
hall_of_fame=100, n_components=10,
function_set=function_set,
parsimony_coefficient=0.0005,
max_samples=0.9, random_state=0)
est.fit(boston.data[:300, :], boston.target[:300])
formula = est._programs[0][906].__str__()
expected_formula = 'sub(logical(X6, add(X11, 0.898), X10, X2), X5)'
assert_equal(expected_formula, formula, True)
def test_parallel_custom_function():
"""Regression test for running parallel training with custom functions"""
def _logical(x1, x2, x3, x4):
return np.where(x1 > x2, x3, x4)
logical = make_function(function=_logical,
name='logical',
arity=4)
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0,
n_jobs=2)
est.fit(boston.data, boston.target)
_ = pickle.dumps(est)
# Unwrapped functions should fail
logical = make_function(function=_logical,
name='logical',
arity=4,
wrap=False)
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0,
n_jobs=2)
est.fit(boston.data, boston.target)
assert_raises(AttributeError, pickle.dumps, est)
# Single threaded will also fail in non-interactive sessions
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0)
est.fit(boston.data, boston.target)
assert_raises(AttributeError, pickle.dumps, est)
def test_parallel_custom_transformer():
"""Regression test for running parallel training with custom transformer"""
def _sigmoid(x1):
with np.errstate(over='ignore', under='ignore'):
return 1 / (1 + np.exp(-x1))
sigmoid = make_function(function=_sigmoid,
name='sig',
arity=1)
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0,
n_jobs=2)
est.fit(cancer.data, cancer.target)
_ = pickle.dumps(est)
# Unwrapped functions should fail
sigmoid = make_function(function=_sigmoid,
name='sig',
arity=1,
wrap=False)
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0,
n_jobs=2)
est.fit(cancer.data, cancer.target)
assert_raises(AttributeError, pickle.dumps, est)
# Single threaded will also fail in non-interactive sessions
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0)
est.fit(cancer.data, cancer.target)
assert_raises(AttributeError, pickle.dumps, est)
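# Illustrative sketch (not one of the test cases above): a custom function is
# typically a closure that guards against invalid inputs before being
# registered via make_function; this mirrors gplearn's built-in protected
# operators and passes the validation checks exercised in the tests above.
def _protected_log(x1):
    """Protected logarithm: log(|x|), returning 0 for inputs near zero."""
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(np.abs(x1) > 0.001, np.log(np.abs(x1)), 0.)

plog = make_function(function=_protected_log, name='plog', arity=1)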
|
bsd-3-clause
|
mrjacobagilbert/gnuradio
|
gr-filter/examples/interpolate.py
|
5
|
8072
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio.fft import window
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=window.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
        # and larger numbers will reduce these even further
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=window.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._interp))
print("Number of taps: ", len(self._taps))
print("Number of filters: ", self._interp)
print("Taps per channel: ", tpc)
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = numpy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_int / 2.0, fs_int / 2.0, fs_int / float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0 / fs_int
Tmax = len(d)*Ts_int
t_o = numpy.arange(0, Tmax, Ts_int)
x_o1 = numpy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_aint / 2.0, fs_aint / 2.0, fs_aint / float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0 / fs_aint
Tmax = len(d)*Ts_aint
t_o = numpy.arange(0, Tmax, Ts_aint)
x_o2 = numpy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
bundgus/python-playground
|
matplotlib-playground/examples/pylab_examples/fonts_demo.py
|
2
|
2871
|
"""
Show how to set custom font properties.
For interactive users, you can also use kwargs to the text command,
which requires less typing. See examples/fonts_demo_kw.py
"""
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
plt.subplot(111, axisbg='w')
font0 = FontProperties()
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}
# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
font1 = font0.copy()
font1.set_size('large')
t = plt.text(-0.8, 0.9, 'family', fontproperties=font1,
**alignment)
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
font = font0.copy()
font.set_family(family)
t = plt.text(-0.8, yp[k], family, fontproperties=font,
**alignment)
# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.text(-0.4, 0.9, 'style', fontproperties=font1,
**alignment)
for k, style in enumerate(styles):
font = font0.copy()
font.set_family('sans-serif')
font.set_style(style)
t = plt.text(-0.4, yp[k], style, fontproperties=font,
**alignment)
# Show variant options
variants = ['normal', 'small-caps']
t = plt.text(0.0, 0.9, 'variant', fontproperties=font1,
**alignment)
for k, variant in enumerate(variants):
font = font0.copy()
font.set_family('serif')
font.set_variant(variant)
t = plt.text(0.0, yp[k], variant, fontproperties=font,
**alignment)
# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.text(0.4, 0.9, 'weight', fontproperties=font1,
**alignment)
for k, weight in enumerate(weights):
font = font0.copy()
font.set_weight(weight)
t = plt.text(0.4, yp[k], weight, fontproperties=font,
**alignment)
# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = plt.text(0.8, 0.9, 'size', fontproperties=font1,
**alignment)
for k, size in enumerate(sizes):
font = font0.copy()
font.set_size(size)
t = plt.text(0.8, yp[k], size, fontproperties=font,
**alignment)
# Show bold italic
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = plt.text(-0.4, 0.1, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = plt.text(-0.4, 0.2, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
t = plt.text(-0.4, 0.3, 'bold italic', fontproperties=font,
**alignment)
plt.axis([-1, 1, 0, 1])
plt.show()
|
mit
|
thirdwing/mxnet
|
example/ssd/dataset/pycocotools/coco.py
|
29
|
19564
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
        :param image_folder (str): location of the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
# rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
raise NotImplementedError("maskUtils disabled!")
else:
rle = [ann['segmentation']]
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
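        # bbox-style results: synthesize a rectangular segmentation and an
        # area (w*h) from each box so downstream code can treat them alike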
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
# ann['area'] = maskUtils.area(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
if not 'bbox' in ann:
# ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: rle (run-length encoding of the segmentation)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
return m
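# Minimal usage sketch (illustrative only, not part of the upstream module);
# the annotation file path below is an assumption and must point at a real
# COCO-format JSON file for the demo to run.
if __name__ == '__main__':
    coco_demo = COCO('annotations/instances_val2017.json')  # hypothetical path
    person_ids = coco_demo.getCatIds(catNms=['person'])
    img_ids = coco_demo.getImgIds(catIds=person_ids)
    ann_ids = coco_demo.getAnnIds(imgIds=img_ids[:1], catIds=person_ids)
    print('{} person annotations on the first image'.format(
        len(coco_demo.loadAnns(ann_ids))))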
|
apache-2.0
|
lenovor/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
justincassidy/scikit-learn
|
sklearn/utils/__init__.py
|
132
|
14185
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
|
bsd-3-clause
|
mhvk/astropy
|
astropy/convolution/tests/test_convolve.py
|
3
|
42344
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import numpy.ma as ma
from contextlib import nullcontext
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.utils.exceptions import AstropyUserWarning
from astropy import units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY, HAS_PANDAS # noqa
from numpy.testing import (assert_array_almost_equal_nulp,
assert_array_almost_equal,
assert_allclose)
import itertools
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
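# Pair the direct convolve() with every boundary option, and the FFT-based
# convolve_fft() with the two boundaries it is exercised on here ('wrap' and
# 'fill'); tests parametrized over this list run both implementations.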
BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
BOUNDARY_OPTIONS)) + [(convolve_fft,
'wrap'),
(convolve_fft,
'fill')])
class TestConvolve1D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [1, 4, 5, 6, 5, 7, 8]
y = [0.2, 0.6, 0.2]
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
def test_tuple(self):
"""
Test that convolve works correctly when inputs are tuples
"""
x = (1, 4, 5, 6, 5, 7, 8)
y = (0.2, 0.6, 0.2)
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_quantity(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve works correctly when input array is a Quantity
"""
x = np.array([1, 4, 5, 6, 5, 7, 8], dtype=dtype) * u.ph
y = np.array([0.2, 0.6, 0.2], dtype=dtype)
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert x.unit == z.unit
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
        Test that convolve does not modify the input data
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
        # Only compare non-NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([1., 2., 3.], dtype=dtype_array)
y = np.array([0., 1., 0.], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)
def test_unity_1_none(self, boundary, convfunc):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([1.], dtype='>f8')
z = convfunc(x, y, boundary=boundary)
np.testing.assert_allclose(z, x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3(self, boundary):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3(self, boundary):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
elif boundary == 'fill':
assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
elif boundary == 'wrap':
assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
else:
assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))
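    # Worked example for the expectations above, convolving x = [1, 0, 3] with
    # the unnormalized kernel [1, 1, 1]:
    #   boundary=None     -> only the fully-overlapping centre sample is kept: 1+0+3 = 4
    #   boundary='fill'   -> pad with zeros:  [0,1,0]=1, [1,0,3]=4, [0,3,0]=3
    #   boundary='wrap'   -> periodic pad:    [3,1,0]=4, [1,0,3]=4, [0,3,1]=4
    #   boundary='extend' -> repeat edges:    [1,1,0]=2, [1,0,3]=4, [0,3,3]=6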
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_unity_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
# Since the kernel is actually only one pixel wide (because of the
# zeros) the NaN value doesn't get interpolated over so a warning is
# expected.
if nan_treatment == 'interpolate' and not preserve_nan:
ctx = pytest.warns(AstropyUserWarning,
match="nan_treatment='interpolate', however, "
"NaN values detected")
else:
ctx = nullcontext()
with ctx:
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
# boundary, nan_treatment, normalize_kernel
rslt = {
(None, 'interpolate', True): [0, 2, 0],
(None, 'interpolate', False): [0, 6, 0],
(None, 'fill', True): [0, 4/3., 0],
(None, 'fill', False): [0, 4, 0],
('fill', 'interpolate', True): [1/2., 2, 3/2.],
('fill', 'interpolate', False): [3/2., 6, 9/2.],
('fill', 'fill', True): [1/3., 4/3., 3/3.],
('fill', 'fill', False): [1, 4, 3],
('wrap', 'interpolate', True): [2, 2, 2],
('wrap', 'interpolate', False): [6, 6, 6],
('wrap', 'fill', True): [4/3., 4/3., 4/3.],
('wrap', 'fill', False): [4, 4, 4],
('extend', 'interpolate', True): [1, 2, 3],
('extend', 'interpolate', False): [3, 6, 9],
('extend', 'fill', True): [2/3., 4/3., 6/3.],
('extend', 'fill', False): [2, 4, 6],
}[boundary, nan_treatment, normalize_kernel]
if preserve_nan:
rslt[1] = 0
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
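    # Worked example for the table above with x = [1, nan, 3] and kernel [1, 1, 1]:
    # nan_treatment='fill' replaces the NaN by fill_value=0 before convolving, so
    # ('fill', 'fill', False) is just [1, 0, 3] * [1, 1, 1] -> [1, 4, 3], while
    # nan_treatment='interpolate' averages over the non-NaN samples in each window
    # and, with normalize_kernel=False, rescales by the kernel sum: the centre
    # sample becomes (1 + 3)/2 * 3 = 6, giving ('fill', 'interpolate', False) =
    # [3/2, 6, 9/2].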
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_zero_sum_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with zero sum kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]
assert(np.isclose(sum(y), 0, atol=1e-8))
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],
('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],
('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],
('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_int_masked_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with integer masked kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],
('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],
('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],
('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize('preserve_nan', PRESERVE_NAN_OPTIONS)
def test_int_masked_array(self, preserve_nan):
"""
Test that convolve works correctly with integer masked arrays.
"""
x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.)
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[2])
z[2] = 8
assert_array_almost_equal_nulp(z, (8/3., 4, 8, 12, 8), 10)
class TestConvolve2D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)
assert_array_almost_equal_nulp(z, x, 10)
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1_none(self, boundary):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 5., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 6., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
[4., 6., 8.],
[6., 5., 4.]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3_withnan(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
preserve_nan=True)
assert np.isnan(z[1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnanfilled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 8., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
[4., 8., 7.],
[4., 4., 3.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
[8., 8., 8.],
[8., 8., 8.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
[5., 8., 11.],
[8., 7., 6.]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnaninterped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
[4./8, 8./8, 7./8],
[4./8, 4./8, 3./8]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
[5./8, 8./8, 11./8],
[8./8, 7./8, 6./8]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel_2D(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='float'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
[0., 0., 1.],
[2., -4., 2.]], dtype='float'), 10)
else:
raise ValueError("Invalid boundary specification")
class TestConvolve3D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z / 27, x, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1x1_none(self, boundary):
'''
Test that a 1x1x1 unit kernel returns the same array
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.array([[[1.]]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3x3(self, boundary):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
[[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
[[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
[[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
[[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS))
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
preserve_nan=True)
assert np.isnan(z[1, 1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_filled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8'), 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_interped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
kernsum = y.sum() - 1 # one nan is missing
mid = x[np.isfinite(x)].sum() / kernsum
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8')/kernsum, 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Regression test for #6264: make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary is None:
assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
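# Worked example for the 'fill' case above: true convolution flips the kernel,
# so the result matches np.convolve([3, 0, 1], [1, 2, 3], 'same'): the full
# product is [3, 6, 10, 2, 3] and the central three samples are [6, 10, 2].
# A correlation (no kernel flip) would instead give [6, 6, 2], which is what
# this regression test guards against.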
@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):
np.random.seed(0)
array = np.random.randn(*([3]*ndims))
np.random.seed(0)
kernel = np.random.rand(*([3]*ndims))
conv_f = convolve_fft(array, kernel, boundary='fill')
conv_d = convolve(array, kernel, boundary='fill')
assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
from scipy.signal import fftconvolve
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_PANDAS')
def test_regression_6099():
import pandas
wave = np.array(np.linspace(5000, 5100, 10))
boxcar = 3
nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)
wave_series = pandas.Series(wave)
series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)
assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
kernel = np.ones(3)/3.
with pytest.raises(TypeError):
convolve('glork', kernel)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_square_kernel_asymmetric(boundary):
# Regression test for a bug that occurred when using non-square kernels in
# 2D when using boundary=None
kernel = np.array([[1, 2, 3, 2, 1], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]])
image = np.zeros((13, 13))
image[6, 6] = 1
result = convolve(image, kernel, normalize_kernel=False, boundary=boundary)
assert_allclose(result[5:8, 4:9], kernel)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_uninterpolated_nan_regions(boundary, normalize_kernel):
    # Regression test for #8086:
# Test NaN interpolation of contiguous NaN regions with kernels of size
# identical and greater than that of the region of NaN values.
# Test case: kernel.shape == NaN_region.shape
kernel = Gaussian2DKernel(1, 5, 5)
nan_centroid = np.full(kernel.shape, np.nan)
image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
constant_values=1)
with pytest.warns(AstropyUserWarning,
match=r"nan_treatment='interpolate', however, NaN values detected "
r"post convolution. A contiguous region of NaN values, larger "
r"than the kernel size, are present in the input array. "
r"Increase the kernel size to avoid this."):
result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=normalize_kernel)
assert(np.any(np.isnan(result)))
# Test case: kernel.shape > NaN_region.shape
    nan_centroid = np.full((kernel.shape[0]-1, kernel.shape[1]-1), np.nan)  # 1 smaller than kernel
image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
constant_values=1)
result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=normalize_kernel)
assert(~np.any(np.isnan(result))) # Note: negation
def test_regressiontest_issue9168():
"""
Issue #9168 pointed out that kernels can be (unitless) quantities, which
leads to crashes when inplace modifications are made to arrays in
convolve/convolve_fft, so we now strip the quantity aspects off of kernels.
"""
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]],)
kernel_fwhm = 1*u.arcsec
pixel_size = 1*u.arcsec
kernel = Gaussian2DKernel(x_stddev=kernel_fwhm/pixel_size)
result = convolve_fft(x, kernel, boundary='fill', fill_value=np.nan,
preserve_nan=True)
result = convolve(x, kernel, boundary='fill', fill_value=np.nan,
preserve_nan=True)
|
bsd-3-clause
|
jhamman/xarray
|
asv_bench/benchmarks/interp.py
|
4
|
1572
|
import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
nx = 3000
long_nx = 30000000
ny = 2000
nt = 1000
window = 20
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt,))
randn_long = randn((long_nx,), frac_nan=0.1)
new_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100)
new_x_long = np.linspace(0.3 * nx, 0.7 * nx, 1000)
new_y_long = np.linspace(0.1, 0.9, 1000)
class Interpolation:
def setup(self, *args, **kwargs):
self.ds = xr.Dataset(
{
"var1": (("x", "y"), randn_xy),
"var2": (("x", "t"), randn_xt),
"var3": (("t",), randn_t),
},
coords={
"x": np.arange(nx),
"y": np.linspace(0, 1, ny),
"t": pd.date_range("1970-01-01", periods=nt, freq="D"),
"x_coords": ("x", np.linspace(1.1, 2.1, nx)),
},
)
@parameterized(["method", "is_short"], (["linear", "cubic"], [True, False]))
def time_interpolation(self, method, is_short):
new_x = new_x_short if is_short else new_x_long
self.ds.interp(x=new_x, method=method).load()
@parameterized(["method"], (["linear", "nearest"]))
def time_interpolation_2d(self, method):
self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()
class InterpolationDask(Interpolation):
def setup(self, *args, **kwargs):
requires_dask()
super().setup(**kwargs)
self.ds = self.ds.chunk({"t": 50})
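

# Illustrative sketch (hypothetical helper, not one of the benchmarks above and
# not picked up by asv's time_/mem_ naming convention): the same kind of call
# that time_interpolation measures, on a small standalone dataset.
def _interp_example():
    ds = xr.Dataset({"var1": (("x",), np.random.randn(10))},
                    coords={"x": np.arange(10)})
    return ds.interp(x=np.linspace(0.5, 8.5, 20), method="linear")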
|
apache-2.0
|
FRESNA/vresutils
|
vresutils/costdata.py
|
1
|
9711
|
# -*- coding: utf-8 -*-
## Copyright 2015-2017 Frankfurt Institute for Advanced Studies
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
import numpy as np
import pandas as pd
#af : annualization factor
#ICi: Overnight investment cost / kW
#FCi: Fix O&M costs /kW/year
#VCi: Variable O&M costs /kWh
#efi: efficiency
NhoursPerYear=8760.
USD2013_to_EUR2013 = 0.7532 #[EUR/USD] # ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html
discountrate=0.07
CO2intens=np.array([0.27,0.32,0.27,0.,0.45])/1000. # [t/kWht] # Hirth+13
#0.55,0.69 ,0.33, 0, 0.85
def annuity(n,r=discountrate):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if isinstance(r, pd.Series):
return pd.Series(1/n, index=r.index).where(r == 0, r/(1. - 1./(1.+r)**n))
elif r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
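

def _annuity_example():
    """Illustrative sketch (hypothetical helper, not used elsewhere in this
    module): the annualized overnight cost of a 1500 Eur/kW plant with a
    25-year lifetime at the default 7% discount rate, roughly
    annuity(25, 0.07) * 1500 ~= 128.7 Eur/kW/year."""
    return annuity(25, 0.07) * 1500.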
def get_cost(ref, CO2cost=0.):
'''Return cost dictionary for different fuel types.
Power costs in [Eur/MW/year], Energy costs in [Eur/MWh]'''
#Gas-GT,Coal-new,Gas-CCGT,Nuclear,Lignite
if ref=='Sch' or ref=='Schaber+12':
# Schaber+12
cost={'ref':'Schaber+12'}
cost['fueltype']=np.char.array(['OCGT','Coal','CCGT','Nuclear','Lignite'])
cost['lifetime']=np.array([25.,30,25,40,30]) #Schaber+12b
cost['ICi']=np.array([400.,1400,650,3000,2300]) #[Euro/kW] # Schaber+12
cost['FCi']=np.array([18.,35,18,65,40]) #[Euro/kW/year] # Schaber+12
cost['VCi']=np.array([68.,21,44,12,13])/1000. #[Euro/kWhel] # Schaber+12
cost['efi']=np.array([0.38,0.46,0.60,0.33,0.43]) #[1] # Schaber+12
elif ref=='NREL':
# NREL+15
cost={'ref':'NREL 15'}
cost['fueltype']=np.char.array(['OCGT','Coal','CCGT','Nuclear'])
cost['lifetime']=30
cost['ICi']=np.array([866.79,3446.5,1020.74,6481.77])*USD2013_to_EUR2013 #[Euro/kW] #NREL+15
cost['FCi']=np.array([7.3,31.65,14.48,94.68])*USD2013_to_EUR2013 #[Euro/kW/year] #NREL+15
cost['VCi']=np.array([58.14,25.74,33.02,10.09])/1000.*USD2013_to_EUR2013 #[Euro/kWhel] #NREL+15
cost['efi']=np.array([0.38,0.46,0.60,0.33]) # GUESS # (Schaber+Hirth)/2
#efi_NREL_guess=np.array([0.38,0.46,0.60,0.33]) # (Schaber+Hirth)/2
elif ref=='Hir' or ref=='Hirth+13':
# Hirth+13
cost={'ref':'Hirth+13'}
cost['fueltype']=np.char.array(['OCGT','Coal','CCGT','Nuclear','Lignite'])
cost['lifetime']=np.array([25.,25,25,50,25])
cost['ICi']=np.array([600.,1500,1000,4000,2200]) #[Euro/kW] # Hirth+13
cost['FCi']=np.array([7.,25,12,40,30]) #[Euro/kW/year] # Hirth+13
cost['vai']=np.array([2.,1,2,2,1])/1000. #[Euro/kWhel] #variable cost# Hirth+13
cost['fui']=np.array([50.,11.5,25,3,3])/1000. #[Euro/kWht] #fuel cost# Hirth+13
cost['efi']=np.array([0.30,0.39,0.48,0.33,0.38]) #[1] #efficiency# Hirth+13
elif ref=='diw' or ref=='Schroeder+12':
#Gas-GT,Coal-new,Gas-CCGT,Nuclear,Lignite
#diw
cost={'ref':'diw'}
cost['fueltype']=np.char.array(['OCGT','Coal','CCGT','Nuclear','Lignite'])
cost['lifetime']=np.array([30.,40,30,50,40])
cost['ICi']=np.array([400.,1300,800,4000,2000]) #[Euro/kW] # diw #proposed values except for higher price for lignite
cost['FCi']=np.array([15.,25,20,0,30]) #[Euro/kW/year] # diw
cost['vai']=np.array([3.,6,4,8,7])/1000. #[Euro/kWh] #variable cost# diw
cost['fui']=np.array([21.6,8.4,21.6,3,2.9])/1000. #[Euro/kWht] #fuel cost# diw
cost['efi']=np.array([0.3,0.46,0.6,0.33,0.39]) #[1] #efficiency# diw
#Gas-GT,Coal-new,Gas-CCGT,Nuclear,Lignite
#diw http://hdl.handle.net/10419/80348
elif ref=='diw2030':
cost={'ref':'diw2030'}
cost['fueltype']=np.char.array(['windon','windoff','solar','PHS','hydro','ror','OCGT','Coal','CCGT','Nuclear','Lignite'])
cost['lifetime']=np.array([20.,20,20,100,100,100,30,40,30,50,40])
cost['ICi']=np.array([1182,2506,600,2000,2000,3000,400,1300,800,4000,2000 ]) #[Euro/kW] # diw #proposed values 2030
cost['FCi']=np.array([35,80,25,20,20,60,15,25,20,0,30 ]) #[Euro/kW/year] # diw #proposed values 2030
cost['vai']=np.array([1.5e-2,2e-2,1e-2,0,0,0,3,6,4,8,7 ])/1000. #[Euro/kWh] #variable cost #diw #own assumptions for wind/solar
cost['fui']=np.array([0.,0,0,0,0,0,21.6,8.4,21.6,3,2.9])/1000. #[Euro/kWht] #fuel cost# diw
cost['efi']=np.array([1,1,1,0.75,0.90,0.90,0.39,0.46,0.6,0.33,0.39 ]) #[1] #efficiency# diw
cost['CO2int']=np.array([0.,0,0,0,0,0,0.1872,0.32,0.1872,0.,0.45 ])/1000. # Hirth 2013
elif ref=='iea':
#http://www.worldenergyoutlook.org/media/weowebsite/2014/weio/WEIO2014PGAssumptions.xlsx
#http://www.iea.org/publications/freepublications/publication/WEIO2014.pdf
#Gas-GT,Gas-CCGT,Nuclear,Steam Coal {SUB/SUPER}critical
# is sub/super crit coal = lignite/coal?
        # They DON'T provide variable costs!
        raise NotImplementedError
elif ref=='Spiecker,Weber 13':
#http://www.sciencedirect.com/science/article/pii/S0301421513010549#bib17
#give (large) price ranges
#no direct fuel costs
#but startup variable and fuel costs
        raise NotImplementedError
else:
raise KeyError('No data available for ref={0}'.format(ref))
cost = pd.DataFrame(cost).set_index('fueltype')
## Define fallbacks if the chosen source does not include all of
## the data
# CO2
if not 'CO2int' in cost:
cost['CO2int'] = pd.Series(CO2intens, index=['OCGT','Coal','CCGT','Nuclear','Lignite'])
# [t/kWht] from Hirth+13
# Renewables
# Onshore wind
if 'windon' not in cost.index:
cost.loc['windon',:] = pd.Series(
{'lifetime': 20.,
'ICi': 1e3, #[Euro/kW]
'FCi': 0., #[Euro/kW/year]
'vai': 1.5e-5, #[Euro/kWh] #variable cost#
'fui': 0., #[Euro/kWht] #fuel cost#
'efi': 1., #efficiency#
'CO2int': 0.})
if 'windoff' not in cost.index:
cost.loc['windoff',:] = pd.Series(
{'lifetime': 20.,
'ICi': 2e3, #[Euro/kW]
'FCi': 0., #[Euro/kW/year]
'vai': 2e-5, #[Euro/kWh] #variable cost#
'fui': 0., #[Euro/kWht] #fuel cost#
'efi': 1., #efficiency#
'CO2int': 0.})
if 'solar' not in cost.index:
cost.loc['solar',:] = pd.Series(
{'lifetime': 20.,
'ICi': 1e3, #[Euro/kW]
'FCi': 0., #[Euro/kW/year]
'vai': 1e-5, #[Euro/kWh] #variable cost#
'fui': 0., #[Euro/kWht] #fuel cost#
'efi': 1., #efficiency#
'CO2int': 0.})
# Globals
cost['rate'] = discountrate
# Switch units to refer to MW
cost.loc[:,['ICi', 'FCi', 'vai', 'fui', 'CO2int']] *= 1e3
# Derived columns
cost['afi'] = annuity(cost['lifetime'], cost['rate'])
cost['VCi'] = cost['vai'] + cost['fui'] / cost['efi'] #[Euro/MWhel] #total variable cost (w/o CO2 costs)
cost['wki'] = (cost['afi'] * cost['ICi'] + cost['FCi'])
    #[Euro/MW/year] annualized fixed costs
cost['wbi'] = (cost['VCi'] + (CO2cost * cost['CO2int']/cost['efi']))
    #[Euro/MWh] variable energy cost per MWh including CO2 costs
return cost
def get_full_cost_CO2(ref,CO2cost):
'''Return cost dictionary for different fuel types,
including total fix costs 'wki', and CO2 price dep. variable costs 'wbi'.
Power costs in [Eur/MW/year], Energy costs in [Eur/MWh]'''
cost=get_cost(ref, CO2cost=CO2cost)
return cost
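

def _cost_example():
    """Illustrative sketch (hypothetical helper, not called by the library):
    fetch the 'diw2030' cost table at a CO2 price of 20 Eur/t (consistent with
    the t/MWh CO2 intensities above) and read off the annualized fixed cost
    'wki' [Eur/MW/year] and the variable cost 'wbi' [Eur/MWh] for CCGT."""
    cost = get_full_cost_CO2('diw2030', 20.)
    return cost.loc['CCGT', ['wki', 'wbi']]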
def _get_intersect(costCO2,typ1,typ2):
'''Return the point of intersection in the (util factor, cost)-plane
between two linear cost functions.'''
#y=V1*x+I1; y=V2*x+I2
# I1,I2=costCO2['wki'][np.array([typ1,typ2])]
# V1,V2=costCO2['wbi'][np.array([typ1,typ2])]*8760.
I1=costCO2['wki'][typ1]
I2=costCO2['wki'][typ2]
V1=costCO2['wbi'][typ1]*8760.
V2=costCO2['wbi'][typ2]*8760.
x=(I2-I1)/(V1-V2)
y=I1+V1*x
return np.array([x,y]).T
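# Worked example for _get_intersect: with annualized fixed costs I1=100, I2=300
# and full-load variable costs V1=500, V2=100 (same units), the two cost lines
# y = I + V*uf cross at uf = (I2-I1)/(V1-V2) = 200/400 = 0.5 and
# y = 100 + 500*0.5 = 350, i.e. the cheaper-to-build plant is only the cheaper
# option below a 50% utilization factor.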
def get_cheapest(cost):
'''Calculate which technology is cheapest in which utilization factor range.
    Return the index of the technology and its maximum (util factor, cost) position.'''
sortI=np.argsort(cost['wki'])
typ=[]
pos=[]
ufold=0
while sortI.size >1:# and ufold<1:
sects=_get_intersect(cost,sortI[0],sortI[1:])
nskip=sects[:,1].argmin()
ufnew=sects[nskip][0]
if not ufold<ufnew<1:
#no further intersection within range, follow current type to uf=1
break
else: ufold=ufnew
typ.append(sortI[0])
pos.append(sects[nskip])
sortI=sortI[1+nskip:]
typ.append(sortI[0])
pos.append([1,1*cost['wbi'][sortI[0]]*8760.+cost['wki'][sortI[0]]])
return np.array(typ),np.array(pos)
|
gpl-3.0
|
dgasmith/EEX_scratch
|
tests/test_datalayer.py
|
1
|
25755
|
"""
Tests the datalayer object for EEX
"""
import eex
import pytest
import pandas as pd
import numpy as np
from eex.testing import df_compare, dict_compare
from eex import nb_converter
# Set the Seed
np.random.seed(0)
# Any parameters to loop over
_backend_list = ["HDF5", "Memory"]
def _build_atom_df(nmols):
"""
    Builds a random atoms DataFrame for testing: three atoms (O, H, H) per
    molecule with fixed types and charges and random coordinates.
    """
    natoms = nmols * 3
    atom_df = pd.DataFrame()
    atom_df["atom_index"] = np.arange(natoms)
    atom_df["molecule_index"] = np.repeat(np.arange(nmols), 3)
    atom_df["atom_name"] = np.tile(["O", "H", "H"], nmols)
    atom_df["atom_type"] = np.tile([1, 2, 2], nmols)
    atom_df["charge"] = np.tile([-0.8, 0.4, 0.4], nmols)
    atom_df["X"] = np.random.rand(natoms)
    atom_df["Y"] = np.random.rand(natoms)
    atom_df["Z"] = np.random.rand(natoms)
    atom_df.set_index("atom_index", inplace=True)
    return atom_df
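# For reference: _build_atom_df(1) yields one water-like molecule, i.e. three
# rows (atom_index 0-2) named O, H, H with atom types 1, 2, 2, charges
# -0.8, 0.4, 0.4 and random X/Y/Z coordinates, indexed by atom_index.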
@pytest.mark.parametrize("backend", _backend_list)
def test_df_atoms(backend):
"""
Tests adding bonds as a DataFrame
"""
dl = eex.datalayer.DataLayer("test_df_bonds", backend=backend)
tmp_df = _build_atom_df(10)
# Add a random DataFrame with name "atoms" to check interference with "add_atoms"
rand_df = pd.DataFrame(np.random.rand(4, 4))
dl.add_other("atoms", rand_df)
dl.add_atoms(tmp_df.iloc[:5], by_value=True)
dl.add_atoms(tmp_df.iloc[5:], by_value=True)
# Test count functionality works
assert dl.get_atom_count() == 30
assert dl.get_atom_count("charge") == 30
assert dl.get_atom_count("xyz") == 30
dl_df = dl.get_atoms(None, by_value=True)
# Compare DL df
assert df_compare(tmp_df, dl_df)
# Compare rand DF
dl_rand_df = dl.get_other("atoms")
assert df_compare(rand_df, dl_rand_df)
@pytest.mark.parametrize("backend", _backend_list)
def test_misc(backend):
dl = eex.datalayer.DataLayer("test_misc_functions", backend=backend)
atoms = _build_atom_df(2)
dl.add_atoms(atoms)
with pytest.raises(AttributeError):
dl.call_by_string("not_a_function")
    assert set(dl.list_tables()) == {'atom_name', 'xyz', 'molecule_index', 'charge', 'atom_type'}
    assert dl.list_other_tables() == []
def test_add_atom_parameter():
dl = eex.datalayer.DataLayer("test_add_atom_parameters")
# Test duplicate and uid add of same
assert 0 == dl.add_atom_parameter("mass", 5.0)
assert 0 == dl.add_atom_parameter("mass", 5.0)
assert 0 == dl.add_atom_parameter("mass", 5.0, uid=0)
# Once more
assert 1 == dl.add_atom_parameter("mass", 6.0)
assert 1 == dl.add_atom_parameter("mass", 6.0, uid=1)
# Test out of order adds
assert 2 == dl.add_atom_parameter("mass", 7.0, uid=2)
assert 5 == dl.add_atom_parameter("mass", 8.0, uid=5)
assert 3 == dl.add_atom_parameter("mass", 9.0)
assert 3 == dl.add_atom_parameter("mass", {"mass": 9.0})
# Test possible raises
with pytest.raises(KeyError):
dl.add_atom_parameter("mass", 6.0, uid=0)
def test_add_atom_parameter_units():
dl = eex.datalayer.DataLayer("test_add_atom_parameters")
# Test duplicate and uid add of same
assert 0 == dl.add_atom_parameter("mass", 5.0, utype="kilogram / mol")
assert pytest.approx(5.0) == dl.get_atom_parameter("mass", 0, utype="kilogram / mol")
assert pytest.approx(5000.0) == dl.get_atom_parameter("mass", 0, utype="gram / mol")
assert pytest.approx(5000.0) == dl.get_atom_parameter("mass", 0, utype={"mass": "gram / mol"})
def test_add_term_parameter():
"""
Test adding parameters to the DL object
"""
dl = eex.datalayer.DataLayer("test_add_term_parameters")
two_body_md = eex.metadata.get_term_metadata(2, "forms", "harmonic")
three_body_md = eex.metadata.get_term_metadata(3, "forms", "harmonic")
# Check duplicates
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0])
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0])
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0 + 1.e-10])
# New parameters
assert 1 == dl.add_term_parameter(2, "harmonic", [4.0, 6.0])
assert 0 == dl.add_term_parameter(3, "harmonic", [4.0, 6.0])
# Check uid types
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0], uid=0)
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0 + 1.e-10], uid=0)
# Add out of order uid
assert 10 == dl.add_term_parameter(2, "harmonic", [9.0, 9], uid=10)
assert 2 == dl.add_term_parameter(2, "harmonic", [9.0, 10.0])
# Do we want to allow forced dups?
assert 15 == dl.add_term_parameter(2, "harmonic", [22.0, 22.0], uid=15)
assert 11 == dl.add_term_parameter(2, "harmonic", [22.0, 22.0], uid=11)
# Check add by dict
mdp = two_body_md["parameters"]
assert 0 == dl.add_term_parameter(2, "harmonic", {mdp[0]: 4.0, mdp[1]: 5.0})
assert 1 == dl.add_term_parameter(2, "harmonic", {mdp[0]: 4.0, mdp[1]: 6.0})
assert 3 == dl.add_term_parameter(2, "harmonic", {mdp[0]: 4.0, mdp[1]: 7.0})
with pytest.raises(KeyError):
dl.add_term_parameter(2, "harmonic", {mdp[0]: 4.0, "turtle": 5.0})
mdp = three_body_md["parameters"]
assert 0 == dl.add_term_parameter(3, "harmonic", {mdp[0]: 4.0, mdp[1]: 6.0})
# Check uid type
with pytest.raises(TypeError):
dl.add_term_parameter(2, "harmonic", [11.0, 9.0], uid="turtle")
# Validate parameter data
with pytest.raises(ValueError):
dl.add_term_parameter(2, "harmonic", [4.0, 5.0, 3.0])
with pytest.raises(TypeError):
dl.add_term_parameter(2, "harmonic", [11.0, "duck"])
# Check name errors
with pytest.raises(KeyError):
dl.add_term_parameter("turtle", "harmonic", [4.0, 5.0, 3.0])
with pytest.raises(KeyError):
dl.add_term_parameter(2, "harmonic_abc", [4.0, 5.0])
def test_add_term_parameters_units():
"""
Test adding parameters to the DL object with units
"""
dl = eex.datalayer.DataLayer("test_add_parameters_units")
two_body_md = eex.metadata.get_term_metadata(2, "forms", "harmonic")
three_body_md = eex.metadata.get_term_metadata(3, "forms", "harmonic")
# Test unit types
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0])
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0])
# Same units
utype_2b = {"K": "(kJ / mol) * angstrom ** -2", "R0": "angstrom"}
utype_2bl = [utype_2b["K"], utype_2b["R0"]]
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 4.0, "R0": 5.0}, utype=utype_2b)
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 4.0, "R0": 5.0}, utype=utype_2bl)
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0], utype=utype_2b)
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0], utype=utype_2bl)
# Scale by 2
utype_2b = {"K": "0.5 * (kJ / mol) * angstrom ** -2", "R0": "0.5 * angstrom"}
utype_2bl = [utype_2b["K"], utype_2b["R0"]]
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 8.0, "R0": 10.0}, utype=utype_2b)
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 8.0, "R0": 10.0}, utype=utype_2bl)
assert 0 == dl.add_term_parameter(2, "harmonic", [8.0, 10.0], utype=utype_2b)
assert 0 == dl.add_term_parameter(2, "harmonic", [8.0, 10.0], utype=utype_2bl)
# Different unit type
utype_2b = {"K": "0.5 * (kJ / mol) * angstrom ** -2", "R0": "picometers"}
assert 0 == dl.add_term_parameter(2, "harmonic", [8.0, 500.0], utype=utype_2b)
# Finally a different parameter type all together
utype_2b = {"K": "0.5 * (kJ / mol) * angstrom ** -2", "R0": "picometers"}
assert 1 == dl.add_term_parameter(2, "harmonic", [8.0, 5.0], utype=utype_2b)
def test_get_term_parameter():
"""
Test obtaining parameters from the DL
"""
dl = eex.datalayer.DataLayer("test_get_parameters")
# Add a few parameters
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 4.0, "R0": 5.0})
assert 1 == dl.add_term_parameter(2, "harmonic", {"K": 6.0, "R0": 7.0})
parm1 = dl.get_term_parameter(2, 0)
assert parm1[0] == "harmonic"
assert dict_compare(parm1[1], {"K": 4.0, "R0": 5.0})
assert dict_compare(parm1[1], {"K": 4.0, "R0": 5.0 + 1.e-12})
parm2 = dl.get_term_parameter(2, 1)
assert parm2[0] == "harmonic"
assert dict_compare(parm2[1], {"K": 6.0, "R0": 7.0})
with pytest.raises(KeyError):
dl.get_term_parameter(2, 1231234123)
def test_get_term_parameters_units():
"""
Test obtaining parameters from the DL with units
"""
dl = eex.datalayer.DataLayer("test_get_parameters_units")
utype_2b = {"K": "(kJ / mol) * angstrom ** -2", "R0": "angstrom"}
assert 0 == dl.add_term_parameter(2, "harmonic", {"K": 4.0, "R0": 5.0}, utype=utype_2b)
assert 1 == dl.add_term_parameter(2, "harmonic", {"K": 6.0, "R0": 7.0}, utype=utype_2b)
utype_2scale = {"K": "2.0 * (kJ / mol) * angstrom ** -2", "R0": "2.0 * angstrom"}
parm1 = dl.get_term_parameter(2, 0, utype=utype_2scale)
assert parm1[0] == "harmonic"
assert dict_compare(parm1[1], {"K": 2.0, "R0": 2.5})
parm2 = dl.get_term_parameter(2, 0, utype=[utype_2scale["K"], utype_2scale["R0"]])
assert parm2[0] == "harmonic"
assert dict_compare(parm2[1], {"K": 2.0, "R0": 2.5})
with pytest.raises(TypeError):
dl.get_term_parameter(2, 0, utype={5, 6})
def test_list_parameters():
"""
Test listing parameters uids
"""
dl = eex.datalayer.DataLayer("test_list_parameters")
# Add two-body
assert 0 == dl.add_term_parameter(2, "harmonic", [4.0, 5.0])
assert 1 == dl.add_term_parameter(2, "harmonic", [4.0, 6.0])
assert 9 == dl.add_term_parameter(2, "harmonic", [6.0, 7.0], uid=9)
# Add three-body
assert 0 == dl.add_term_parameter(3, "harmonic", [4.0, 5.0])
assert 1 == dl.add_term_parameter(3, "harmonic", [4.0, 6.0])
assert 9 == dl.add_term_parameter(3, "harmonic", [6.0, 7.0], uid=9)
full_list = dl.list_term_uids()
assert {2, 3, 4} == set(full_list)
assert {0, 1, 9} == set(full_list[2])
assert {0, 1, 9} == set(full_list[3])
assert set() == set(full_list[4])
bond_list = dl.list_term_uids(2)
assert {0, 1, 9} == set(bond_list)
def test_atom_units():
"""
Tests adding bonds as a DataFrame
"""
dl = eex.datalayer.DataLayer("test_df_bonds", backend="memory")
utypes = {"charge": "elementary_charge", "XYZ": "picometers"}
tmp_df = _build_atom_df(2)
tmp_df.index.name = "atom_index"
dl.add_atoms(tmp_df, by_value=True, utype=utypes)
assert df_compare(tmp_df, dl.get_atoms("charge", by_value=True), columns="charge")
# Check multiple output types
xyz_pm = dl.get_atoms("XYZ", by_value=True, utype={"xyz": "picometers"})
assert df_compare(xyz_pm, tmp_df[["X", "Y", "Z"]])
    # Check twice in case we accidentally scale the internal data
xyz_pm = dl.get_atoms("XYZ", by_value=True, utype={"xyz": "picometers"})
assert df_compare(xyz_pm, tmp_df[["X", "Y", "Z"]])
# Check default and specified
xyz_ang1 = dl.get_atoms("XYZ", by_value=True, utype={"xyz": "angstrom"})
xyz_ang2 = dl.get_atoms("XYZ", by_value=True)
assert df_compare(xyz_ang1, xyz_ang2)
assert df_compare(xyz_ang1, xyz_pm * 0.01)
def test_box_size():
dl = eex.datalayer.DataLayer("test_box_size", backend="memory")
tmp = {"a": 5, "b": 6, "c": 7, "alpha": np.pi/3, "beta": 0.0, "gamma": 0.0}
# Normal set/get
dl.set_box_size(tmp)
comp = dl.get_box_size()
assert dict_compare(tmp, comp)
# Set/get with units
utype = {"a": "nanometers", "b": "nanometers", "c": "nanometers", "alpha": "radian", "beta": "radian", "gamma": "radian"}
dl.set_box_size(tmp, utype=utype)
comp = dl.get_box_size(utype=utype)
assert np.isclose(comp["a"], tmp["a"])
assert np.isclose(comp["b"], tmp["b"])
assert np.isclose(comp["c"], tmp["c"])
assert np.isclose(comp["alpha"], tmp["alpha"])
assert np.isclose(comp["beta"], tmp["beta"])
assert np.isclose(comp["gamma"], tmp["gamma"])
# Set/get with units
utype_1 = {"a": "angstroms", "b": "angstroms", "c": "angstroms", "alpha": "degree", "beta": "degree", "gamma": "degree"}
dl.set_box_size(tmp, utype=utype)
comp = dl.get_box_size(utype=utype_1)
assert np.isclose(comp["a"], tmp["a"] * 10)
assert np.isclose(comp["b"], tmp["b"] * 10)
assert np.isclose(comp["c"], tmp["c"] * 10)
assert np.isclose(comp["alpha"], tmp["alpha"] * 180.0 / np.pi)
assert np.isclose(comp["beta"], tmp["beta"] * 180.0 / np.pi)
assert np.isclose(comp["gamma"], tmp["gamma"] * 180.0 / np.pi)
utype_2 = {"a": "miles", "b": "miles", "c": "miles", "alpha": "radians", "beta": "radians", "gamma": "radians"}
with pytest.raises(AssertionError):
dl.set_box_size(tmp, utype=utype_2)
comp = dl.get_box_size()
assert dict_compare(tmp, comp)
def test_box_center():
dl = eex.datalayer.DataLayer("test_box_center", backend="memory")
tmp = {"x": 5, "y": 6, "z": 7}
# Normal set/get
dl.set_box_center(tmp)
comp = dl.get_box_center()
assert dict_compare(tmp, comp)
# Check failure
with pytest.raises(KeyError):
dl.set_box_center({'x': tmp['x']})
# Set/get with units
utype = {"x": "nanometers", "y": "nanometers", "z": "nanometers"}
dl.set_box_center(tmp, utype=utype)
comp = dl.get_box_center(utype=utype)
assert np.isclose(comp["x"], tmp["x"])
assert np.isclose(comp["y"], tmp["y"])
assert np.isclose(comp["z"], tmp["z"])
# Set/get with units
utype_1 = {"x": "angstroms", "y": "angstroms", "z": "angstroms"}
dl.set_box_center(tmp, utype=utype)
comp = dl.get_box_center(utype=utype_1)
assert np.isclose(comp["x"], tmp["x"] * 10)
assert np.isclose(comp["y"], tmp["y"] * 10)
assert np.isclose(comp["z"], tmp["z"] * 10)
utype_2 = {"x": "miles", "y": "miles", "z": "miles"}
with pytest.raises(AssertionError):
dl.set_box_center(tmp, utype=utype_2)
comp = dl.get_box_center()
assert dict_compare(tmp, comp)
def test_add_nb_parameter():
# Create empty data layer
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model="AB", nb_parameters=[1.0, 1.0])
dl.add_nb_parameter(atom_type=2, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters=[1.0, 1.0])
# Add AB LJ parameters to data layer - add to two atoms
dl.add_nb_parameter(atom_type=1, atom_type2=2, nb_name="LJ", nb_model="AB", nb_parameters=[2.0, 2.0])
# Grab stored test parameters - will need to replace dl._nb_parameters with dl.get_nb_parameter when implemented
test_parameters = dl._nb_parameters
assert test_parameters[(1, None)]["parameters"] == {'A': 1.0, 'B': 1.0}
assert test_parameters[(2, None)]["parameters"] == {'A': 4.0, 'B': 4.0}
assert test_parameters[(1, 2)]["parameters"] == {'A': 2.0, 'B': 2.0}
dl.add_nb_parameter(atom_type=1, nb_name="Buckingham", nb_model=None, nb_parameters=[1.0, 1.0, 1.0])
with pytest.raises(KeyError):
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model=None, nb_parameters=[1.0, 1.0])
def test_add_nb_parameter_units():
# Create empty data layer
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(
atom_type=1,
nb_name="LJ",
nb_model="AB",
nb_parameters=[1.0, 1.0],
utype=["kJ * mol ** -1 * nanometers ** 12", "kJ * mol ** -1 * nanometers ** 6"])
# Add AB LJ parameters to data layer - add to two atoms
dl.add_nb_parameter(
atom_type=1,
nb_name="LJ",
nb_model="AB",
nb_parameters=[2.0, 2.0],
atom_type2=2,
utype=["kJ * mol ** -1 * nanometers ** 12", "kJ * mol ** -1 * nanometers ** 6"])
# Test atom types out of order
dl.add_nb_parameter(
atom_type=3,
nb_name="LJ",
nb_model="AB",
nb_parameters=[2.0, 2.0],
atom_type2=1,
utype=["kJ * mol ** -1 * nanometers ** 12", "kJ * mol ** -1 * nanometers ** 6"])
# Grab stored test parameters
test_parameters = dl.list_nb_parameters(nb_name="LJ")
# make sure this grabs all stored values
    assert set(test_parameters) == {(1, None), (1, 2), (1, 3)}
# Check conversion
assert dict_compare(test_parameters[(1, None)], {'A': 1.e12, 'B': 1.e6})
assert dict_compare(test_parameters[(1, 2)], {'A': 2.e12, 'B': 2.e6})
assert dict_compare(test_parameters[(1, 3)], {'A': 2.e12, 'B': 2.e6})
# Check keywords
test_parameters2 = dl.list_nb_parameters(nb_name="LJ", itype="single")
assert(list(test_parameters2) == [(1, None)])
test_parameters3 = dl.list_nb_parameters(nb_name="LJ", itype="pair")
assert (list(test_parameters3) == [(1, 2), (1,3)])
def test_get_nb_parameter():
# Create empty data layer
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model="AB", nb_parameters={'A': 1.0, 'B': 2.0})
# Add Buckingham parameter to datalayer
dl.add_nb_parameter(atom_type=2, nb_name="Buckingham", nb_parameters={"A": 1.0, "C": 1.0, "rho": 1.0})
# The following should raise an error because (1,2) interaction is not set
with pytest.raises(KeyError):
dl.get_nb_parameter(atom_type=1, atom_type2=2, nb_model="AB")
# Test that what returned is expected
assert dict_compare(dl.get_nb_parameter(atom_type=2), {"A": 1.0, "C": 1.0, "rho": 1.0})
assert dict_compare(dl.get_nb_parameter(atom_type=1, nb_model="AB"), {'A': 1.0, 'B': 2.0})
# Test conversion of AB to different forms
assert dict_compare(
dl.get_nb_parameter(atom_type=1, nb_model="epsilon/sigma"), {'epsilon': 1.0,
'sigma': (1. / 2.)**(1. / 6.)})
assert dict_compare(dl.get_nb_parameter(atom_type=1, nb_model="epsilon/Rmin"), {'epsilon': 1.0, 'Rmin': 1})
# Test that correct parameters are pulled from data layer based on name
assert (set(dl.list_stored_nb_types()) == set(["LJ", "Buckingham"]))
assert dict_compare(dl.list_nb_parameters(nb_name="LJ"), {(1, None): {'A': 1., 'B': 2.}})
comp = {(2, None): {'A': 1.0, "C": 1.0, "rho": 1.0}}
assert dict_compare(dl.list_nb_parameters(nb_name="Buckingham"), comp)
# Test translation of units
comp = {(1, None): {'A': 1.e-12, 'B': 2.e-6}}
result = dl.list_nb_parameters(
nb_name="LJ", utype={'A': "kJ * mol ** -1 * nanometers ** 12",
'B': "kJ * mol ** -1 * nanometers ** 6"})
assert dict_compare(result, comp)
# Sigma/epsilon test
comp = {'sigma': (1. / 2.)**(1. / 6.), 'epsilon': eex.units.conversion_factor('kJ', 'kcal')}
result = dl.get_nb_parameter(
atom_type=1, nb_model='epsilon/sigma', utype={'epsilon': 'kcal * mol ** -1',
'sigma': 'angstrom'})
assert dict_compare(result, comp)
def test_mixing_rule():
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add mixing rule to datalayer
dl.set_mixing_rule("geometric")
# Check failure
with pytest.raises(ValueError):
dl.set_mixing_rule("test")
with pytest.raises(TypeError):
dl.set_mixing_rule(5)
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 1.0, 'sigma': 2.0})
# Add Buckingham parameter to datalayer
dl.add_nb_parameter(atom_type=2, nb_name="Buckingham", nb_parameters={"A": 1.0, "C": 1.0, "rho": 1.0})
    # This should fail because we cannot combine LJ and Buckingham parameters.
with pytest.raises(ValueError):
dl.mix_LJ_parameters(atom_type1=1, atom_type2=2)
with pytest.raises(KeyError):
dl.mix_LJ_parameters(atom_type1=1, atom_type2=3)
# Overwrite Buckingham parameter
dl.add_nb_parameter(atom_type=2, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 1.0, 'sigma': 1.0})
# Apply mixing rule
dl.mix_LJ_parameters(atom_type1=1, atom_type2=2)
# Get values from datalayer and check
params = dl.get_nb_parameter(atom_type=1,atom_type2=2, nb_model="epsilon/sigma")
ans = {'sigma': 2 ** (1./2.), 'epsilon': 1.}
assert dict_compare(params, ans)
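# Mixing arithmetic behind the expected values here and in test_mixing_table:
# with sigma = 2.0 / 1.0 and epsilon = 1.0 / 1.0 for the two atom types, the
# 'geometric' rule gives sigma_12 = sqrt(2.0*1.0) = 2**0.5 and
# epsilon_12 = sqrt(1.0*1.0) = 1.0; the 'arithmetic' (Lorentz-Berthelot) rule
# used in test_mixing_table averages sigma, (2.0+1.0)/2 = 1.5, and keeps the
# geometric mean for epsilon, sqrt(1.0*2.0) = 2**0.5.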
def test_mixing_table():
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add mixing rule to datalayer
dl.set_mixing_rule("arithmetic")
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 1.0, 'sigma': 2.0})
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=2, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 2.0, 'sigma': 1.0})
# Apply mixing rule
dl.build_LJ_mixing_table()
# Get mixed parameters
pairIJ = dl.list_nb_parameters(nb_name="LJ", itype="pair", nb_model="epsilon/sigma")
ans = {
(1,1): {
'sigma': 2.,
'epsilon': 1,
},
(1, 2): {
'sigma': 1.5,
'epsilon': (2.) ** (1./2.)
},
(2, 2): {
'sigma': 1.,
'epsilon': 2.
},
}
assert(dict_compare(pairIJ, ans))
def test_nb_scaling():
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add mixing rule to datalayer
dl.set_mixing_rule("arithmetic")
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=1, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 1.0, 'sigma': 2.0})
# Add AB LJ parameters to data layer - add to single atom
dl.add_nb_parameter(atom_type=2, nb_name="LJ", nb_model="epsilon/sigma", nb_parameters={'epsilon': 2.0, 'sigma': 1.0})
# Apply mixing rule
dl.build_LJ_mixing_table()
# Build scaling dataframe
scale_df = pd.DataFrame()
scale_df["coul_scale"] = [0.0, 0.0, 0.0]
scale_df["atom_index1"] = [1,1,2]
scale_df["atom_index2"] = [2,3,3]
scale_df["vdw_scale"] = [0.5, 0.5, 0.5]
# Check adding data
dl.set_pair_scalings(scale_df)
# Check function failures
new_df = scale_df.copy()
new_df["test"] = [0,0,0]
with pytest.raises(KeyError):
dl.set_pair_scalings(new_df)
with pytest.raises(ValueError):
dl.set_pair_scalings(scale_df[["atom_index1", "atom_index2"]])
scale_df.drop(['atom_index1'], axis=1, inplace=True)
with pytest.raises(KeyError):
dl.set_pair_scalings(scale_df)
# Retrieve information from dl
with pytest.raises(KeyError):
dl.get_pair_scalings(nb_labels=["not_a_label"])
stored_scalings = dl.get_pair_scalings()
for col in stored_scalings.columns:
assert set(scale_df[col].values) == set(stored_scalings[col].values)
def test_set_nb_scaling_factors():
dl = eex.datalayer.DataLayer("test_add_nb_parameters", backend="memory")
# Create system with three molecules
atom_sys = _build_atom_df(3)
# Add atomic system to datalayer
dl.add_atoms(atom_sys)
# Add bonds to system
bond_df = pd.DataFrame()
bond_data = np.array([[0,1,0], [1,2,0]])
bond_columns = ["atom1", "atom2", "term_index"]
for num, name in enumerate(bond_columns):
bond_df[name] = bond_data[:, num]
dl.add_bonds(bond_df)
# Add an angle
angle_df = pd.DataFrame()
angle_data = np.array([[0,1,2,0]])
angle_columns = ["atom1", "atom2", "atom3", "term_index"]
for num, name in enumerate(angle_columns):
angle_df[name] = angle_data[:,num]
dl.add_angles(angle_df)
scaling_factors = {
"coul": {
"scale12": 0.0,
"scale13": 0.25,
"scale14": 0.75,
},
"vdw": {
"scale12": 0.0,
"scale13": 0.5,
"scale14": 0.75,
}
}
# Test failures
with pytest.raises(TypeError):
dl.set_nb_scaling_factors("not a dictionary")
# Test adding to dl
dl.set_nb_scaling_factors(scaling_factors)
# Retrieve from dl
stored_scaling = dl.get_nb_scaling_factors()
assert eex.testing.dict_compare(scaling_factors, stored_scaling)
# Test build_scaling_list
dl.build_scaling_list()
# Retrieve from datalayer
scaling = dl.get_pair_scalings()
assert(set(scaling['vdw_scale'].values) == set([0, 0.5]))
assert(set(scaling['coul_scale'].values) == set([0, 0.25]))
|
bsd-3-clause
|
MicrosoftGenomics/FaST-LMM
|
fastlmm/inference/lmm_cov.py
|
1
|
49146
|
import numpy as np
import numpy.linalg as la
import scipy.optimize as opt
import scipy.stats as st
import scipy.special as ss
from fastlmm.util.mingrid import *
from fastlmm.util.util import *
import time
import logging
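

# Illustrative sketch (hypothetical helper, not part of FaST-LMM's API): the
# dense covariance matrix implied by the model in the LMM docstring below,
# sigma2 * (h2*K + (1-h2)*I). The class works with the spectral decomposition
# of K instead of forming this matrix for each h2.
def _example_covariance(K, h2=0.5, sigma2=1.0):
    N = K.shape[0]
    return sigma2 * (h2 * K + (1.0 - h2) * np.eye(N))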
class LMM(object):
'''
linear mixed model with up to two kernels
N(y | X*beta + covariates*alpha; sigma2(h2*K + (1-h2)*I),
where
K = G*G^T
Relative to lmm.py, lmm_cov handles multiple phenotypes, and regresses out covariates before GWAS testing (which makes it O(#covs^3) faster).
Currently, h2 for each phenotype is the same, but much of the code is written to allow h2 to be different (see e.g. nlleval_2k).
This class is intended for a single full-rank kernel (and optionally a low-rank kernel). If two full-rank kernels are at hand,
they must first be combined into a single full-rank kernel.
TODO: Add full support for multiple phenotypes by extending h2 and a2
TODO: Deal with h2_1 parameterization more eloquently (see comments below)
'''
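# Illustrative usage sketch (mirrors the self-test under __main__ at the bottom
# of this file; the variable names here are placeholders, not a fixed API):
#   lmm = LMM(X=covariates, Y=phenotypes, G=snp_design)   # or pass K=kernel
#   opt = lmm.findH2(nGridH2=10)                          # estimate h2
#   res = lmm.nLLeval(h2=opt['h2'], snps=test_snps)       # per-SNP statistics
#   chi2stats = res['beta'] * res['beta'] / res['variance_beta']
#   pv = st.chi2.sf(chi2stats, 1)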
__slots__ = ["linreg","G","Y","X","K","U","S","UX","UY","UUX","UUY","forcefullrank","regressX","numcalls"]
def __init__(self, forcefullrank=False, X=None, linreg=None, Y=None, G=None, K=None, regressX=True, inplace=False):
'''
Args:
forcefullrank : if True, then the code always computes K and runs cubically (default: False)
X : [N x D] np.array of D covariates for N individuals (including bias term).
If None is given, only a bias term is used. (Default: None)
linreg : optionally allows to provide pre-computed Linreg object instead of covariates
(default: None)
Y : [N x P] np.array holding P phenotypes for N individuals
G : [N x k] np.array holding k design vectors for building linear kernel K
K : [N x N] np.array positive semi-definite Kernel matrix
regressX : regress out covariates to speed up computations? (default: True)
inplace : set kernel without copying? (default: False)
'''
self.numcalls = 0
self.setX(X=X, regressX=regressX, linreg=linreg) #set the covariates (needs to be first)
self.forcefullrank = forcefullrank
self.setK(K=K, G=G, inplace=inplace) #set the kernel, if available
self.setY(Y=Y) #set the phenotypes
def setY(self, Y):
'''
set the phenotype y.
Args:
Y : [N x P] np.array holding P phenotypes for N individuals
'''
self.Y = Y
self.UY = None
self.UUY = None
def setX(self, X=None, regressX=True, linreg=None):
"""
set the covariates.
Args:
X: [N x D] np.array of D covariates for N individuals (including bias term).
If None is given, only a bias term is used. (Default: None)
regressX: regress out covariates to speed up computations? (default: True)
linreg: optionally allows to provide pre-computed Linreg object instead of covariates
(default: None)
"""
self.X = X
self.UX = None
self.UUX = None
self.linreg = linreg
self.regressX = regressX
if self.linreg is None and regressX:
self.linreg = Linreg(X=self.X)
def setSU_fromK(self):
"""
compute the spectral decomposition from full kernel (full rank computations).
"""
N = self.K.shape[0]
D = self.linreg.D
self.K.flat[::N+1]+=1.0
K_ = self.linreg.regress(Y=self.K)
K_ = self.linreg.regress(Y=K_.T)
[self.S,self.U] = la.eigh(K_)
if np.any(self.S < -0.1):
logging.warning("kernel contains a negative Eigenvalue")
self.U = self.U[:,D:N]
self.S = self.S[D:N] - 1.0
def setSU_fromG(self):
"""
compute the spectral decomposition from design matrix G for linear kernel
(allows low rank computations, if G has more rows than columns)
"""
k = self.G.shape[1]
N = self.G.shape[0]
if k:
if ((not self.forcefullrank) and (k < N)):
#it is faster using the eigen decomposition of G.T*G but this is more
#accurate
PxG = self.linreg.regress(Y=self.G)
try:
[self.U,self.S,V] = la.svd(PxG,False,True)
# SVD is defined to be all positive in S, so the following will always be true (unless the SVD is buggy).
# if np.any(self.S < -0.1):
# logging.warning("kernel contains a negative Eigenvalue")
inonzero = self.S > 1E-10
self.S = self.S[inonzero]
self.S = self.S * self.S
self.U = self.U[:,inonzero]
except la.LinAlgError: # revert to Eigenvalue decomposition
print "Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate"
[S,V] = la.eigh(PxG.T.dot(PxG))
if np.any(S < -0.1):
logging.warning("kernel contains a negative Eigenvalue")
inonzero = (S > 1E-10)
self.S = S[inonzero]
#self.S*=(N/self.S.sum())
self.U = self.G.dot(V[:,inonzero] / np.sqrt(self.S))
else:
K = self.G.dot(self.G.T)
self.setK(K=K)
self.setSU_fromK()
pass
else:#rank of kernel = 0 (linear regression case)
self.S = np.zeros((0))
self.U = np.zeros_like(self.G)
def getSU(self):
"""
get the spectral decomposition of the kernel matrix. Computes it if needed.
"""
if self.U is None or self.S is None:
if self.K is not None:
logging.debug("starting 'lmm_cov.setSU_fromK()'")
self.setSU_fromK()
logging.debug("finished 'lmm_cov.setSU_fromK()'")
elif self.G is not None:
logging.debug("starting 'lmm_cov.setSU_fromG()'")
self.setSU_fromG()
logging.debug("finished 'lmm_cov.setSU_fromG()'")
else:
raise Exception("No Kernel is set. Cannot return U and S.")
return self.S, self.U
def rotate(self, A):
"""
rotate a matrix A with the eigenvalues of the kernel matrix.
Args:
A: [N x D] np.array
Returns:
U.T.dot(A)
A - U.dot(U.T.dot(A)) (if kernel is full rank this is None)
"""
S,U = self.getSU()
N = A.shape[0]
D = self.linreg.D
A = self.linreg.regress(A)
# treat pathological case where a variable is explained by the covariates
A_std = A.std(0)
A[:,A_std<=1e-10] = 0.0
if (S.shape[0] < N - D):#lowrank case
# A = self.linreg.regress(A)
UA = self.U.T.dot(A)
UUA = A - U.dot(UA)
else:
#A=self.linreg.regress(A)
UA = U.T.dot(A)
#A1 = UA=U.T.dot(A)
#diff = np.absolute(A1-UA).sum()
#print diff
#print UA.shape
UUA = None
return UA,UUA
def getUY(self, idx_pheno=None):
"""
get the rotated phenotype matrix
Args:
idx_pheno boolean numpy.array of phenotype indices to use
(default None, returns all)
Returns:
UY U.T.dot(Y) rotated phenotypes
UUY None if kernel is full rank, otherwise Y-U.dot(U.T.dot(Y))
"""
if self.UY is None:
self.UY,self.UUY = self.rotate(A=self.Y)
if idx_pheno is None:
return self.UY,self.UUY
else:
UY = self.UY[:,idx_pheno]
UUY = None
if self.UUY is not None:
UUY = self.UUY[:,idx_pheno]
return UY, UUY
def setK(self, K=None, G=None, inplace=False):
'''
set the Kernel K.
Args:
K : [N*N] array, random effects covariance (positive semi-definite)
G : [NxS] array of random effects (will be used for linear kernel)
inplace: set kernel without copying? (default: False)
'''
self.clear_cache()
if K is not None:
if inplace:
self.K = K
else:
self.K = K.copy()
elif G is not None:
if inplace:
self.G = G
else:
self.G = G.copy()
def clear_cache(self, reset_K=True):
"""
delete all cached objects
Args:
reset_K: also delete the kernel matrix and the kernel design matrix G? (default: True)
"""
self.U = None
self.S = None
self.UY = None
self.UUY = None
self.UX = None
self.UUX = None
if reset_K:
self.G = None
self.K = None
def innerLoop_2K(self, h2=0.5, nGridA2=10, minA2=0.0, maxA2=1.0, i_up=None, i_G1=None, UW=None, UUW=None, **kwargs):
'''
For a given h2, finds the optimal kernel mixture weight a2 and returns the negative log-likelihood
Find the optimal a2 given h2, such that K=(1.0-a2)*K0+a2*K1. Performs a double loop optimization (could be expensive for large grid-sizes)
(default maxA2 value is set to 1 as loss of positive definiteness of the final model covariance only depends on h2, not a2)
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T)
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
Args:
h2 : "heritability" of the kernel matrix
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
i_up : indices of columns in W corresponding to columns from the first kernel that are subtracted off
i_G1 : indices of columns in W corresponding to columns of the design matrix for the second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
Returns:
dictionary containing the model parameters at the optimal a2
'''
#TODO: ckw: is this method needed? seems like a wrapper around findA2_2K!
#Christoph and Chris: probably not needed
if self.Y.shape[1] > 1:
print "not implemented"
raise NotImplementedError("only single pheno case implemented")
#if self.K0 is not None:
# self.setK(K0 = self.K0, K1 = self.K1, a2 = a2)
#else:
# self.setG(G0 = self.G0, G1 = self.G1, a2 = a2)
#self.setX(self.X)
#self.sety(self.y)
return self.findA2_2K(nGridA2=nGridA2, minA2=minA2, maxA2=maxA2, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, h2=h2, **kwargs)
def findA2_2K(self, nGridA2=10, minA2=0.0, maxA2=1.0, verbose=False, i_up=None, i_G1=None, UW=None, UUW=None, h2=0.5, **kwargs):
'''
For a given h2, finds the optimal kernel mixture weight a2 and returns the negative log-likelihood
Find the optimal a2 given h2, such that K=(1.0-a2)*K0+a2*K1. Performs a double loop optimization (could be expensive for large grid-sizes)
(default maxA2 value is set to 1 as loss of positive definiteness of the final model covariance only depends on h2, not a2)
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T)
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
Args:
h2 : "heritability" of the kernel matrix
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
verbose : verbose output? (default: False)
i_up : indices of columns in W corresponding to columns from the first kernel that are subtracted off
i_G1 : indices of columns in W corresponding to columns of the design matrix for the second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
Returns:
dictionary containing the model parameters at the optimal a2
'''
if self.Y.shape[1] > 1:
print "not implemented"
raise NotImplementedError("only single pheno case implemented")
self.numcalls = 0
resmin = [None]
def f(x,resmin=resmin, **kwargs):
self.numcalls+=1
t0 = time.time()
h2_1 = (1.0 - h2) * x
res = self.nLLeval_2K(h2_1=h2_1, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, h2=h2, **kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
t1 = time.time()
#print "one objective function call took %.2f seconds elapsed" % (t1-t0)
#import pdb; pdb.set_trace()
return res['nLL']
if verbose: print "finda2"
min = minimize1D(f=f, nGrid=nGridA2, minval=minA2, maxval=maxA2,verbose=False)
#print "numcalls to innerLoopTwoKernel= " + str(self.numcalls)
return resmin[0]
def findH2_2K(self, nGridH2=10, minH2=0.0, maxH2=0.99999, nGridA2=10, minA2=0.0, maxA2=1.0, i_up=None, i_G1=None, UW=None, UUW=None, **kwargs):
'''
Find the optimal h2 and a2 for a given K (and G1 - if provided in W).
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T)
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
Args:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minH2 : minimum value for h2 optimization
maxH2 : maximum value for h2 optimization
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
i_up : indices of columns in W corresponding to columns from the first kernel that are subtracted off
i_G1 : indices of columns in W corresponding to columns of the design matrix for the second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
Returns:
dictionary containing the model parameters at the optimal h2 (and a2 if a G1 is provided in W)
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
if self.Y.shape[1] > 1:
print "not implemented"
raise NotImplementedError("only single pheno case implemented")
resmin = [None]
noG1 = True
if i_G1.any():
noG1 = False
def f(x,resmin=resmin,**kwargs):
if noG1:
res = self.nLLeval_2K(h2_1=0.0, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, h2=x, **kwargs)
else:
res = self.innerLoop_2K(h2=x, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, nGridA2=nGridA2, minA2=minA2, maxA2=maxA2, **kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
return res['nLL']
min = minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2)
return resmin[0]
def find_log_delta(self, sid_count=1, min_log_delta=-5, max_log_delta=10, nGrid=10, **kwargs):
'''
perform search for optimal log delta (single kernel case)
Args:
sid_count: number of columns (SNPs) in the design matrix of the kernel, used to normalize delta (default: 1)
min_log_delta: minimum value for log delta search (default: -5)
max_log_delta: maximum value for log delta search (default: 10)
nGrid: number of grid points for Brent search intervals (default: 10)
Returns:
dictionary containing the model parameters at the optimal log delta
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin = [None]
#logging.info("starting log_delta search")
def f(x,resmin=resmin,**kwargs):
h2 = 1.0 / (np.exp(x) * sid_count + 1) #We convert from external log_delta to h2 and then back again so that this
#code is most similar to findH2
res = self.nLLeval(h2=h2,**kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
min = minimize1D(f=f, nGrid=nGrid, minval=min_log_delta, maxval=max_log_delta)
res = resmin[0]
internal_delta = 1.0 / res['h2'] - 1.0
ln_external_delta = np.log(internal_delta / sid_count)
res['log_delta'] = ln_external_delta
return res
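# For reference, the delta <-> h2 mapping used in find_log_delta above
# (a restatement of the code, not new functionality):
#   h2 = 1 / (exp(log_delta) * sid_count + 1)
#   internal_delta = 1 / h2 - 1
#   external log_delta = log(internal_delta / sid_count)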
def findH2(self, nGridH2=10, minH2=0.0, maxH2=0.99999, estimate_Bayes=False, **kwargs):
'''
Find the optimal h2 for a given K. Note that this is the single kernel case. So there is no a2.
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
Args:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at (default: 10)
minH2 : minimum value for h2 optimization (default: 0.0)
maxH2 : maximum value for h2 optimization (default: 0.99999)
estimate_Bayes: implement me! (default: False)
Returns:
dictionary containing the model parameters at the optimal h2
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin = [None for i in xrange(self.Y.shape[1])]
#logging.info("starting H2 search")
assert estimate_Bayes == False, "not implemented"
if self.Y.shape[1] > 1:
def f(x):
res = self.nLLeval(h2=x,**kwargs)
#check all results for local minimum:
for i in xrange(self.Y.shape[1]):
if (resmin[i] is None) or (res['nLL'][i] < resmin[i]['nLL']):
resmin[i] = res.copy()
resmin[i]['nLL'] = res['nLL'][i]
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
(evalgrid,resultgrid) = evalgrid1D(f, evalgrid = None, nGrid=nGridH2, minval=minH2, maxval = maxH2, dimF=self.Y.shape[1])
#import ipdb;ipdb.set_trace()
return resmin
elif estimate_Bayes:
def f(x):
res = self.nLLeval(h2=x,**kwargs)
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
(evalgrid,resultgrid) = evalgrid1D(f, evalgrid = None, nGrid=nGridH2, minval=minH2, maxval = maxH2, dimF=self.Y.shape[1])
lik = np.exp(-resultgrid)
evalgrid = lik * evalgrid[:,np.newaxis]
posterior_mean = evalgrid.sum(0) / lik.sum(0)
return posterior_mean
else:
def f(x,resmin=resmin):
res = self.nLLeval(h2=x,**kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
logging.debug("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL'][0]
min = minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2)
#logging.info("search\t{0}\t{1}".format("?",resmin[0]))
return resmin[0]
def posterior_h2(self, nGridH2=1000, minH2=0.0, maxH2=0.99999, **kwargs):
'''
Evaluate the negative log-likelihood on a grid of h2 values for a given K. Note that this is the single kernel case, so there is no a2.
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
Args:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at (default: 1000)
minH2 : minimum value of the h2 grid (default: 0.0)
maxH2 : maximum value of the h2 grid (default: 0.99999)
Returns:
(resmin, evalgrid, resultgrid): the best grid point (a dictionary with 'nLL' and 'h2'), the evaluated h2 grid, and the negative log-likelihoods on that grid
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin = [None]
#logging.info("starting H2 search")
assert self.Y.shape[1] == 1, "only works for single phenotype"
def f(x):
res = self.nLLeval(h2=x,**kwargs)
#check all results for local minimum:
if (resmin[0] is None):
resmin[0] = {'nLL':res['nLL'],'h2':np.zeros_like(res['nLL'])+res['h2']}
else:
for i in xrange(self.Y.shape[1]):
if (res['nLL'][i] < resmin[0]['nLL'][i]):
resmin[0]['nLL'][i] = res['nLL'][i]
resmin[0]['h2'][i] = res['h2']
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
(evalgrid,resultgrid) = evalgrid1D(f, evalgrid = None, nGrid=nGridH2, minval=minH2, maxval = maxH2, dimF=self.Y.shape[1])
#import ipdb;ipdb.set_trace()
return resmin[0], evalgrid, resultgrid
def nLLeval_2K(self, h2=0.0, h2_1=0.0, dof=None, scale=1.0, penalty=0.0, snps=None, UW=None, UUW=None, i_up=None, i_G1=None, subset=False):
'''
TODO: rename to nLLeval
currently h2 is a scalar, but could be modified to be a vector (i.e., a separate h2 for each phenotype); only if-then-elses need to be modified
evaluate -ln( N( y | X*beta , sigma^2( h2*K + h2_1*G1*G1.T + (1-h2-h2_1)*I ) ) ),
where h2>0, h2_1>=0, h2+h2_1 <= 0.99999
If scale is not equal to 1, then the above is generalized from a Normal to a multivariate Student's t distribution.
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T)
G1 is provided as columns in W.
W is provided in rotated form: UW = U.T*W and UUW = (W - U*U.T*W)
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel
to enable subtracting out sub kernels (as for correcting for proximal contamination) -- see i_up and i_G1 below.
UW and UUW can be obtainted by calling rotate on W
(nice interface wrapper for nLLcore)
Args:
h2 : mixture weight between K and Identity (environmental noise)
REML : boolean
if True : compute REML
if False : compute ML
dof : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
scale : Scale parameter that multiplies the shape matrix in Student's multivariate t (default 1.0, corresponding to a Gaussian)
penalty : L2 penalty for the fixed-effect SNPs being tested (default: 0.0)
snps : [N x S] np.array holding S SNPs for N individuals to be tested
i_up : indices of columns in W corresponding to columns from first kernel that are subtracted off
i_G1 : indices of columns in W corresponding to columns of the design matrix for second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
subset : if G1 is a subset of G, then we don't need to subtract and add separately (default: False)
Returns:
Output dictionary:
'nLL' : negative log-likelihood
'sigma2' : the model variance sigma^2 (sigma^2_g+sigma^2_g1+sigma^2_e)
'beta' : [D*1] array of fixed effects weights beta
'h2' : mixture weight between Covariance and noise
'REML' : True: REML was computed, False: ML was computed
'dof' : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
'scale' : Scale parameter that multiplies the Covariance matrix (default 1.0)
'''
N = self.Y.shape[0] - self.linreg.D
P = self.Y.shape[1]
S,U = self.getSU()
k = S.shape[0] # the rank of the data (or dof of the likelihood)
if (h2 < 0.0) or (h2 + h2_1 >= 0.99999) or (h2_1 < 0.0):
return {'nLL':3E20,
'h2':h2,
'h2_1':h2_1,
'scale':scale}
denom = (1.0 - h2 - h2_1) * scale # determine normalization factor
Sd = (h2 * self.S) * scale + denom
if subset: #if G1 is a complete subset of G, then we don't need to subtract and add separately
h2_1 = h2_1 - h2
if UW is not None:
weightW = np.zeros(UW.shape[1])
weightW[i_up] = -h2
weightW[i_G1] = h2_1
else:
weightW = None
Usnps,UUsnps = None,None
if snps is not None:
assert snps.shape[0] == self.Y.shape[0], "shape mismatch between snps and Y"
Usnps,UUsnps = self.rotate(A=snps)
result = self.nLLcore(Sd=Sd, dof=dof, scale=scale, penalty=penalty, UW=UW, UUW=UUW, weightW=weightW, denom=denom, Usnps=Usnps, UUsnps=UUsnps)
result['h2'] = h2
result['h2_1'] = h2_1
return result
def nLLeval(self, h2=0.0, logdelta=None, delta=None, dof=None, scale=1.0, penalty=0.0, snps=None, Usnps=None, UUsnps=None, UW=None, UUW=None, weightW=None, idx_pheno=None):
'''
TODO: rename to be a private function
This function is a hack to fix a parameterization bug regarding h2_1 parameterization in findA2 or findH2 or innerLoop.
Also, it is a different way of parameterizing nLLeval_2k (e.g., it implements the delta parameterization).
evaluate -ln( N( y | X*beta , sigma^2(h2*K + h2*(W*diag(weightW)*W^T) + (1-h2)*I ) )) (in h2 parameterization)
-ln( N( y | X*beta , sigma^2(delta*K + I ))) (in delta parameterization, 1 Kernel only)
If scale is not equal to 1, then the above is generalized from a Normal to a multivariate Student's t distribution.
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T)
G1 is provided as columns in W.
W is provided in rotated form: UW = U.T*W and UUW = (W - U*U.T*W)
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
To be efficient, W should have more rows (people) than columns (SNPs)
(nice interface wrapper for nLLcore)
Args:
h2 : mixture weight between K and Identity (environmental noise)
REML : boolean
if True : compute REML
if False : compute ML
dof : Degrees of freedom of the Multivariate Student-t
(default None uses multivariate Normal likelihood)
logdelta: log(delta) allows to optionally parameterize in delta space
delta : delta allows to optionally parameterize in delta space
scale : Scale parameter that multiplies the shape matrix in the Student's multivariate t (default 1.0, corresponding to a Gaussian)
penalty : L2 penalty for SNP effects (default: 0.0)
snps : [N x S] np.array holding S SNPs for N individuals to be tested
Usnps : [k x S] np.array holding S rotated SNPs (U.T.dot(snps)) for N individuals to be tested, where k is rank of the kernel used
UUsnps : [N x S] np.array holding S rotated SNPs (snps - U.dot(U.T.dot(snps))), None in full rank case (k=N)
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
weightW : vector of weights for columns in W
idx_pheno: index of the phenotype(s) to be tested
Returns:
Output dictionary:
'nLL' : negative log-likelihood
'sigma2' : the model variance sigma^2 (if h2 parameterization it is sigma^2_g+sigma^2_e; if delta parameterization it is sigma^2_e)
'beta' : [D*1] array of fixed effects weights beta
'h2' : mixture weight between Covariance and noise
'REML' : True: REML was computed, False: ML was computed
'dof' : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
'scale' : Scale parameter that multiplies the Covariance matrix (default 1.0)
--------------------------------------------------------------------------
'''
#N = self.Y.shape[0] - self.linreg.D #number of degrees of freedom - commented out because not use and misleading name for dof
S,U = self.getSU()
k = S.shape[0]
if logdelta is not None:
delta = np.exp(logdelta)
if delta is not None:
Sd = (self.S + delta) * scale
denom = delta * scale # determine normalization factor
h2 = 1.0 / (1.0 + delta)
assert weightW is None, 'weightW should be none when used with delta or logdelta parameterization, which support only a single Kernel'
else:
Sd = (h2 * self.S + (1.0 - h2)) * scale
denom = (1.0 - h2) * scale # determine normalization factor
if (h2 < 0.0) or (h2 >= 1.0):
return {'nLL':3E20,
'h2':h2,
'scale':scale}
UY,UUY = self.getUY(idx_pheno = idx_pheno)
P = UY.shape[1] #number of phenotypes used
if (snps is not None) and (Usnps is None):
assert snps.shape[0] == self.Y.shape[0], "shape mismatch between snps and Y"
Usnps,UUsnps = self.rotate(A=snps)
if weightW is not None:
#multiply the weight by h2
weightW = weightW * h2#Christoph: fixes bug with h2_1 parameterization in findA2 and/or findH2 and/or innerLoop
result = self.nLLcore(Sd=Sd, dof=dof, scale=scale, penalty=penalty, UW=UW, UUW=UUW, weightW=weightW, denom=denom, Usnps=Usnps, UUsnps=UUsnps, idx_pheno=idx_pheno)
result['h2'] = h2
return result
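# For reference, the two parameterizations handled in nLLeval above
# (restating the code, not adding behaviour):
#   delta parameterization: Sd = (S + delta) * scale,         denom = delta * scale,    h2 = 1 / (1 + delta)
#   h2 parameterization:    Sd = (h2 * S + (1 - h2)) * scale,  denom = (1 - h2) * scale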
def nLLcore(self, Sd=None, dof=None, scale=1.0, penalty=0.0, UW=None, UUW=None, weightW=None, denom=1.0, Usnps=None, UUsnps=None, idx_pheno=None):
'''
evaluate -ln( N( U^T y | U^T X*beta , diag(Sd)^-1 + U^T*W*diag(weightW)*W^T*U)) ),
--------------------------------------------------------------------------
Args:
Sd : Diagonal scaling for inverse kernel in rotated space (e.g. Sd = 1.0/(delta*S+1.0))
dof : Degrees of freedom of the Multivariate student-t
(default None - uses multivariate Normal likelihood)
denom : denominator for low rank part (delta*scale for delta scaling of Sd, (1.0-h2)*scale for h2 scaling)
scale : Scale parameter the multiplies the Covariance matrix (default 1.0)
penalty : L2 penalty for SNP effects (default: 0.0)
Usnps : [k x S] np.array holding S rotated SNPs (U.T.dot(snps)) for N individuals to be tested, where k is rank of the kernel used
UUsnps : [N x S] np.array holding S rotated SNPs (snps - U.dot(U.T.dot(snps))), None in full rank case (k=N)
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
weightW : vector of weights for columns in W
idx_pheno: index of the phenotype(s) to be tested
Returns:
Output dictionary:
'nLL' : negative log-likelihood
'sigma2' : the model variance sigma^2
'beta' : [D*1] array of fixed effects weights beta
'h2' : mixture weight between Covariance and noise
'REML' : True: REML was computed, False: ML was computed
'dof' : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
'scale' : Scale parameter that multiplies the Covariance matrix (default 1.0)
--------------------------------------------------------------------------
'''
N = self.Y.shape[0] - self.linreg.D
S,U = self.getSU()#not used, as provided from outside. Remove???
k = S.shape[0]
assert Sd.shape[0] == k, "shape mismatch"
UY,UUY = self.getUY(idx_pheno = idx_pheno)
P = UY.shape[1] #number of phenotypes used
YKY = computeAKA(Sd=Sd, denom=denom, UA=UY, UUA=UUY)
logdetK = np.log(Sd).sum()
if (UUY is not None):#low rank part
logdetK+=(N - k) * np.log(denom)
if Usnps is not None:
snpsKsnps = computeAKA(Sd=Sd, denom=denom, UA=Usnps, UUA=UUsnps)[:,np.newaxis]
snpsKY = computeAKB(Sd=Sd, denom=denom, UA=Usnps, UB=UY, UUA=UUsnps, UUB=UUY)
if weightW is not None:
absw = np.absolute(weightW)
weightW_nonz = absw > 1e-10
if (UW is not None and weightW_nonz.any()):#low rank updates
multsign = False
absw = np.sqrt(absw)
signw = np.sign(weightW)
#make sure that the identity works and if needed remove any W with zero
#weight:
if (~weightW_nonz).any():
weightW = weightW[weightW_nonz]
absw = absw[weightW_nonz]
signw = signw[weightW_nonz]
UW = UW[:,weightW_nonz]
if UUW is not None:
UUW = UUW[:,weightW_nonz]
UW = UW * absw[np.newaxis,:]
if multsign:
UW_ = UW * signw[np.newaxis,:]
if UUW is not None:
UUW = UUW * absw[np.newaxis,:]
if multsign:
UUW_ = UUW * signw[np.newaxis,:]
num_exclude = UW.shape[1]
if multsign:
WW = np.eye(num_exclude) + computeAKB(Sd=Sd, denom=denom, UA=UW, UUA=UUW, UB=UW_, UUB=UUW_)
else:
WW = np.diag(signw) + computeAKB(Sd=Sd, denom=denom, UA=UW, UUA=UUW, UB=UW, UUB=UUW)
# compute inverse efficiently
[S_WW,U_WW] = la.eigh(WW)
# compute S_WW^{-1} * UWX
WY = computeAKB(Sd=Sd, denom=denom, UA=UW, UUA=UUW, UB=UY, UUB=UUY)
UWY = U_WW.T.dot(WY)
WY = UWY / np.lib.stride_tricks.as_strided(S_WW, (S_WW.size,UWY.shape[1]), (S_WW.itemsize,0))
# compute S_WW^{-1} * UWy
# perform updates (instantiations for a and b in Equation (1.5) of
# Supplement)
YKY -= (UWY * WY).sum(0)
if Usnps is not None:
Wsnps = computeAKB(Sd=Sd, denom=denom, UA=UW, UUA=UUW, UB=Usnps, UUB=UUsnps)
UWsnps = U_WW.T.dot(Wsnps)
Wsnps = UWsnps / np.lib.stride_tricks.as_strided(S_WW, (S_WW.size,UWsnps.shape[1]), (S_WW.itemsize,0))
snpsKY -= UWsnps.T.dot(WY)
# perform updates (instantiations for a and b in Equation (1.5) of
# Supplement)
snpsKsnps -= (UWsnps * Wsnps).sum(0)[:,np.newaxis]
# determinant update
prod_diags = signw * S_WW
if np.mod((prod_diags < 0).sum(),2):
raise FloatingPointError("nan log determinant")
logdetK += np.log(np.absolute(S_WW)).sum()
########
if Usnps is not None:
penalty_ = penalty or 0.0
assert (penalty_ >= 0.0), "penalty has to be non-negative"
beta = snpsKY / (snpsKsnps + penalty_)
if np.isnan(beta.min()):
logging.warning("NaN beta value seen, may be due to an SNC (a constant SNP)")
beta[snpsKY==0] = 0.0
variance_explained_beta = (snpsKY * beta)
r2 = YKY[np.newaxis,:] - variance_explained_beta
if penalty:
variance_beta = r2 / (N - 1.0) * (snpsKsnps / ((snpsKsnps + penalty_) * (snpsKsnps + penalty_)))#note that we assume the loss in DOF is 1 here, even though it is less, so the
#variance estimate is conservative, due to N-1 for penalty case
variance_explained_beta *= (snpsKsnps/(snpsKsnps+penalty_)) * (snpsKsnps/(snpsKsnps + penalty_))
else:
variance_beta = r2 / (N - 1.0) / snpsKsnps
fraction_variance_explained_beta = variance_explained_beta / YKY[np.newaxis,:] # variance explained by beta over total variance
else:
r2 = YKY
beta = None
variance_beta = None
variance_explained_beta = None
fraction_variance_explained_beta = None
if dof is None:#Use the Multivariate Gaussian
sigma2 = r2 / N
nLL = 0.5 * (logdetK + N * (np.log(2.0 * np.pi * sigma2) + 1))
else:#Use multivariate student-t
nLL = 0.5 * (logdetK + (dof + N) * np.log(1.0 + r2 / dof))
nLL += 0.5 * N * np.log(dof * np.pi) + ss.gammaln(0.5 * dof) - ss.gammaln(0.5 * (dof + N))
result = {
'nLL':nLL,
'dof':dof,
'beta':beta,
'variance_beta':variance_beta,
'variance_explained_beta':variance_explained_beta,
'fraction_variance_explained_beta':fraction_variance_explained_beta,
'scale':scale
}
return result
class Linreg(object):
""" linear regression class"""
__slots__ = ["X", "Xdagger", "beta", "N", "D"]
def __init__(self,X=None, Xdagger=None):
self.N = 0
self.setX(X=X, Xdagger=Xdagger)
def setX(self, X=None, Xdagger=None):
self.beta = None
self.Xdagger = Xdagger
self.X = X
if X is not None:
self.D = X.shape[1]
else:
self.D = 1
def set_beta(self,Y):
self.N = Y.shape[0]
if Y.ndim == 1:
P = 1
else:
P = Y.shape[1]
if self.X is None:
self.beta = Y.mean(0)
else:
if self.Xdagger is None:
if self.X.shape[1]:
self.Xdagger = la.pinv(self.X) #SVD-based, and seems fast
else:
self.Xdagger = np.zeros_like(self.X.T)
self.beta = self.Xdagger.dot(Y)
def regress(self, Y):
self.set_beta(Y=Y)
if self.X is None:
RxY = Y - self.beta
else:
RxY = Y - self.X.dot(self.beta)
return RxY
def predict(self,Xstar):
return Xstar.dot(self.beta)
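# Minimal illustrative sketch (not part of the original FaST-LMM code):
# Linreg.regress projects Y onto the orthogonal complement of the column space
# of X, so the residual is (numerically) orthogonal to the covariates.
def _linreg_regress_sketch():
    rng = np.random.RandomState(0)
    X = np.hstack([np.ones((20, 1)), rng.randn(20, 2)])  # bias + 2 covariates
    Y = rng.randn(20, 3)                                 # 3 toy phenotypes
    resid = Linreg(X=X).regress(Y)
    assert np.allclose(X.T.dot(resid), 0.0, atol=1e-8)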
def computeAKB(Sd, denom, UA, UB, UUA=None, UUB=None):
"""
compute the bilinear form
A.T.dot( K^{-1} ).dot(B),
where K^{-1} is represented by its rotated spectrum Sd (and by denom for the low-rank complement)
"""
UAS = UA / np.lib.stride_tricks.as_strided(Sd, (Sd.size,UA.shape[1]), (Sd.itemsize,0))
AKB = UAS.T.dot(UB)
if UUA is not None:
AKB += UUA.T.dot(UUB) / denom
return AKB
def computeAKA(Sd, denom, UA, UUA=None):
"""
compute, for each column a of A, the quadratic form
a.T.dot( K^{-1} ).dot(a)
(i.e. the diagonal of A.T.dot(K^{-1}).dot(A))
"""
#To save memory divide the work into 10 pieces
piece_count = 10 if UA.shape[0] > 10 and UA.shape[1] > 1 else 1
AKA = np.zeros(UA.shape[1])
start0, start1 = 0, 0
for piece_index in xrange(piece_count):
end0 = UA.shape[0] * (piece_index+1) // piece_count
AKA += (UA[start0:end0,:] / Sd.reshape(-1,1)[start0:end0,:] * UA[start0:end0,:]).sum(0)
start0 = end0
if UUA is not None:
end1 = UUA.shape[0] * (piece_index+1) // piece_count
AKA += (UUA[start1:end1,:] * UUA[start1:end1,:]).sum(0) / denom
start1 = end1
return AKA
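# Illustrative consistency check (not part of the original code): computeAKA
# should match the diagonal of UA.T.dot(diag(1/Sd)).dot(UA) plus the low-rank
# correction UUA.T.dot(UUA)/denom, computed directly.
def _computeAKA_sketch():
    rng = np.random.RandomState(1)
    Sd = rng.rand(6) + 0.5
    UA = rng.randn(6, 3)
    UUA = rng.randn(10, 3)
    denom = 2.0
    direct = np.diag(UA.T.dot(np.diag(1.0 / Sd)).dot(UA)) + np.diag(UUA.T.dot(UUA)) / denom
    assert np.allclose(computeAKA(Sd=Sd, denom=denom, UA=UA, UUA=UUA), direct)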
#def _elementwise_mult_and_sum(a,b,start,end):
# s = (a[start:end,:] * b[start:end,:]).sum(0)
# return s
if 0:
N = 7
D = 2
X = np.random.randn(N,D)
X_K = np.random.randn(N,N)
K = np.dot(X_K,X_K.T) + np.eye(N)
Kinv = la.inv(K)
linreg = Linreg(X=X)
Kinv_ = linreg.regress(Kinv)
Kinv_ = linreg.regress(Kinv_.T)
P_ = Kinv_ #this one does not match with P
X_K_ = linreg.regress(X_K)
S_x = linreg.regress(np.eye(N))
S_x = linreg.regress(S_x.T)
K_ = X_K_.dot(X_K_.T) + S_x
[u,s,v] = la.svd(X_K_)
inonz = s > 1e-10
s = s[inonz] * s[inonz] + 1
u = u[:,inonz]
#s+=1
P__ = u.dot(np.diag(1.0 / s)).dot(u.T)#matches with P
P___ = la.pinv(K_)#matches with P
KX = Kinv.dot(X)
XKX = X.T.dot(KX)
P = Kinv - KX.dot(la.inv(XKX)).dot(KX.T)#matches with P
if __name__ == "__main__":
from fastlmm.association.gwas import *
#from fastlmm.pyplink.snpreader.Bed import Bed
#import time
delta = 1.0
num_pcs = 100
mixing = 0.5
#bed_fn = "G:\Genetics/dbgap/ARIC/autosomes.genic"
#pheno_fn = "G:\Genetics/dbgap/ARIC/all-ldlsiu02.phe"
#bed_fn = "../data/autosomes.genic"
#pheno_fn = "../all-ldlsiu02.phe"
bed_fn = "../feature_selection/examples/toydata"
pheno_fn = "../feature_selection/examples/toydata.phe"
selected_snp_pos_fn = "../feature_selection/examples/test_snps.txt"
selected_snp_pos = np.loadtxt(selected_snp_pos_fn,comments=None)
snp_reader = Bed(bed_fn)
G, y, rs = load_intersect(snp_reader, pheno_fn)
# get chr names/id
chr_ids = snp_reader.pos[:,0]
snp_pos = snp_reader.pos[:,2]
#snp_name = geno['rs']
#loco = LeaveOneChromosomeOut(chr_ids, indices=True)
loco = [[range(0,5000), range(5000,10000)]]
if 0:
#TODO: wrap up results using pandas
for train_snp_idx, test_snp_idx in loco:
print len(train_snp_idx), len(test_snp_idx)
int_snp_idx = argintersect_left(snp_pos[train_snp_idx], selected_snp_pos)
sim_keeper_idx = np.array(train_snp_idx)[int_snp_idx]
print sim_keeper_idx
G_train = G[:,train_snp_idx]
G_sim = G[:,sim_keeper_idx]
G_test = G[:,test_snp_idx]
import pdb
pdb.set_trace()
logging.info("computing pca...")
t0 = time.time()
pca = PCA(n_components = num_pcs)
pcs = pca.fit_transform(G_train)
t1 = time.time()
logging.info("done after %.4f seconds" % (t1 - t0))
gwas = Gwas(G_sim, G_test, y, delta, train_pcs=pcs, mixing_weight=mixing)
gwas.run_gwas()
if 1:
i_min = np.array([[576],
[2750],
[4684],
[7487],
[3999],
[4742],
[564],
[9930],
[6252],
[5480],
[8209],
[3829],
[582],
[6072],
[2237],
[7051],
[71],
[8590],
[5202],
[6598]])
N = G.shape[0]
S = G.shape[1]
t0 = time.time()
Gup = np.hstack((G[:,i_min[17:18,0]],G[:,18:27])).copy()
Gdown = G[:,20:25]
Gback = np.hstack((G[:,0:12],G[:,i_min[10:12,0]],0 * Gdown)).copy()
Gback_ = np.hstack((Gup,G[:,0:12],G[:,i_min[10:12,0]])).copy()
Gcovar = G[:,[9374,1344]]
covariates = np.hstack([Gcovar,np.ones((N,1))]).copy()
fullr = False
K = None
weightW = np.ones(Gup.shape[1] + Gdown.shape[1]) * 0.0
weightW[0:Gup.shape[1]] = -1.0
W = np.hstack((Gup,Gdown)).copy()
#W = G_snp
lmm = LMM(X=covariates,Y=y[:,np.newaxis],G=Gback_,K=K,forcefullrank=fullr)
UGup,UUGup = lmm.rotate(W)
#UGup,UUGup=None,None
opt = lmm.findH2(nGridH2=10,UW=UGup,UUW=UUGup,weightW=weightW)
h2 = opt['h2']
delta = None#=(1.0/h2-1.0)
#REML=False
REML = False
#lmm.set_snps(snps=G)
i_up = weightW == -1.0
i_G1 = weightW == 4
res2 = lmm.nLLeval_2K(h2=h2, h2_1=(4.0 * h2), dof = None, scale = 1.0, penalty=0.0, snps=G, UW=UGup, UUW=UUGup, i_up=i_up, i_G1=i_G1, subset=False)
res = lmm.nLLeval(h2=h2, logdelta = None, delta = None, dof = None, scale = 1.0, penalty=0.0, snps = G, UW=UGup, UUW=UUGup, weightW=weightW)#see comment about weightW*h2 in nLLeval
chi2stats = res['beta'] * res['beta'] / res['variance_beta']
pv = st.chi2.sf(chi2stats,1)
pv_ = st.f.sf(chi2stats,1,G.shape[0] - (lmm.linreg.D+1))#note that G.shape is the number of individuals
chi2stats2 = res2['beta'] * res2['beta'] / res2['variance_beta']
pv2 = st.chi2.sf(chi2stats2,1)
opt_2K = lmm.findH2_2K(nGridH2=10, minH2 = 0.0, maxH2 = 0.99999, i_up=i_up, i_G1=i_G1, UW=UGup, UUW=UUGup)
res_2K_ = lmm.nLLeval_2K(h2=opt['h2'], h2_1=0, dof = None, scale = 1.0, penalty=0.0, snps=G, UW=UGup, UUW=UUGup, i_up=i_up, i_G1=i_G1, subset=False)
res_2K = lmm.nLLeval_2K(h2=opt_2K['h2'], h2_1=opt_2K['h2_1'], dof = None, scale = 1.0, penalty=0.0, snps=G, UW=UGup, UUW=UUGup, i_up=i_up, i_G1=i_G1, subset=False)
t1 = time.time()
i_pv = pv[:,0].argsort()
if 0:
#lmm.findH2()
gwas = Gwas(Gback, G, y, mixing_weight=mixing, cov=covariates, delta=delta, REML=REML)
gwas.run_gwas()
t2 = time.time()
timing1 = t1 - t0
timing2 = t2 - t1
print "t1 = %.5f t2 = %.5f" % (timing1,timing2)
import pylab as PL
PL.ion()
PL.figure()
PL.plot([0,8],[0,8])
PL.plot(-np.log10(pv[gwas.p_idx,0]),-np.log10(gwas.p_values),'.g')
PL.plot(-np.log10(pv),-np.log10(pv2),'.r')
|
apache-2.0
|
c-PRIMED/puq
|
puq/plot.py
|
1
|
3697
|
"""
Plotting functions for UQ
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
from puq.jpickle import unpickle, pickle
import sys, string, matplotlib
if sys.platform == 'darwin':
matplotlib.use('macosx', warn=False)
else:
matplotlib.use('tkagg', warn=False)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
#from matplotlib import rc
#rc('text', usetex=True)
import numpy as np
from .pdf import ExperimentalPDF
def plot_customize(opt, p, fname):
try:
if opt.nogrid:
plt.grid(False)
else:
plt.grid(True)
except AttributeError:
pass
if isinstance(p, Axes3D) and opt.zlabel:
p.set_zlabel(opt.zlabel)
if opt.xlabel:
plt.xlabel(opt.xlabel)
if opt.ylabel:
plt.ylabel(opt.ylabel)
if opt.title:
plt.title(opt.title)
if opt.fontsize:
matplotlib.rcParams.update({'font.size': opt.fontsize})
if opt.f != 'i':
plt.savefig('%s.%s' % (fname, opt.f))
def plot(sweep, h5, opt, params=[]):
if opt.v:
opt.v = [s.strip() for s in opt.v.split(',')]
if not opt.l:
opt.k = True
method = string.lower(sweep.psweep.__class__.__name__)
if opt.r:
for vname in h5[method]:
if not opt.v or vname in opt.v:
print("Plotting Response Surface for %s" % vname)
desc = h5[method][vname].attrs['description']
rsv = h5[method][vname]['response'].value
rs = unpickle(rsv)
p = rs.plot()
if desc and desc != vname:
plt.title(desc)
else:
plt.title("Response Function for %s" % vname)
plot_customize(opt, p, 'response-%s' % vname)
else:
if opt.psamples:
psamples = get_psamples_from_csv(sweep, h5, opt.samples)
else:
psamples = None
for vname in h5[method]:
if not opt.v or vname in opt.v:
print("plotting PDF for %s" % vname)
desc = h5[method][vname].attrs['description']
if 'samples' in h5[method][vname]:
# response surface already sampled. Just calculate pdf.
data = h5[method][vname]['samples'].value
if opt.k:
p = ExperimentalPDF(data, fit=True).plot()
if opt.l:
p = ExperimentalPDF(data, fit=False).plot()
else:
rsv = h5[method][vname]['response'].value
rs = unpickle(rsv)
data = None
if opt.k:
pdf, data = rs.pdf(fit=True, params=params, psamples=psamples, return_samples=True)
p = pdf.plot()
if opt.l:
if data is not None:
p = ExperimentalPDF(data, fit=False).plot()
else:
pdf, data = rs.pdf(fit=False, params=params, psamples=psamples, return_samples=True)
p = pdf.plot()
h5[method][vname]['samples'] = data
if 'pdf' not in h5[method][vname]:
h5[method][vname]['pdf'] = pickle(pdf)
plt.xlabel(vname)
if desc and desc != vname:
plt.title("PDF for %s" % desc)
else:
plt.title("PDF for %s" % vname)
plot_customize(opt, p, 'pdf-%s' % vname)
|
mit
|
mojoboss/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
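# For illustration (hypothetical input, not part of the original script):
# token_freqs("To be or not to be") yields the counts
# {'to': 2, 'be': 2, 'or': 1, 'not': 1} (as a defaultdict).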
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
alexsavio/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
50
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
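# In scikit-learn's semi-supervised estimators a target value of -1 marks a
# sample as unlabeled, so roughly 30% (resp. 50%) of the labels are discarded above.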
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
|
bsd-3-clause
|
zak-k/iris
|
docs/iris/example_code/General/inset_plot.py
|
7
|
2357
|
"""
Test Data Showing Inset Plots
=============================
This example demonstrates the use of a single 3D data cube with time, latitude
and longitude dimensions to plot a temperature series for a single latitude
coordinate, with an inset plot of the data region.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import cartopy.crs as ccrs
import iris.quickplot as qplt
import iris.plot as iplt
def main():
# Load the data
with iris.FUTURE.context(netcdf_promote=True):
cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
# Slice into cube to retrieve data for the inset map showing the
# data region
region = cube1[-1, :, :]
# Average over latitude to reduce cube to 1 dimension
plot_line = region.collapsed('latitude', iris.analysis.MEAN)
# Open a window for plotting
fig = plt.figure()
# Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
ax_main = fig.add_subplot(1, 1, 1)
# Produce a quick plot of the 1D cube
qplt.plot(plot_line)
# Set x limits to match the data
ax_main.set_xlim(0, plot_line.coord('longitude').points.max())
# Adjust the y limits so that the inset map won't clash with main plot
ax_main.set_ylim(294, 310)
ax_main.set_title('Meridional Mean Temperature')
# Add grid lines
ax_main.grid()
# Add a second set of axes specifying the fractional coordinates within
# the figure with bottom left corner at x=0.55, y=0.58 with width
# 0.3 and height 0.25.
# Also specify the projection
ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25],
projection=ccrs.Mollweide(central_longitude=180))
# Use iris.plot (iplt) here so colour bar properties can be specified
# Also use a sequential colour scheme to reduce confusion for those with
# colour-blindness
iplt.pcolormesh(region, cmap='Blues')
# Manually set the orientation and tick marks on your colour bar
ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
plt.colorbar(orientation='horizontal', ticks=ticklist)
ax_sub.set_title('Data Region')
# Add coastlines
ax_sub.coastlines()
# request to show entire map, using the colour mesh on the data region only
ax_sub.set_global()
qplt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
jmcq89/megaman
|
megaman/embedding/base.py
|
4
|
5249
|
""" base estimator class for megaman """
# Author: James McQueen -- <[email protected]>
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import numpy as np
from scipy.sparse import isspmatrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array
from ..geometry.geometry import Geometry
# from sklearn.utils.validation import FLOAT_DTYPES
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class BaseEmbedding(BaseEstimator, TransformerMixin):
""" Base Class for all megaman embeddings.
Inherits BaseEstimator and TransformerMixin from sklearn.
BaseEmbedding creates the common interface to the geometry
class for all embeddings as well as providing a common
.fit_transform().
Parameters
----------
n_components : integer
number of coordinates for the manifold.
radius : float (optional)
radius for adjacency and affinity calculations. Will be overridden if
either is set in `geom`
geom : dict or megaman.geometry.Geometry object
specification of geometry parameters: keys are
["adjacency_method", "adjacency_kwds", "affinity_method",
"affinity_kwds", "laplacian_method", "laplacian_kwds"]
Attributes
----------
geom_ : a fitted megaman.geometry.Geometry object.
"""
def __init__(self, n_components=2, radius=None, geom=None):
self.n_components = n_components
self.radius = radius
self.geom = geom
def _validate_input(self, X, input_type):
if input_type == 'data':
sparse_formats = None
elif input_type in ['adjacency', 'affinity']:
sparse_formats = ['csr', 'coo', 'lil', 'bsr', 'dok', 'dia']
else:
raise ValueError("unrecognized input_type: {0}".format(input_type))
return check_array(X, dtype=FLOAT_DTYPES, accept_sparse=sparse_formats)
# # The world is not ready for this...
# def estimate_radius(self, X, input_type='data', intrinsic_dim=None):
# """Estimate a radius based on the data and intrinsic dimensionality
#
# Parameters
# ----------
# X : array_like, [n_samples, n_features]
# dataset for which radius is estimated
# intrinsic_dim : int (optional)
# estimated intrinsic dimensionality of the manifold. If not
# specified, then intrinsic_dim = self.n_components
#
# Returns
# -------
# radius : float
# The estimated radius for the fit
# """
# if input_type == 'affinity':
# return None
# elif input_type == 'adjacency':
# return X.max()
# elif input_type == 'data':
# if intrinsic_dim is None:
# intrinsic_dim = self.n_components
# mean_std = np.std(X, axis=0).mean()
# n_features = X.shape[1]
# return 0.5 * mean_std / n_features ** (1. / (intrinsic_dim + 6))
# else:
# raise ValueError("Unrecognized input_type: {0}".format(input_type))
def fit_geometry(self, X=None, input_type='data'):
"""Inputs self.geom, and produces the fitted geometry self.geom_"""
if self.geom is None:
self.geom_ = Geometry()
elif isinstance(self.geom, Geometry):
self.geom_ = self.geom
else:
try:
kwds = dict(**self.geom)
except TypeError:
raise ValueError("geom must be a Geometry instance or "
"a mappable/dictionary")
self.geom_ = Geometry(**kwds)
if self.radius is not None:
self.geom_.set_radius(self.radius, override=False)
# if self.radius == 'auto':
# if X is not None and input_type != 'affinity':
# self.geom_.set_radius(self.estimate_radius(X, input_type),
# override=False)
# else:
# self.geom_.set_radius(self.radius,
# override=False)
if X is not None:
self.geom_.set_matrix(X, input_type)
return self
def fit_transform(self, X, y=None, input_type='data'):
"""Fit the model from data in X and transform X.
Parameters
----------
input_type : string, one of: 'data', 'adjacency' or 'affinity'.
The type of the input data X. (default = 'data')
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If input_type is 'adjacency' or 'affinity':
X : array-like, shape (n_samples, n_samples),
Interpret X as a precomputed adjacency graph or affinity matrix
computed from samples.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self.fit(X, y=y, input_type=input_type)
return self.embedding_
def transform(self, X, y=None, input_type='data'):
raise NotImplementedError("transform() not implemented. "
"Try fit_transform()")
|
bsd-2-clause
|
eblossom/gnuradio
|
gr-filter/examples/synth_to_chan.py
|
40
|
3854
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
shyamalschandra/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
27
|
4037
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
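# Added illustrative note (not part of the original example): every atom
# returned by ricker_matrix is rescaled to unit l2 norm, which keeps codes
# computed against dictionaries of different widths comparable, e.g.
# >>> D = ricker_matrix(width=10, resolution=128, n_components=8)
# >>> bool(np.allclose(np.sum(D ** 2, axis=1), 1.0))
# True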
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
|
bsd-3-clause
|
sanjayankur31/nest-simulator
|
pynest/examples/brunel_alpha_evolution_strategies.py
|
8
|
20476
|
# -*- coding: utf-8 -*-
#
# brunel_alpha_evolution_strategies.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Use evolution strategies to find parameters for a random balanced network (alpha synapses)
------------------------------------------------------------------------------------------
This script uses an optimization algorithm to find the appropriate
parameter values for the external drive "eta" and the relative ratio
of excitation and inhibition "g" for a balanced random network that
lead to particular population-averaged rates, coefficients of
variation and correlations.
From an initial Gaussian search distribution parameterized with mean
and standard deviation, network parameters are sampled. Network
realizations of these parameters are simulated and evaluated according
to an objective function that measures how close the activity
statistics are to their desired values (~fitness). From these fitness
values the approximate natural gradient of the fitness landscape is
computed and used to update the parameters of the search
distribution. This procedure is repeated until the maximal number of
function evaluations is reached or the width of the search
distribution becomes extremely small. We use the following fitness
function:
.. math::
f = -\alpha (r - r^*)^2 - \beta (\mathrm{cv} - \mathrm{cv}^*)^2 - \gamma (\mathrm{corr} - \mathrm{corr}^*)^2
where `alpha`, `beta` and `gamma` are weighting factors, and stars indicate
target values.
The network contains an excitatory and an inhibitory population on
the basis of the network used in [1]_.
The optimization algorithm (evolution strategies) is described in
Wierstra et al. [2]_.
References
~~~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of Sparsely Connected Networks of
Excitatory and Inhibitory Spiking Neurons. Journal of Computational
Neuroscience 8, 183-208.
.. [2] Wierstra et al. (2014). Natural evolution strategies. Journal of
Machine Learning Research, 15(1), 949-980.
See Also
~~~~~~~~~~
:doc:`brunel_alpha_nest`
Authors
~~~~~~~
Jakob Jordan
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import scipy.special as sp
import nest
###############################################################################
# Analysis
def cut_warmup_time(spikes, warmup_time):
# Removes initial warmup time from recorded spikes
spikes['senders'] = spikes['senders'][
spikes['times'] > warmup_time]
spikes['times'] = spikes['times'][
spikes['times'] > warmup_time]
return spikes
def compute_rate(spikes, N_rec, sim_time):
# Computes average rate from recorded spikes
return (1. * len(spikes['times']) / N_rec / sim_time * 1e3)
def sort_spikes(spikes):
# Sorts recorded spikes by node ID
unique_node_ids = sorted(np.unique(spikes['senders']))
spiketrains = []
for node_id in unique_node_ids:
spiketrains.append(spikes['times'][spikes['senders'] == node_id])
return unique_node_ids, spiketrains
def compute_cv(spiketrains):
# Computes coefficient of variation from sorted spikes
if spiketrains:
isis = np.hstack([np.diff(st) for st in spiketrains])
if len(isis) > 1:
return np.std(isis) / np.mean(isis)
else:
return 0.
else:
return 0.
def bin_spiketrains(spiketrains, t_min, t_max, t_bin):
# Bins sorted spikes
bins = np.arange(t_min, t_max, t_bin)
return bins, [np.histogram(s, bins=bins)[0] for s in spiketrains]
def compute_correlations(binned_spiketrains):
# Computes correlations from binned spiketrains
n = len(binned_spiketrains)
if n > 1:
cc = np.corrcoef(binned_spiketrains)
return 1. / (n * (n - 1.)) * (np.sum(cc) - n)
else:
return 0.
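# Added descriptive note: compute_correlations averages the off-diagonal
# entries of the Pearson correlation matrix, i.e. it subtracts the n diagonal
# ones from the summed matrix and divides by the n*(n-1) remaining pairs.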
def compute_statistics(parameters, espikes, ispikes):
# Computes population-averaged rates coefficients of variation and
# correlations from recorded spikes of excitatory and inhibitory
# populations
espikes = cut_warmup_time(espikes, parameters['warmup_time'])
ispikes = cut_warmup_time(ispikes, parameters['warmup_time'])
erate = compute_rate(espikes, parameters['N_rec'], parameters['sim_time'])
irate = compute_rate(ispikes, parameters['N_rec'], parameters['sim_time'])
enode_ids, espiketrains = sort_spikes(espikes)
inode_ids, ispiketrains = sort_spikes(ispikes)
ecv = compute_cv(espiketrains)
icv = compute_cv(ispiketrains)
ecorr = compute_correlations(
bin_spiketrains(espiketrains, 0., parameters['sim_time'], 1.)[1])
icorr = compute_correlations(
bin_spiketrains(ispiketrains, 0., parameters['sim_time'], 1.)[1])
return (np.mean([erate, irate]),
np.mean([ecv, icv]),
np.mean([ecorr, icorr]))
###############################################################################
# Network simulation
def simulate(parameters):
# Simulates the network and returns recorded spikes for excitatory
# and inhibitory population
# Code taken from brunel_alpha_nest.py
def LambertWm1(x):
# Using scipy to mimic the gsl_sf_lambert_Wm1 function.
return sp.lambertw(x, k=-1 if x < 0 else 0).real
def ComputePSPnorm(tauMem, CMem, tauSyn):
a = (tauMem / tauSyn)
b = (1.0 / tauSyn - 1.0 / tauMem)
# time of maximum
t_max = 1.0 / b * (-LambertWm1(-np.exp(-1.0 / a) / a) - 1.0 / a)
# maximum of PSP for current of unit amplitude
return (np.exp(1.0) / (tauSyn * CMem * b) *
((np.exp(-t_max / tauMem) - np.exp(-t_max / tauSyn)) / b -
t_max * np.exp(-t_max / tauSyn)))
# number of excitatory neurons
NE = int(parameters['gamma'] * parameters['N'])
# number of inhibitory neurons
NI = parameters['N'] - NE
# number of excitatory synapses per neuron
CE = int(parameters['epsilon'] * NE)
# number of inhibitory synapses per neuron
CI = int(parameters['epsilon'] * NI)
tauSyn = 0.5 # synaptic time constant in ms
tauMem = 20.0 # time constant of membrane potential in ms
CMem = 250.0 # capacitance of membrane in in pF
theta = 20.0 # membrane threshold potential in mV
neuron_parameters = {
'C_m': CMem,
'tau_m': tauMem,
'tau_syn_ex': tauSyn,
'tau_syn_in': tauSyn,
't_ref': 2.0,
'E_L': 0.0,
'V_reset': 0.0,
'V_m': 0.0,
'V_th': theta
}
J = 0.1 # postsynaptic amplitude in mV
J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
J_ex = J / J_unit # amplitude of excitatory postsynaptic current
# amplitude of inhibitory postsynaptic current
J_in = -parameters['g'] * J_ex
nu_th = (theta * CMem) / (J_ex * CE * np.exp(1) * tauMem * tauSyn)
nu_ex = parameters['eta'] * nu_th
p_rate = 1000.0 * nu_ex * CE
nest.ResetKernel()
nest.set_verbosity('M_FATAL')
nest.SetKernelStatus({'rng_seed': parameters['seed'],
'resolution': parameters['dt']})
nodes_ex = nest.Create('iaf_psc_alpha', NE, params=neuron_parameters)
nodes_in = nest.Create('iaf_psc_alpha', NI, params=neuron_parameters)
noise = nest.Create('poisson_generator', params={'rate': p_rate})
espikes = nest.Create('spike_recorder', params={'label': 'brunel-py-ex'})
ispikes = nest.Create('spike_recorder', params={'label': 'brunel-py-in'})
nest.CopyModel('static_synapse', 'excitatory',
{'weight': J_ex, 'delay': parameters['delay']})
nest.CopyModel('static_synapse', 'inhibitory',
{'weight': J_in, 'delay': parameters['delay']})
nest.Connect(noise, nodes_ex, syn_spec='excitatory')
nest.Connect(noise, nodes_in, syn_spec='excitatory')
if parameters['N_rec'] > NE:
raise ValueError(
f'Requested recording from {parameters["N_rec"]} neurons, but only {NE} in excitatory population')
if parameters['N_rec'] > NI:
raise ValueError(
f'Requested recording from {parameters["N_rec"]} neurons, but only {NI} in inhibitory population')
nest.Connect(nodes_ex[:parameters['N_rec']], espikes)
nest.Connect(nodes_in[:parameters['N_rec']], ispikes)
conn_parameters_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_parameters_ex, 'excitatory')
conn_parameters_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_parameters_in, 'inhibitory')
nest.Simulate(parameters['sim_time'])
return (espikes.events,
ispikes.events)
###############################################################################
# Optimization
def default_population_size(dimensions):
# Returns a population size suited for the given number of dimensions
# See Wierstra et al. (2014)
return 4 + int(np.floor(3 * np.log(dimensions)))
def default_learning_rate_mu():
# Returns a default learning rate for the mean of the search distribution
# See Wierstra et al. (2014)
return 1
def default_learning_rate_sigma(dimensions):
# Returns a default learning rate for the standard deviation of the
# search distribution for the given number of dimensions
# See Wierstra et al. (2014)
return (3 + np.log(dimensions)) / (12. * np.sqrt(dimensions))
def compute_utility(fitness):
# Computes utility and order used for fitness shaping
# See Wierstra et al. (2014)
n = len(fitness)
order = np.argsort(fitness)[::-1]
fitness = fitness[order]
utility = [
np.max([0, np.log((n / 2) + 1)]) - np.log(k + 1) for k in range(n)]
utility = utility / np.sum(utility) - 1. / n
return order, utility
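# Added descriptive note: the utilities above are a rank-based transformation
# of the raw fitness values; after normalization they sum to zero, so only the
# ordering of the sampled individuals (not the scale of their fitness) enters
# the gradient estimate, which makes the update robust to fitness outliers.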
def optimize(func, mu, sigma, learning_rate_mu=None, learning_rate_sigma=None,
population_size=None, fitness_shaping=True,
mirrored_sampling=True, record_history=False,
max_generations=2000, min_sigma=1e-8, verbosity=0):
###########################################################################
# Optimizes an objective function via evolution strategies using the
# natural gradient of multinormal search distributions in natural
# coordinates. Does not consider covariances between parameters (
# "Separable natural evolution strategies").
# See Wierstra et al. (2014)
#
# Parameters
# ----------
# func: function
# The function to be maximized.
# mu: float
# Initial mean of the search distribution.
# sigma: float
# Initial standard deviation of the search distribution.
# learning_rate_mu: float
# Learning rate of mu.
# learning_rate_sigma: float
# Learning rate of sigma.
# population_size: int
# Number of individuals sampled in each generation.
# fitness_shaping: bool
# Whether to use fitness shaping, compensating for large
# deviations in fitness, see Wierstra et al. (2014).
# mirrored_sampling: bool
# Whether to use mirrored sampling, i.e., evaluating a mirrored
# sample for each sample, see Wierstra et al. (2014).
# record_history: bool
# Whether to record history of search distribution parameters,
# fitness values and individuals.
# max_generations: int
# Maximal number of generations.
# min_sigma: float
# Minimal value for standard deviation of search
# distribution. If any dimension has a value smaller than this,
# the search is stopped.
# verbosity: bool
# Whether to continuously print progress information.
#
# Returns
# -------
# dict
# Dictionary of final parameters of search distribution and
# history.
if not isinstance(mu, np.ndarray):
raise TypeError('mu needs to be of type np.ndarray')
if not isinstance(sigma, np.ndarray):
raise TypeError('sigma needs to be of type np.ndarray')
if learning_rate_mu is None:
learning_rate_mu = default_learning_rate_mu()
if learning_rate_sigma is None:
learning_rate_sigma = default_learning_rate_sigma(mu.size)
if population_size is None:
population_size = default_population_size(mu.size)
generation = 0
mu_history = []
sigma_history = []
pop_history = []
fitness_history = []
while True:
# create new population using the search distribution
s = np.random.normal(0, 1, size=(population_size,) + np.shape(mu))
z = mu + sigma * s
# add mirrored perturbations if enabled
if mirrored_sampling:
z = np.vstack([z, mu - sigma * s])
s = np.vstack([s, -s])
# evaluate fitness for every individual in population
fitness = np.fromiter((func(*zi) for zi in z), float)
# print status if enabled
if verbosity > 0:
print(
f'# Generation {generation:d} | fitness {np.mean(fitness):.3f} | '
f'mu {", ".join(str(np.round(mu_i, 3)) for mu_i in mu)} | '
f'sigma {", ".join(str(np.round(sigma_i, 3)) for sigma_i in sigma)}'
)
# apply fitness shaping if enabled
if fitness_shaping:
order, utility = compute_utility(fitness)
s = s[order]
z = z[order]
else:
utility = fitness
# bookkeeping
if record_history:
mu_history.append(mu.copy())
sigma_history.append(sigma.copy())
pop_history.append(z.copy())
fitness_history.append(fitness)
# exit if max generations reached or search distributions are
# very narrow
if generation == max_generations or np.all(sigma < min_sigma):
break
# update parameter of search distribution via natural gradient
# descent in natural coordinates
mu += learning_rate_mu * sigma * np.dot(utility, s)
sigma *= np.exp(learning_rate_sigma / 2. * np.dot(utility, s**2 - 1))
generation += 1
return {
'mu': mu,
'sigma': sigma,
'fitness_history': np.array(fitness_history),
'mu_history': np.array(mu_history),
'sigma_history': np.array(sigma_history),
'pop_history': np.array(pop_history)
}
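# Hedged usage sketch (added, not part of the original example): a minimal way
# the `optimize` routine above could be exercised on a toy 2-D quadratic whose
# maximum is at (1, -2). The function name and all parameter values here are
# illustrative assumptions and are not used elsewhere in this script.
def _demo_optimize_toy_quadratic():
    def toy_objective(x, y):
        # concave paraboloid, maximized at x=1, y=-2
        return -((x - 1.0) ** 2 + (y + 2.0) ** 2)
    result = optimize(toy_objective,
                      mu=np.array([0.0, 0.0]),      # initial mean of search
                      sigma=np.array([1.0, 1.0]),   # initial std of search
                      max_generations=200,
                      verbosity=0)
    # result['mu'] should end up close to [1.0, -2.0]
    return result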
def optimize_network(optimization_parameters, simulation_parameters):
# Searches for suitable network parameters to fulfill defined constraints
np.random.seed(simulation_parameters['seed'])
def objective_function(g, eta):
# Returns the fitness of a specific network parametrization
# create local copy of parameters that uses parameters given
# by optimization algorithm
simulation_parameters_local = simulation_parameters.copy()
simulation_parameters_local['g'] = g
simulation_parameters_local['eta'] = eta
# perform the network simulation
espikes, ispikes = simulate(simulation_parameters_local)
# analyse the result and compute fitness
rate, cv, corr = compute_statistics(
simulation_parameters, espikes, ispikes)
fitness = (
-optimization_parameters['fitness_weight_rate'] * (rate - optimization_parameters['target_rate'])**2 -
optimization_parameters['fitness_weight_cv'] * (cv - optimization_parameters['target_cv'])**2 -
optimization_parameters['fitness_weight_corr'] * (corr - optimization_parameters['target_corr'])**2
)
return fitness
return optimize(
objective_function,
np.array(optimization_parameters['mu']),
np.array(optimization_parameters['sigma']),
max_generations=optimization_parameters['max_generations'],
record_history=True,
verbosity=optimization_parameters['verbosity']
)
###############################################################################
# Main
if __name__ == '__main__':
simulation_parameters = {
'seed': 123,
'dt': 0.1, # (ms) simulation resolution
'sim_time': 1000., # (ms) simulation duration
'warmup_time': 300., # (ms) duration ignored during analysis
'delay': 1.5, # (ms) synaptic delay
'g': None, # relative ratio of excitation and inhibition
'eta': None, # relative strength of external drive
'epsilon': 0.1, # average connectivity of network
'N': 400, # number of neurons in network
'gamma': 0.8, # relative size of excitatory and
# inhibitory population
'N_rec': 40, # number of neurons to record activity from
}
optimization_parameters = {
'verbosity': 1, # print progress over generations
'max_generations': 20, # maximal number of generations
'target_rate': 1.89, # (spikes/s) target rate
'target_corr': 0.0, # target correlation
'target_cv': 1., # target coefficient of variation
'mu': [1., 3.], # initial mean for search distribution
# (mu(g), mu(eta))
'sigma': [0.15, 0.05], # initial sigma for search
# distribution (sigma(g), sigma(eta))
# hyperparameters of the fitness function; these are used to
# compensate for the different typical scales of the
# individual measures, rate ~ O(1), cv ~ O(0.1), corr ~ O(0.01)
'fitness_weight_rate': 1., # relative weight of rate deviation
'fitness_weight_cv': 10., # relative weight of cv deviation
'fitness_weight_corr': 100., # relative weight of corr deviation
}
# optimize network parameters
optimization_result = optimize_network(optimization_parameters,
simulation_parameters)
simulation_parameters['g'] = optimization_result['mu'][0]
simulation_parameters['eta'] = optimization_result['mu'][1]
espikes, ispikes = simulate(simulation_parameters)
rate, cv, corr = compute_statistics(
simulation_parameters, espikes, ispikes)
print('Statistics after optimization:', end=' ')
print('Rate: {:.3f}, cv: {:.3f}, correlation: {:.3f}'.format(
rate, cv, corr))
# plot results
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_axes([0.06, 0.12, 0.25, 0.8])
ax2 = fig.add_axes([0.4, 0.12, 0.25, 0.8])
ax3 = fig.add_axes([0.74, 0.12, 0.25, 0.8])
ax1.set_xlabel('Time (ms)')
ax1.set_ylabel('Neuron id')
ax2.set_xlabel(r'Relative strength of inhibition $g$')
ax2.set_ylabel(r'Relative strength of external drive $\eta$')
ax3.set_xlabel('Generation')
ax3.set_ylabel('Fitness')
# raster plot
ax1.plot(espikes['times'], espikes['senders'], ls='', marker='.')
# search distributions and individuals
for mu, sigma in zip(optimization_result['mu_history'],
optimization_result['sigma_history']):
ellipse = Ellipse(
xy=mu, width=2 * sigma[0], height=2 * sigma[1], alpha=0.5, fc='k')
ellipse.set_clip_box(ax2.bbox)
ax2.add_artist(ellipse)
ax2.plot(optimization_result['mu_history'][:, 0],
optimization_result['mu_history'][:, 1],
marker='.', color='k', alpha=0.5)
for generation in optimization_result['pop_history']:
ax2.scatter(generation[:, 0], generation[:, 1])
# fitness over generations
ax3.errorbar(np.arange(len(optimization_result['fitness_history'])),
np.mean(optimization_result['fitness_history'], axis=1),
yerr=np.std(optimization_result['fitness_history'], axis=1))
fig.savefig('brunel_alpha_evolution_strategies.pdf')
|
gpl-2.0
|
JsNoNo/scikit-learn
|
sklearn/decomposition/base.py
|
313
|
5647
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
|
bsd-3-clause
|
RomainBrault/scikit-learn
|
benchmarks/bench_rcv1_logreg_convergence.py
|
58
|
7229
|
# Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
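# Added descriptive note: the quantity returned above is the per-sample
# regularized logistic objective
#     mean(log(1 + exp(-y_i * (x_i . w + b)))) + ||w||^2 / (2 * C * n_samples),
# which puts all solvers below on a common scale regardless of whether they
# parameterize the penalty through C (LogisticRegression) or alpha (SGD).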
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size as in LR-SAG
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
bsd-3-clause
|
griffincalme/MicroDeconvolution
|
TestingScripts/IHCRandomWalktest.py
|
1
|
5385
|
#Griffin Calme
#This program will output DAB, Hematoxylin, and perm red
#Hematoxylin (basic blue) binds to nuclei
#Cytokines are stained with the perm red chromogen
#CD3 (T cells) are DAB
import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.exposure import rescale_intensity
from skimage.segmentation import random_walker
#from skimage.color import separate_stains
from skimage.color import rgb2grey
from skimage.util import dtype, dtype_limits
import os
from pyamg import *
#Color deconvolution
#Normalized optical density matrix
#see Ruifrok AC, Johnston DA. Quantification of histological staining by color deconvolution.
# R G B
# X X X Hematoxylin(0)
# X X X Red(1)
# X X X DAB(2)
#Hematoxylin(0), Red(1), DAB(2)
rgb_from_hrd = np.array([[0.644, 0.710, 0.285],
[0.0326, 0.872, 0.487],
[0.270, 0.562, 0.781]])
#conv_matrix
hrd_from_rgb = linalg.inv(rgb_from_hrd)
print(rgb_from_hrd)
print(hrd_from_rgb)
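# Added note: hrd_from_rgb is simply the matrix inverse of the stain matrix,
# so np.dot(rgb_from_hrd, hrd_from_rgb) is numerically the 3x3 identity; the
# separate_stains helper below applies this inverse in optical-density (log)
# space to unmix the three stains.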
ihc_rgb = imread(r'TestImage.jpg')
def separate_stains(rgb, color_deconv_vector):
rgb = dtype.img_as_float(rgb, force_copy=True)
rgb += 2
stains = np.dot(np.reshape(-np.log(rgb), (-1, 3)), color_deconv_vector)
return np.reshape(stains, rgb.shape)
# Rescale signals so that intensity ranges from 0 to 1
# ihc_hrd[:, :, channel] selects the stain channel (0, 1 or 2)
def stainspace_to_2d_array(ihc_xyz, channel):
rescale = rescale_intensity(ihc_xyz[:, :, channel], out_range=(0,1))
stain_array = np.dstack((np.zeros_like(rescale), rescale, rescale))
grey_array = rgb2grey(stain_array)
return grey_array
#Stain space conversion
ihc_hrd = separate_stains(ihc_rgb, hrd_from_rgb)
DAB_Grey_Array = stainspace_to_2d_array(ihc_hrd, 2)
Hema_Gray_Array = stainspace_to_2d_array(ihc_hrd, 0)
permRed_Gray_Array = stainspace_to_2d_array(ihc_hrd, 1)
#Get markers for random walk
def get_markers(grey_array, bottom_thresh, top_thresh):
markers = np.zeros_like(grey_array)
markers[grey_array < bottom_thresh] = 1
markers[grey_array > top_thresh] = 2
return markers
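# Added note: get_markers seeds the random walker with two certain labels --
# pixels whose stain intensity falls below bottom_thresh are marked 1
# (negative), pixels above top_thresh are marked 2 (positive) -- and leaves
# the remaining pixels as 0 for random_walker to assign.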
#perform Random Walker, fills in positive regions
DAB_segmentation = random_walker(DAB_Grey_Array, get_markers(DAB_Grey_Array, .3, .5), beta=130, mode='cg_mg')
Hema_segmentation = random_walker(Hema_Gray_Array, get_markers(Hema_Gray_Array, .2, .4), beta=130, mode='cg_mg')
permRed_segmentation = random_walker(permRed_Gray_Array, get_markers(permRed_Gray_Array, .4, .5), beta=130, mode='cg_mg')
"""PRINTING OUTPUT"""
print('-------------------------')
print(' ')
'''Compute and Output'''
# Compute and output percentages of pixels stained by each chromagen
pic_dimensions = np.shape(DAB_segmentation) # both arrays same shape
total_pixels = pic_dimensions[0] * pic_dimensions[1]
#change negative pixel values from 1 -> 0, positives 2 -> 1
subtrahend_array = np.ones_like(DAB_segmentation)
DAB_segmentation = np.subtract(DAB_segmentation, subtrahend_array)
Hema_segmentation = np.subtract(Hema_segmentation, subtrahend_array)
permRed_segmentation = np.subtract(permRed_segmentation, subtrahend_array)
#count positive pixels
DAB_pixels = np.count_nonzero(DAB_segmentation)
Hema_pixels = np.count_nonzero(Hema_segmentation)
red_pixels = np.count_nonzero(permRed_segmentation)
DAB_coverage_percent = (round((DAB_pixels / total_pixels * 100), 1))
print("The percentage of the image covered by DAB is: " + str(DAB_coverage_percent) + "%")
Hema_coverage_percent = (round((Hema_pixels / total_pixels * 100), 1))
print("The percentage of the image covered by Hematoxylin is: " + str(Hema_coverage_percent) + "%")
total_cell_array = np.add(DAB_segmentation, Hema_segmentation)
total_cell_pixels = np.count_nonzero(total_cell_array)
total_cell_percent = (round((total_cell_pixels / total_pixels * 100), 1))
print("The percentage of the image covered by DAB & Hematoxylin is: " + str(total_cell_percent) + "%")
percent_pos_cells = (round((DAB_pixels / total_cell_pixels * 100), 1))
print("The percentage of CD3+ cells out of the total number of cells is: " + str(percent_pos_cells) + "%")
PercentPos = percent_pos_cells
# Cytokines
print(" ")
Red_coverage_percent = (round((red_pixels / total_pixels * 100), 1))
print("The percentage of the image covered by cytokines is: " + str(Red_coverage_percent) + "%")
red_plus_total_array = np.add(total_cell_array, permRed_segmentation)
red_plus_total_pixels = np.count_nonzero(red_plus_total_array)
adjusted_red_coverage_percent = (round((red_pixels / red_plus_total_pixels * 100), 1))
print("The percentage of the area covered by cytokines, with non-cellular regions subtracted is: " + str(
adjusted_red_coverage_percent) + "%")
"""PLOTTING IMAGES"""
#Plot images
fig, axes = plt.subplots(2, 2, figsize=(12, 11))
#ax0 = axes.ravel()
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(ihc_rgb, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title("Original")
ax1.imshow(DAB_segmentation, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title("DAB")
ax2.imshow(permRed_segmentation, cmap=plt.cm.gray)
ax2.set_title("perm red")
ax3.imshow(Hema_segmentation, cmap=plt.cm.gray)
ax3.set_title("Hematoxylin")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
plt.show()
|
apache-2.0
|
Erotemic/ibeis
|
ibeis/algo/graph/core.py
|
1
|
55530
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np # NOQA
import utool as ut
# import logging
import itertools as it
import copy
import six
import collections
from ibeis import constants as const
from ibeis.algo.graph import nx_dynamic_graph
# from ibeis.algo.graph import _dep_mixins
from ibeis.algo.graph import mixin_viz
from ibeis.algo.graph import mixin_helpers
from ibeis.algo.graph import mixin_dynamic
from ibeis.algo.graph import mixin_priority
from ibeis.algo.graph import mixin_loops
from ibeis.algo.graph import mixin_matching
from ibeis.algo.graph import mixin_groundtruth
from ibeis.algo.graph import mixin_simulation
from ibeis.algo.graph import mixin_ibeis
from ibeis.algo.graph import nx_utils as nxu
import pandas as pd
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV, UNKWN
from ibeis.algo.graph.state import UNINFERABLE
from ibeis.algo.graph.state import SAME, DIFF, NULL
import networkx as nx
import logging
print, rrr, profile = ut.inject2(__name__)
DEBUG_CC = False
# DEBUG_CC = True
def _rectify_decision(evidence_decision, meta_decision):
"""
If evidence decision is not explicitly set, then meta decision is used to
make a guess. Raises a ValueError if decisions are in incompatible states.
"""
# Default to the decision based on the media evidence
decision = evidence_decision
# Overwrite the graph decision with the meta decision if necessary
if meta_decision == SAME:
if decision in UNINFERABLE:
decision = POSTV
elif decision == NEGTV:
raise ValueError('evidence=negative and meta=same')
elif meta_decision == DIFF:
if decision in UNINFERABLE:
decision = NEGTV
elif decision == POSTV:
raise ValueError('evidence=positive and meta=diff')
return decision
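# Added illustrative behaviour (hedged sketch, assuming UNREV is one of the
# UNINFERABLE states; these lines are not original doctests):
#   _rectify_decision(UNREV, SAME) -> POSTV   (meta decision fills the gap)
#   _rectify_decision(UNREV, DIFF) -> NEGTV
#   _rectify_decision(POSTV, NULL) -> POSTV   (evidence decision stands)
#   _rectify_decision(NEGTV, SAME) -> raises ValueError (incompatible states)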
class Feedback(object):
def _check_edge(infr, edge):
aid1, aid2 = edge
if aid1 not in infr.aids_set:
raise ValueError('aid1=%r is not part of the graph' % (aid1,))
if aid2 not in infr.aids_set:
raise ValueError('aid2=%r is not part of the graph' % (aid2,))
def add_feedback_from(infr, items, verbose=None, **kwargs):
if verbose is None:
verbose = infr.verbose > 5
if isinstance(items, pd.DataFrame):
if list(items.index.names) == ['aid1', 'aid2']:
for edge, data in items.iterrows():
infr.add_feedback(edge=edge, verbose=verbose, **data)
else:
raise ValueError(
'Cannot interpret pd.DataFrame without edge index')
else:
# Dangerous if item length > 3
for item in items:
args = []
if len(item) == 1:
# Case where items=[edge1, edge2]
if isinstance(item[0], int) or len(item[0]) != 2:
raise ValueError('invalid edge')
if len(item) == 2:
# Case where items=[(edge1, state), (edge2, state)]
if ut.isiterable(item[0]):
edge = item[0]
args = item[1:]
else:
edge = item
else:
raise ValueError('invalid edge')
# Case where items=[(u, v, state), (u, v, state)]
if len(item) > 3:
raise ValueError('pass in data as a dataframe or '
'use kwargs')
infr.add_feedback(edge, *args, verbose=verbose, **kwargs)
def edge_decision(infr, edge):
r"""
Gets a decision on an edge, either explicitly or implicitly
CommandLine:
python -m ibeis.algo.graph.core edge_decision
Doctest:
>>> from ibeis.algo.graph.core import * # NOQA
>>> from ibeis.algo.graph import demo
>>> infr = demo.demodata_infr(num_pccs=1, p_incon=1)
>>> decision = infr.edge_decision((1, 2))
>>> print('decision = %r' % (decision,))
>>> assert decision == POSTV
>>> decision = infr.edge_decision((199, 299))
>>> print('decision = %r' % (decision,))
>>> assert decision == UNREV
"""
evidence_decision = infr.get_edge_attr(edge, 'evidence_decision',
on_missing='default',
default=UNREV)
meta_decision = infr.get_edge_attr(edge, 'meta_decision',
on_missing='default', default=NULL)
decision = _rectify_decision(evidence_decision, meta_decision)
return decision
def edge_decision_from(infr, edges):
r"""
Gets a decision for multiple edges
"""
edges = list(edges)
evidence_decisions = infr.gen_edge_values(
'evidence_decision', edges, on_missing='default', default=UNREV)
meta_decisions = infr.gen_edge_values(
'meta_decision', edges, on_missing='default', default=NULL)
for ed, md in zip(evidence_decisions, meta_decisions):
yield _rectify_decision(ed, md)
def add_node_feedback(infr, aid, **attrs):
infr.print('Writing annot aid=%r %s' % (aid, ut.repr2(attrs)))
ibs = infr.ibs
ibs.set_annot_quality_texts([aid], [attrs['quality_texts']])
ibs.set_annot_viewpoint_code([aid], [attrs['viewpoint_code']])
ibs.overwrite_annot_case_tags([aid], [attrs['case_tags']])
ibs.set_annot_multiple([aid], [attrs['multiple']])
@profile
def add_feedback(infr, edge, evidence_decision=None, tags=None,
user_id=None, meta_decision=None, confidence=None,
timestamp_c1=None, timestamp_c2=None, timestamp_s1=None,
timestamp=None, verbose=None, priority=None):
r"""
Doctest:
>>> from ibeis.algo.graph.core import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> infr.add_feedback((5, 6), POSTV)
>>> infr.add_feedback((5, 6), NEGTV, tags=['photobomb'])
>>> infr.add_feedback((1, 2), INCMP)
>>> print(ut.repr2(infr.internal_feedback, nl=2))
>>> assert len(infr.external_feedback) == 0
>>> assert len(infr.internal_feedback) == 2
>>> assert len(infr.internal_feedback[(5, 6)]) == 2
>>> assert len(infr.internal_feedback[(1, 2)]) == 1
"""
prev_verbose = infr.verbose
if verbose is not None:
infr.verbose = verbose
edge = aid1, aid2 = nxu.e_(*edge)
if not infr.has_edge(edge):
if True:
# Allow new aids
if not infr.graph.has_node(aid1):
infr.add_aids([aid1])
if not infr.graph.has_node(aid2):
infr.add_aids([aid2])
infr._check_edge(edge)
infr.graph.add_edge(aid1, aid2)
if evidence_decision is None:
evidence_decision = UNREV
if meta_decision is None:
meta_decision = const.META_DECISION.CODE.NULL
if confidence is None:
confidence = const.CONFIDENCE.CODE.UNKNOWN
if timestamp is None:
timestamp = ut.get_timestamp('int', isutc=True)
msg = 'add_feedback ({}, {}), '.format(aid1, aid2)
loc = locals()
msg += ', '.join([
str(val)
# key + '=' + str(val)
for key, val in (
(key, loc[key])
for key in ['evidence_decision', 'tags', 'user_id',
'confidence', 'meta_decision'])
if val is not None
])
infr.print(msg, 2, color='white')
if meta_decision == NULL:
# TODO: check previous meta_decision and use that if it's consistent
# with the evidence decision.
pass
decision = _rectify_decision(evidence_decision, meta_decision)
if decision == UNREV:
# Unreviewing an edge deletes anything not yet committed
if edge in infr.external_feedback:
raise ValueError('External edge reviews cannot be undone')
if edge in infr.internal_feedback:
del infr.internal_feedback[edge]
# Remove the edge from the queue if it is in there.
if infr.queue:
if edge in infr.queue:
del infr.queue[edge]
# Keep track of sequential reviews and set properties on global graph
num_reviews = infr.get_edge_attr(edge, 'num_reviews', default=0)
review_id = next(infr.review_counter)
feedback_item = {
'tags': tags,
'evidence_decision': evidence_decision,
'meta_decision': meta_decision,
'timestamp_c1': timestamp_c1,
'timestamp_c2': timestamp_c2,
'timestamp_s1': timestamp_s1,
'timestamp': timestamp,
'confidence': confidence,
'user_id': user_id,
'num_reviews': num_reviews + 1,
'review_id': review_id,
}
infr.internal_feedback[edge].append(feedback_item)
infr.set_edge_attr(edge, feedback_item)
if infr.test_mode:
prev_decision = infr._get_current_decision(edge)
infr._dynamic_test_callback(edge, decision, prev_decision, user_id)
# must happen after dynamic test callback
infr.set_edge_attr(edge, {'decision': decision})
if infr.params['inference.enabled']:
assert infr.dirty is False, (
'need to recompute before dynamic inference continues')
# Update priority queue based on the new edge
action = infr.add_review_edge(edge, decision)
if infr.test_mode:
infr.test_state['action'] = action
if False:
infr._print_debug_ccs()
else:
action = None
infr.dirty = True
infr._add_review_edge(edge, decision)
if infr.params['inference.enabled'] and infr.refresh:
# only add to criteria if this wasn't requested as a fix edge
if priority is not None and priority <= 1.0:
meaningful = bool({'merge', 'split'} & set(action))
infr.refresh.add(meaningful, user_id, decision)
if infr.test_mode:
infr.metrics_list.append(infr.measure_metrics())
infr.verbose = prev_verbose
def _print_debug_ccs(infr):
assert all([ut.allsame(infr.node_labels(*cc))
for cc in infr.positive_components()])
sorted_ccs = sorted([
set(cc) for cc in infr.pos_graph.connected_components()
])
msg = '[' + ', '.join([
repr(cc)
if infr.is_consistent(cc) else
ut.highlight_text(repr(cc), 'red')
for cc in sorted_ccs]) + ']'
print(msg)
@ut.classproperty
def feedback_keys(Infr):
""" edge attribute keys used for feedback """
return Infr.feedback_data_keys + ['num_reviews', 'review_id']
@ut.classproperty
def feedback_data_keys(Infr):
""" edge attribute keys used for feedback """
return [
'evidence_decision', 'tags', 'user_id',
'meta_decision', 'timestamp_c1', 'timestamp_c2',
'timestamp_s1', 'timestamp', 'confidence'
]
@profile
def apply_feedback_edges(infr):
r"""
Transforms the feedback dictionaries into nx graph edge attributes
CommandLine:
python -m ibeis.algo.graph.core apply_feedback_edges
Doctest:
>>> from ibeis.algo.graph.core import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> infr.reset_feedback()
>>> infr.params['inference.enabled'] = False
>>> #infr.add_feedback((1, 2), 'unknown', tags=[])
>>> infr.add_feedback((1, 2), INCMP, tags=[])
>>> infr.apply_feedback_edges()
>>> print('edges = ' + ut.repr4(dict(infr.graph.edges)))
>>> result = str(infr)
>>> print(result)
<AnnotInference(nNodes=6, nEdges=3, nCCs=4)>
"""
infr.print('apply_feedback_edges', 1)
# Transforms dictionary feedback into numpy array
edges = []
attr_lists = {key: [] for key in infr.feedback_keys}
for edge, vals in infr.all_feedback_items():
# hack for feedback rectification
feedback_item = infr._rectify_feedback_item(vals)
feedback_item['review_id'] = next(infr.review_counter)
feedback_item['num_reviews'] = len(vals)
# if feedback_item['decision'] == 'unknown':
# continue
set1 = set(feedback_item.keys())
set2 = set(attr_lists.keys())
if set1 != set2:
raise AssertionError(
'Bad feedback keys: ' +
ut.repr2(ut.set_overlap_items(set1, set2, 'got', 'want'), nl=1)
# ut.repr2(sorted(feedback_item.keys()), sv=True) + ' ' +
# ut.repr2(sorted(attr_lists.keys()), sv=True)
)
for key, val in feedback_item.items():
attr_lists[key].append(val)
edges.append(edge)
assert ut.allsame(list(map(len, attr_lists.values())))
assert len(edges) == len(next(iter(attr_lists.values())))
# Put pair orders in context of the graph
infr.print('_set_feedback_edges(nEdges=%d)' % (len(edges),), 3)
# Ensure edges exist
for edge in edges:
if not infr.graph.has_edge(*edge):
infr.graph.add_edge(*edge)
# take evidence_decision and meta_decision into account
decisions = [
_rectify_decision(ed, md) for ed, md in
zip(attr_lists['evidence_decision'], attr_lists['meta_decision'])
]
for state, es in ut.group_items(edges, decisions).items():
infr._add_review_edges_from(es, state)
for key, val_list in attr_lists.items():
infr.set_edge_attrs(key, ut.dzip(edges, val_list))
if infr.params['inference.enabled']:
infr.apply_nondynamic_update()
def _rectify_feedback(infr, feedback):
return {edge: infr._rectify_feedback_item(vals)
for edge, vals in feedback.items()}
def _rectify_feedback_item(infr, vals):
""" uses most recently use strategy """
return vals[-1]
def all_feedback_items(infr):
for edge, vals in six.iteritems(infr.external_feedback):
yield edge, vals
for edge, vals in six.iteritems(infr.internal_feedback):
yield edge, vals
def all_feedback(infr):
all_feedback = ut.ddict(list)
all_feedback.update(infr.all_feedback_items())
return all_feedback
def clear_feedback(infr, edges=None):
""" Delete all edges properties related to feedback """
if edges is None:
edges = infr.graph.edges()
edges = list(edges)
infr.print('clear_feedback len(edges) = %r' % (len(edges)), 2)
infr.external_feedback = ut.ddict(list)
infr.internal_feedback = ut.ddict(list)
# Kill all feedback, remote edge labels, but leave graph edges alone
keys = infr.feedback_keys + ['inferred_state']
ut.nx_delete_edge_attr(infr.graph, keys, edges)
# Move reviewed edges back into the unreviewed graph
for key in (POSTV, NEGTV, INCMP):
subgraph = infr.review_graphs[key]
prev_edges = ut.compress(edges, list(subgraph.has_edges(edges)))
subgraph.remove_edges_from(prev_edges)
infr.review_graphs[UNREV].add_edges_from(prev_edges)
infr.pos_redun_nids.clear()
infr.neg_redun_metagraph.clear()
infr.nid_to_errors.clear()
if __debug__:
infr.assert_disjoint_invariant()
def clear_edges(infr):
"""
Removes all edges from the graph
"""
for graph in infr.review_graphs.values():
graph.remove_edges_from(list(graph.edges()))
infr.graph.remove_edges_from(list(infr.graph.edges()))
infr.pos_redun_nids.clear()
infr.neg_redun_metagraph.clear()
infr.nid_to_errors.clear()
def reset_feedback(infr, mode='annotmatch', apply=True):
""" Resets feedback edges to state of the SQL annotmatch table """
infr.print('reset_feedback mode=%r' % (mode,), 1)
infr.clear_feedback()
if mode == 'annotmatch':
infr.external_feedback = infr.read_ibeis_annotmatch_feedback()
elif mode == 'staging':
infr.external_feedback = infr.read_ibeis_staging_feedback()
else:
raise ValueError('no mode=%r' % (mode,))
infr.internal_feedback = ut.ddict(list)
if apply:
infr.apply_feedback_edges()
def reset(infr, state='empty'):
"""
Removes all edges from graph and resets name labels.
Ignore:
>>> from ibeis.algo.graph.core import * # NOQA
>>> from ibeis.algo.graph import demo
>>> infr = demo.demodata_infr(num_pccs=5)
>>> assert len(list(infr.edges())) > 0
>>> infr.reset(state='empty')
>>> assert len(list(infr.edges())) == 0
"""
infr.clear_edges()
infr.clear_feedback()
if state == 'empty':
# Remove all edges, and component names
infr.clear_name_labels()
elif state == 'orig':
raise NotImplementedError('unused')
infr.reset_name_labels()
else:
raise ValueError('Unknown state=%r' % (state,))
def reset_name_labels(infr):
""" Resets all annotation node name labels to their initial values """
infr.print('reset_name_labels', 1)
orig_names = infr.get_node_attrs('orig_name_label')
infr.set_node_attrs('name_label', orig_names)
def clear_name_labels(infr):
""" Sets all annotation node name labels to be unknown """
infr.print('clear_name_labels()', 1)
# make distinct names for all nodes
distinct_names = {
node: -aid for node, aid in infr.get_node_attrs('aid').items()
}
infr.set_node_attrs('name_label', distinct_names)
class NameRelabel(object):
def node_label(infr, aid):
return infr.pos_graph.node_label(aid)
def node_labels(infr, *aids):
return infr.pos_graph.node_labels(*aids)
def _next_nid(infr):
if getattr(infr, 'nid_counter', None) is None:
nids = nx.get_node_attributes(infr.graph, 'name_label')
infr.nid_counter = max(nids)
infr.nid_counter += 1
new_nid = infr.nid_counter
return new_nid
def _rectify_names(infr, old_names, new_labels):
"""
Finds the best assignment of old names based on the new groups each is
assigned to.
old_names = [None, None, None, 1, 2, 3, 3, 4, 4, 4, 5, None]
new_labels = [ 1, 2, 2, 3, 4, 5, 5, 6, 3, 3, 7, 7]
"""
infr.print('rectifying name lists', 3)
from ibeis.scripts import name_recitifer
newlabel_to_oldnames = ut.group_items(old_names, new_labels)
unique_newlabels = list(newlabel_to_oldnames.keys())
grouped_oldnames_ = ut.take(newlabel_to_oldnames, unique_newlabels)
# Mark annots that are unknown and still grouped by themselves
still_unknown = [len(g) == 1 and g[0] is None for g in grouped_oldnames_]
# Remove nones for name rectifier
grouped_oldnames = [
[n for n in oldgroup if n is not None]
for oldgroup in grouped_oldnames_]
new_names = name_recitifer.find_consistent_labeling(
grouped_oldnames, verbose=infr.verbose >= 3, extra_prefix=None)
unknown_labels = ut.compress(unique_newlabels, still_unknown)
new_flags = [n is None for n in new_names]
# isinstance(n, six.string_types) and n.startswith('_extra_name')
# for n in new_names
# ]
label_to_name = ut.dzip(unique_newlabels, new_names)
needs_assign = ut.compress(unique_newlabels, new_flags)
return label_to_name, needs_assign, unknown_labels
def _rectified_relabel(infr, cc_subgraphs):
"""
Reuses as many names as possible
"""
# Determine which names can be reused
from ibeis.scripts import name_recitifer
infr.print('grouping names for rectification', 3)
grouped_oldnames_ = [
list(nx.get_node_attributes(subgraph, 'name_label').values())
for count, subgraph in enumerate(cc_subgraphs)
]
# Make sure negatives don't get priority
grouped_oldnames = [
[n for n in group if len(group) == 1 or n > 0]
for group in grouped_oldnames_
]
infr.print('begin rectification of %d grouped old names' % (
len(grouped_oldnames)), 2)
new_labels = name_recitifer.find_consistent_labeling(
grouped_oldnames, verbose=infr.verbose >= 3)
infr.print('done rectifying new names', 2)
new_flags = [
not isinstance(n, int) and n.startswith('_extra_name')
for n in new_labels
]
for idx in ut.where(new_flags):
new_labels[idx] = infr._next_nid()
for idx, label in enumerate(new_labels):
if label < 0 and len(grouped_oldnames[idx]) > 1:
# Remove negative ids for grouped items
new_labels[idx] = infr._next_nid()
return new_labels
@profile
def relabel_using_reviews(infr, graph=None, rectify=True):
r"""
Relabels nodes in graph based on positive connected components
This will change all of the names on the nodes to be consistent while
preserving any existing names as best as possible. If rectify=False,
this will be faster, but the old names will not be preserved and each
PCC will be assigned an arbitrary name.
Note:
if something messes up you can call infr.reset_labels_to_ibeis() to
reset node labels to their original values --- this will almost
always put the graph in an inconsistent state --- but then you can
this with rectify=True to fix everything up.
Args:
graph (nx.Graph, optional): only edges in `graph` are relabeled
defaults to current graph.
rectify (bool, optional): if True names attempt to remain
consistent otherwise there are no restrictions on name labels
other than that they are distinct.
"""
infr.print('relabel_using_reviews', 2)
if graph is None:
graph = infr.graph
# Get subgraphs and check consistency
cc_subgraphs = []
num_inconsistent = 0
for cc in infr.positive_components(graph=graph):
cc_subgraphs.append(infr.graph.subgraph(cc))
if not infr.is_consistent(cc):
num_inconsistent += 1
infr.print('num_inconsistent = %r' % (num_inconsistent,), 2)
if infr.verbose >= 2:
cc_sizes = list(map(len, cc_subgraphs))
pcc_size_hist = ut.dict_hist(cc_sizes)
pcc_size_stats = ut.get_stats(cc_sizes)
if len(pcc_size_hist) < 8:
infr.print('PCC size hist = %s' % (ut.repr2(pcc_size_hist),))
infr.print('PCC size stats = %s' % (ut.repr2(pcc_size_stats),))
if rectify:
# Rectified relabeling, preserves grouping and labeling if possible
new_labels = infr._rectified_relabel(cc_subgraphs)
else:
# Arbitrary relabeling, only preserves grouping
if graph is infr.graph:
# Use union find labels
new_labels = {
count:
infr.node_label(next(iter(subgraph.nodes())))
for count, subgraph in enumerate(cc_subgraphs)
}
else:
new_labels = {count: infr._next_nid()
for count, subgraph in enumerate(cc_subgraphs)}
for count, subgraph in enumerate(cc_subgraphs):
new_nid = new_labels[count]
node_to_newlabel = ut.dzip(subgraph.nodes(), [new_nid])
infr.set_node_attrs('name_label', node_to_newlabel)
num_names = len(cc_subgraphs)
infr.print('done relabeling', 3)
return num_names, num_inconsistent
def connected_component_status(infr):
r"""
Returns:
dict: num_inconsistent, num_names_max
CommandLine:
python -m ibeis.algo.graph.core connected_component_status
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.graph.core import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> infr.add_feedback_from([(2, 3, NEGTV), (5, 6, NEGTV), (1, 2, POSTV)])
>>> status = infr.connected_component_status()
>>> print(ut.repr3(status))
"""
infr.print('checking status', 3)
num_inconsistent = len(infr.recovery_ccs)
num_names_max = infr.pos_graph.number_of_components()
status = dict(
num_names_max=num_names_max,
num_inconsistent=num_inconsistent,
)
infr.print('done checking status', 3)
return status
class MiscHelpers(object):
def _rectify_nids(infr, aids, nids):
if nids is None:
if infr.ibs is None:
nids = [-aid for aid in aids]
else:
nids = infr.ibs.get_annot_nids(aids)
elif ut.isscalar(nids):
nids = [nids] * len(aids)
return nids
def remove_aids(infr, aids):
"""
Remove annotations from the graph.
Returns:
dict: split: indicates which PCCs were split by this action.
Note:
This may cause unintended splits!
Ignore:
>>> from graphid import demo, util
>>> infr = demo.demodata_infr(num_pccs=5, pos_redun=1)
>>> infr.refresh_candidate_edges()
>>> infr.pin_node_layout()
>>> before = infr.copy()
>>> aids = infr.aids[::5]
>>> splits = infr.remove_aids(aids)
>>> assert len(splits['old']) > 0
>>> infr.assert_invariants()
>>> # xdoc: +REQUIRES(--show)
>>> util.qtensure()
>>> after = infr
>>> before.show(fnum=1, pnum=(1, 2, 1), pickable=True)
>>> after.show(fnum=1, pnum=(1, 2, 2), pickable=True)
"""
infr.print('remove_aids len(aids)={}'.format(len(aids)), level=3)
# Determine which edges are going to be removed
remove_edges = nxu.edges_outgoing(infr.graph, aids)
old_groups = list(infr.positive_components())
# Remove from tertiary bookkeeping structures
remove_idxs = list(ut.take(ut.make_index_lookup(infr.aids), aids))
ut.delete_items_by_index(infr.orig_name_labels, remove_idxs)
ut.delete_items_by_index(infr.aids, remove_idxs)
infr.aids_set = set(infr.aids)
# Remove from secondary bookkeeping structures
ut.delete_dict_keys(infr.external_feedback, remove_edges)
ut.delete_dict_keys(infr.internal_feedback, remove_edges)
# Remove from core bookkeeping structures
infr.graph.remove_nodes_from(aids)
for graph in infr.review_graphs.values():
graph.remove_nodes_from(aids)
infr.queue.delete_items(remove_edges)
        # TODO: should refactor to perform a dynamic step, but in this case it
        # is less work to use a bazooka to shoot a fly.
infr.apply_nondynamic_update()
# I'm unsure if relabeling is necessary
infr.relabel_using_reviews()
new_groups = list(infr.positive_components())
# print('old_groups = {!r}'.format(old_groups))
# print('new_groups = {!r}'.format(new_groups))
delta = ut.grouping_delta(old_groups, new_groups)
splits = delta['splits']
n_old = len(splits['old'])
n_new = len(list(ut.flatten(splits['new'])))
infr.print(
'removing {} aids split {} old PCCs into {} new PCCs'.format(
len(aids), n_old, n_new))
return splits
# print(ub.repr2(delta, nl=2))
def add_aids(infr, aids, nids=None):
"""
CommandLine:
python -m ibeis.algo.graph.core add_aids --show
Doctest:
>>> from ibeis.algo.graph.core import * # NOQA
>>> aids_ = [1, 2, 3, 4, 5, 6, 7, 9]
>>> infr = AnnotInference(ibs=None, aids=aids_, autoinit=True)
>>> aids = [2, 22, 7, 9, 8]
>>> nids = None
>>> infr.add_aids(aids, nids)
>>> result = infr.aids
>>> print(result)
>>> assert len(infr.graph) == len(infr.aids)
[1, 2, 3, 4, 5, 6, 7, 9, 22, 8]
"""
nids = infr._rectify_nids(aids, nids)
assert len(aids) == len(nids), 'must correspond'
if infr.aids is None:
nids = infr._rectify_nids(aids, nids)
# Set object attributes
infr.aids = aids
infr.aids_set = set(infr.aids)
infr.orig_name_labels = nids
else:
aid_to_idx = ut.make_index_lookup(infr.aids)
orig_idxs = ut.dict_take(aid_to_idx, aids, None)
new_flags = ut.flag_None_items(orig_idxs)
new_aids = ut.compress(aids, new_flags)
new_nids = ut.compress(nids, new_flags)
# Extend object attributes
infr.aids.extend(new_aids)
infr.orig_name_labels.extend(new_nids)
infr.aids_set.update(new_aids)
infr.update_node_attributes(new_aids, new_nids)
if infr.graph is not None:
infr.graph.add_nodes_from(aids)
for subgraph in infr.review_graphs.values():
subgraph.add_nodes_from(aids)
nids = set(infr.pos_graph.node_labels(*aids))
infr.neg_metagraph.add_nodes_from(nids)
def update_node_attributes(infr, aids=None, nids=None):
if aids is None:
aids = infr.aids
nids = infr.orig_name_labels
assert aids is not None, 'must have aids'
assert nids is not None, 'must have nids'
node_to_aid = {aid: aid for aid in aids}
node_to_nid = {aid: nid for aid, nid in zip(aids, nids)}
ut.assert_eq_len(node_to_nid, node_to_aid)
infr.graph.add_nodes_from(aids)
for subgraph in infr.review_graphs.values():
subgraph.add_nodes_from(aids)
infr.set_node_attrs('aid', node_to_aid)
infr.set_node_attrs('name_label', node_to_nid)
infr.set_node_attrs('orig_name_label', node_to_nid)
# TODO: depricate these, they will always be identity I think
def initialize_graph(infr, graph=None):
infr.print('initialize_graph', 1)
if graph is None:
infr.graph = infr._graph_cls()
else:
infr.graph = graph
infr.review_graphs[POSTV] = nx_dynamic_graph.DynConnGraph()
infr.review_graphs[NEGTV] = infr._graph_cls()
infr.review_graphs[INCMP] = infr._graph_cls()
infr.review_graphs[UNKWN] = infr._graph_cls()
infr.review_graphs[UNREV] = infr._graph_cls()
if graph is not None:
for u, v, d in graph.edges(data=True):
evidence_decision = d.get('evidence_decision', UNREV)
meta_decision = d.get('meta_decision', NULL)
decision = _rectify_decision(evidence_decision, meta_decision)
if decision in {POSTV, NEGTV, INCMP, UNREV, UNKWN}:
infr.review_graphs[decision].add_edge(u, v)
else:
raise ValueError('Unknown decision=%r' % (decision,))
infr.update_node_attributes()
@profile
def log_message(infr, msg, level=1, color=None):
if color is None:
color = 'blue'
if True:
# Record the name of the calling function
parent_name = ut.get_parent_frame().f_code.co_name
msg = '[{}] '.format(parent_name) + msg
if True:
# Append the message to an internal log deque
infr.logs.append((msg, color))
if len(infr.logs) == infr.logs.maxlen:
infr.log_index = max(infr.log_index - 1, 0)
if infr.verbose >= level:
# Print the message to stdout
loglevel = logging.INFO
ut.cprint('[infr] ' + msg, color)
else:
loglevel = logging.DEBUG
if infr.logger:
# Send the message to a python logger
infr.logger.log(loglevel, msg)
print(msg)
print = log_message
def latest_logs(infr, colored=False):
index = infr.log_index
infr.log_index = len(infr.logs)
if colored:
return [infr.logs[x] for x in range(index, len(infr.logs))]
else:
return [infr.logs[x][0] for x in range(index, len(infr.logs))]
def dump_logs(infr):
print('--- <LOG DUMP> ---')
for msg, color in infr.logs:
ut.cprint('[infr] ' + msg, color)
        print('--- </LOG DUMP> ---')
class AltConstructors(object):
_graph_cls = nx_dynamic_graph.NiceGraph
# _graph_cls = nx.Graph
# nx.Graph
# _graph_cls = nx.DiGraph
@classmethod
def from_pairs(AnnotInference, aid_pairs, attrs=None, ibs=None, verbose=False):
import networkx as nx
G = AnnotInference._graph_cls()
assert not any([a1 == a2 for a1, a2 in aid_pairs]), 'cannot have self-edges'
G.add_edges_from(aid_pairs)
if attrs is not None:
for key in attrs.keys():
nx.set_edge_attributes(G, name=key, values=ut.dzip(aid_pairs, attrs[key]))
infr = AnnotInference.from_netx(G, ibs=ibs, verbose=verbose)
return infr
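    # Editorial sketch (not part of the original source): ``from_pairs`` can
    # bootstrap an inference object without an ibs controller; ``attrs`` maps
    # an edge-attribute name to one value per pair, e.g.
    #
    #     pairs = [(1, 2), (2, 3), (3, 4)]
    #     attrs = {'prob_match': [0.9, 0.2, 0.7]}
    #     infr = AnnotInference.from_pairs(pairs, attrs=attrs, ibs=None)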
@classmethod
def from_netx(AnnotInference, G, ibs=None, verbose=False, infer=True):
aids = list(G.nodes())
if ibs is not None:
nids = None
else:
nids = [-a for a in aids]
infr = AnnotInference(ibs, aids, nids, autoinit=False,
verbose=verbose)
infr.initialize_graph(graph=G)
# hack
orig_name_labels = [infr.pos_graph.node_label(a) for a in aids]
infr.orig_name_labels = orig_name_labels
infr.set_node_attrs('orig_name_label',
ut.dzip(aids, orig_name_labels))
if infer:
infr.apply_nondynamic_update()
return infr
@classmethod
def from_qreq_(AnnotInference, qreq_, cm_list, autoinit=False):
"""
Create a AnnotInference object using a precomputed query / results
"""
# raise NotImplementedError('do not use')
aids = ut.unique(ut.flatten([qreq_.qaids, qreq_.daids]))
nids = qreq_.get_qreq_annot_nids(aids)
ibs = qreq_.ibs
infr = AnnotInference(ibs, aids, nids, verbose=False, autoinit=autoinit)
infr.cm_list = cm_list
infr.qreq_ = qreq_
return infr
def status(infr, extended=False):
status_dict = ut.odict([
('nNodes', len(infr.aids)),
('nEdges', infr.graph.number_of_edges()),
('nCCs', infr.pos_graph.number_of_components()),
('nPostvEdges', infr.pos_graph.number_of_edges()),
('nNegtvEdges', infr.neg_graph.number_of_edges()),
('nIncmpEdges', infr.incomp_graph.number_of_edges()),
('nUnrevEdges', infr.unreviewed_graph.number_of_edges()),
('nPosRedunCCs', len(infr.pos_redun_nids)),
('nNegRedunPairs', infr.neg_redun_metagraph.number_of_edges()),
('nInconsistentCCs', len(infr.nid_to_errors)),
#('nUnkwnEdges', infr.unknown_graph.number_of_edges()),
])
if extended:
def count_within_between(edges):
n_within = 0
n_between = 0
for u, v in edges:
nid1, nid2 = infr.pos_graph.node_labels(u, v)
if nid1 == nid2:
n_within += 1
else:
n_between += 1
return n_within, n_between
a, b = count_within_between(infr.neg_graph.edges())
status_dict['nNegEdgesWithin'] = a
status_dict['nNegEdgesBetween'] = b
a, b = count_within_between(infr.incomp_graph.edges())
status_dict['nIncompEdgesWithin'] = a
status_dict['nIncompEdgesBetween'] = b
a, b = count_within_between(infr.unreviewed_graph.edges())
status_dict['nUnrevEdgesWithin'] = a
            status_dict['nUnrevEdgesBetween'] = b
return status_dict
def __nice__(infr):
if infr.graph is None:
return 'nAids=%r, G=None' % (len(infr.aids))
else:
fmt = 'nNodes={}, nEdges={}, nCCs={}'
msg = fmt.format(
len(infr.aids),
infr.graph.number_of_edges(),
infr.pos_graph.number_of_components(),
# infr.incomp_graph.number_of_edges(),
# infr.unreviewed_graph.number_of_edges(),
)
return msg
# return 'nAids={}, nEdges={}, nCCs={}'.format(
# len(infr.aids),
# infr.graph.number_of_edges(),
# infr.pos_graph.number_of_components()
# )
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotInference(ut.NiceRepr,
# Old internal stuffs
AltConstructors,
MiscHelpers,
Feedback,
NameRelabel,
# New core algorithm stuffs
mixin_dynamic.NonDynamicUpdate,
mixin_dynamic.Recovery,
mixin_dynamic.Consistency,
mixin_dynamic.Redundancy,
mixin_dynamic.DynamicUpdate,
mixin_priority.Priority,
mixin_matching.CandidateSearch,
mixin_matching.InfrLearning,
mixin_matching.AnnotInfrMatching,
# General helpers
mixin_helpers.AssertInvariants,
mixin_helpers.DummyEdges,
mixin_helpers.Convenience,
mixin_helpers.AttrAccess,
# Simulation and Loops
mixin_simulation.SimulationHelpers,
mixin_loops.InfrReviewers,
mixin_loops.InfrLoops,
# Visualization
mixin_viz.GraphVisualization,
# plugging into IBEIS
mixin_groundtruth.Groundtruth,
mixin_ibeis.IBEISIO,
mixin_ibeis.IBEISGroundtruth,
# _dep_mixins._AnnotInfrDepMixin,
):
"""
class for maintaining state of an identification
Terminology and Concepts:
CommandLine:
ibeis make_qt_graph_interface --show --aids=1,2,3,4,5,6,7
ibeis AnnotInference:0 --show
ibeis AnnotInference:1 --show
ibeis AnnotInference:2 --show
ibeis AnnotInference:0 --loginfr
Doctest:
>>> from ibeis.algo.graph.core import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6]
>>> infr = AnnotInference(ibs, aids, autoinit=True, verbose=1000)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = True
>>> infr.initialize_visual_node_attrs()
>>> # Note that there are initially no edges
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
infr = <AnnotInference(nNodes=6, nEdges=0, nCCs=6)>
Example:
>>> # SCRIPT
>>> from ibeis.algo.graph.core import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6, 7, 9]
>>> infr = AnnotInference(ibs, aids, autoinit=True)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = False
>>> infr.initialize_visual_node_attrs()
>>> # Note that there are initially no edges
>>> infr.show_graph(use_image=use_image)
>>> # But we can add nodes between the same names
>>> infr.ensure_mst()
>>> infr.show_graph(use_image=use_image)
>>> # Add some feedback
>>> infr.add_feedback((1, 4), NEGTV)
>>> infr.apply_feedback_edges()
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
Example:
>>> # SCRIPT
>>> from ibeis.algo.graph.core import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6, 7, 9]
>>> infr = AnnotInference(ibs, aids, autoinit=True)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = False
>>> infr.initialize_visual_node_attrs()
>>> infr.ensure_mst()
>>> # Add some feedback
>>> infr.add_feedback((1, 4), NEGTV)
>>> try:
>>> infr.add_feedback((1, 10), NEGTV)
>>> except ValueError:
>>> pass
>>> try:
>>> infr.add_feedback((11, 12), NEGTV)
>>> except ValueError:
>>> pass
>>> infr.apply_feedback_edges()
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
Ignore:
>>> import ibeis
>>> import utool as ut
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> infr = ibeis.AnnotInference(ibs, 'all')
>>> class_ = infr
>>> fpath = None
>>> static_attrs = ut.check_static_member_vars(class_, fpath)
>>> uninitialized = set(infr.__dict__.keys()) - set(static_attrs)
"""
def __getstate__(self):
state = self.__dict__.copy()
# Dont pickle generators
state['_gen'] = None
state['logger'] = None
return state
def __init__(infr, ibs, aids=[], nids=None, autoinit=True, verbose=False):
"""
Ignore:
pass
"""
# infr.verbose = verbose
infr.name = None
infr.verbose = verbose
# ibeis controller and initial nodes
# TODO: aids can be abstracted as a property that simply looks at the
# nodes in infr.graph.
if isinstance(ibs, six.string_types):
import ibeis
ibs = ibeis.opendb(ibs)
# setup logging
infr.logger = None
do_logging = ut.get_argflag(('--loginfr', '--log-infr'))
# do_logging = True
if do_logging:
if ibs is not None:
from os.path import join
# import ubelt as ub
# logdir = ibs.get_logdir_local()
logdir = '.'
logname = 'AnnotInference' + ut.timestamp()
logger = logging.getLogger(logname)
if not logger.handlers:
fh = logging.FileHandler(join(logdir, logname + '.log'))
print('logger.handlers = {!r}'.format(logger.handlers))
logger.addHandler(fh)
# logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
infr.logger = logger
infr.logs = collections.deque(maxlen=10000)
infr.log_index = 0
infr.print('__init__ queue', level=1)
# If not dirty, new feedback should dynamically maintain a consistent
        # state. If dirty it means we need to recompute connected components
# before we can continue with dynamic review.
infr.dirty = False
infr.readonly = False
infr.ibs = ibs
infr.aids = None
infr.aids_set = None
infr.orig_name_labels = None
# Underlying graph structure
infr.graph = None
infr.review_graphs = {
POSTV: None,
NEGTV: None,
INCMP: None,
UNKWN: None,
UNREV: None,
}
infr.print('__init__ structures', level=1)
# Criterion
infr.queue = ut.PriorityQueue()
infr.refresh = None
infr.review_counter = it.count(0)
infr.nid_counter = None
# Dynamic Properties (requires bookkeeping)
infr.nid_to_errors = {}
infr.recovery_ccs = []
# Recover graph holds positive edges of inconsistent PCCs
infr.recover_graph = nx_dynamic_graph.DynConnGraph()
# Set of PCCs that are positive redundant
infr.pos_redun_nids = set([])
# Represents the metagraph of negative edges between PCCs
infr.neg_redun_metagraph = infr._graph_cls()
# NEW VERSION: metagraph of PCCs with ANY number of negative edges
# between them. The weight on the edge should represent the strength.
infr.neg_metagraph = infr._graph_cls()
infr.print('__init__ feedback', level=1)
# This should represent The feedback read from a database. We do not
# need to do any updates to an external database based on this data.
infr.external_feedback = ut.ddict(list)
# Feedback that has not been synced with the external database.
# Once we sync, this is merged into external feedback.
infr.internal_feedback = ut.ddict(list)
# Bookkeeping
infr.edge_truth = {}
infr.task_probs = ut.ddict(dict)
# A generator that maintains the state of the algorithm
infr._gen = None
# Computer vision algorithms
infr.ranker = None
infr.verifiers = None
infr.print('__init__ configuration', level=1)
# TODO: move to params
infr.task_thresh_dict = {
'zebra_grevys': {
'match_state': {
POSTV: np.inf,
NEGTV: np.inf,
INCMP: np.inf,
},
'photobomb_state': {
'pb': np.inf,
'nopb': np.inf,
}
},
'giraffe_reticulated': {
'match_state': {
POSTV: np.inf,
NEGTV: np.inf,
INCMP: np.inf,
},
'photobomb_state': {
'pb': np.inf,
'nopb': np.inf,
}
},
}
infr.task_thresh = None
# Parameters / Configurations / Callbacks
infr.callbacks = {
'request_review': None,
'review_ready': None,
'review_finished': None,
}
infr.params = {
'manual.n_peek': 1,
'manual.autosave': True,
'ranking.enabled': True,
'ranking.ntop': 5,
'algo.max_outer_loops': None,
'algo.quickstart': False,
'algo.hardcase': False,
# Dynamic Inference
'inference.enabled': True,
'inference.update_attrs': True,
# Termination / Refresh
'refresh.window': 20,
'refresh.patience': 72,
'refresh.thresh': 0.052,
'refresh.method': 'binomial',
# Redundancy
# if redun.enabled is True, then redundant edges will be ignored by
            # the priority queue and extra edges needed to achieve minimum
# redundancy will be searched for if the queue is empty.
'redun.enabled': True,
# positive/negative k
'redun.pos': 2,
'redun.neg': 2,
# does positive/negative augmentation
'redun.enforce_pos': True,
'redun.enforce_neg': True,
# prevents user interaction in final phase
'redun.neg.only_auto': True,
# Only review CCs connected by confidence less than this value
# a good values is 'pretty_sure'
'queue.conf.thresh': None,
# Autoreviewer params
'autoreview.enabled': True,
'autoreview.prioritize_nonpos': True,
}
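        # Editorial sketch (not part of the original source): these defaults
        # are plain dictionary entries and can be overridden per instance,
        # then read back grouped by prefix via ``subparams``, e.g.
        #
        #     infr.params['redun.pos'] = 3
        #     infr.params['refresh.window'] = 50
        #     refresh_cfg = infr.subparams('refresh')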
infr._viz_image_config = {
'in_image': False,
'thumbsize': 221,
}
infr.print('__init__ storage', level=1)
infr.verifier_params = {} # TODO
infr.ranker_params = {
'K': 5,
}
        # Developer modes (consolidate this)
infr.test_mode = False
infr.simulation_mode = False
# set to the current phase of the main loop
# (mostly for testing)
infr.phase = None
infr.loop_phase = None
# Testing state
infr.metrics_list = None
infr.test_state = None
infr.test_gt_pos_graph = None
infr.nid_to_gt_cc = None
infr.node_truth = None
infr.real_n_pcc_mst_edges = None
# External: Can we remove these?
infr.cm_list = None
infr.vsone_matches = {}
infr.qreq_ = None
infr.manual_wgt = None
infr.print('__init__ aids', level=1)
if aids == 'all':
aids = ibs.get_valid_aids()
infr.add_aids(aids, nids)
infr.print('__init__ autoinit', level=1)
if autoinit:
infr.initialize_graph()
if isinstance(autoinit, six.string_types):
infr.reset_feedback(autoinit)
infr.print('__init__ done', level=1)
def subparams(infr, prefix):
"""
Returns dict of params prefixed with <prefix>.
The returned dict does not contain the prefix
Doctest:
>>> from ibeis.algo.graph.core import *
>>> import ibeis
>>> infr = ibeis.AnnotInference(None)
>>> result = ut.repr2(infr.subparams('refresh'))
>>> print(result)
            {'method': 'binomial', 'patience': 72, 'thresh': 0.052, 'window': 20}
"""
prefix_ = prefix + '.'
subparams = {k[len(prefix_):]: v for k, v in infr.params.items()
if k.startswith(prefix_)}
return subparams
def copy(infr):
# shallow copy ibs
infr2 = AnnotInference(
infr.ibs, copy.deepcopy(infr.aids),
copy.deepcopy(infr.orig_name_labels), autoinit=False,
verbose=infr.verbose)
# shallow algorithm classes
infr2.verifiers = infr.verifiers
infr2.ranker = infr.ranker
infr2.graph = infr.graph.copy()
infr2.external_feedback = copy.deepcopy(infr.external_feedback)
infr2.internal_feedback = copy.deepcopy(infr.internal_feedback)
infr2.cm_list = copy.deepcopy(infr.cm_list)
infr2.qreq_ = copy.deepcopy(infr.qreq_)
infr2.nid_counter = infr.nid_counter
infr2.recover_graph = copy.deepcopy(infr.recover_graph)
infr2.pos_redun_nids = copy.deepcopy(infr.pos_redun_nids)
infr2.neg_redun_metagraph = copy.deepcopy(infr.neg_redun_metagraph)
infr2.neg_metagraph = copy.deepcopy(infr.neg_metagraph)
infr2.review_graphs = copy.deepcopy(infr.review_graphs)
infr2.nid_to_errors = copy.deepcopy(infr.nid_to_errors)
infr2.recovery_ccs = copy.deepcopy(infr.recovery_ccs)
infr2.readonly = infr.readonly
infr2.dirty = infr.dirty
        infr2.test_mode = infr.test_mode
        infr2.simulation_mode = infr.simulation_mode
        infr2.queue = copy.deepcopy(infr.queue)
        infr2.params = copy.deepcopy(infr.params)
infr2._viz_image_config = infr._viz_image_config.copy()
if infr.test_mode:
infr2.test_state = copy.deepcopy(infr.test_state)
infr2.metrics_list = copy.deepcopy(infr.metrics_list)
return infr2
def subgraph(infr, aids):
"""
Makes a new inference object that is a subset of the original.
Note, this is not robust, be careful. The subgraph should be treated as
read only. Do not commit any reviews made from here.
"""
orig_name_labels = list(infr.gen_node_values('orig_name_label', aids))
infr2 = AnnotInference(infr.ibs, aids, orig_name_labels,
autoinit=False, verbose=infr.verbose)
# deep copy the graph structure
infr2.graph = infr.graph.subgraph(aids).copy()
infr2.readonly = True
infr2.verifiers = infr.verifiers
infr2.ranker = infr.ranker
        infr2.params = copy.deepcopy(infr.params)
infr2._viz_image_config = infr._viz_image_config.copy()
# infr2._viz_init_nodes = infr._viz_image_config
# infr2._viz_image_config_dirty = infr._viz_image_config_dirty
infr2.edge_truth = {
e: infr.edge_truth[e] for e in infr2.graph.edges()
if e in infr.edge_truth
}
# TODO: internal/external feedback
infr2.nid_counter = infr.nid_counter
infr2.dirty = True
infr2.cm_list = None
infr2.qreq_ = None
# TODO:
# infr2.nid_to_errors {} # = copy.deepcopy(infr.nid_to_errors)
# infr2.recover_graph = copy.deepcopy(infr.recover_graph)
# infr2.pos_redun_nids = copy.deepcopy(infr.pos_redun_nids)
# infr2.neg_redun_metagraph = copy.deepcopy(infr.neg_redun_metagraph)
infr2.review_graphs = {}
for k, g in infr.review_graphs.items():
if g is None:
infr2.review_graphs[k] = None
elif k == POSTV:
infr2.review_graphs[k] = g.subgraph(aids, dynamic=True)
else:
infr2.review_graphs[k] = g.subgraph(aids)
return infr2
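    # Editorial sketch (not part of the original source): a read-only subgraph
    # is convenient for inspecting a single PCC without touching the parent
    # object, e.g.
    #
    #     cc = next(infr.positive_components())
    #     sub = infr.subgraph(list(cc))
    #     print(sub.status())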
def set_config(infr, config, **kw):
pass
def testdata_infr(defaultdb='PZ_MTEST'):
import ibeis
ibs = ibeis.opendb(defaultdb=defaultdb)
aids = [1, 2, 3, 4, 5, 6]
infr = AnnotInference(ibs, aids, autoinit=True)
return infr
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.viz.viz_graph2 make_qt_graph_interface --show --aids=1,2,3,4,5,6,7 --graph --match=1,4 --nomatch=3,1,5,7
python -m ibeis.algo.graph.core
python -m ibeis.algo.graph all
python -m ibeis.algo.graph.core --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
apache-2.0
|
abele/bokeh
|
bokeh/charts/builder/tests/test_area_builder.py
|
33
|
3666
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from bokeh.charts import Area
from bokeh.models import DataRange1d, Range1d
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestAreaBuilder(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict(
python=[2, 3, 7, 5, 26],
pypy=[12, 33, 47, 15, 126],
jython=[22, 43, 10, 25, 26],
)
        # prepare some data to check the test results...
zeros = np.zeros(5)
x = np.array([4,3,2,1,0,0,1,2,3,4])
y_jython = np.hstack((zeros, np.array(xyvalues['jython'])))
y_pypy = np.hstack((zeros, np.array(xyvalues['pypy'])))
y_python = np.hstack((zeros, np.array(xyvalues['python'])))
data_keys = ['x', 'y_jython', 'y_pypy', 'y_python']
for _xy in [xyvalues, dict(xyvalues), pd.DataFrame(xyvalues)]:
area = create_chart(Area, _xy)
builder = area._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
self.assertListEqual(sorted(builder._data.keys()), data_keys)
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_python'], y_python)
self.assertIsInstance(area.x_range, DataRange1d)
self.assertIsInstance(area.y_range, Range1d)
assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)
assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)
self.assertEqual(builder._source._data, builder._data)
data_keys = ['x', 'y_0', 'y_1', 'y_2']
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
y_0, y_1, y_2 = y_python, y_pypy, y_jython
for _xy in [lvalues, np.array(lvalues)]:
area = create_chart(Area, _xy)
builder = area._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
self.assertListEqual(sorted(builder._data.keys()), data_keys)
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_0'], y_0)
assert_array_equal(builder._data['y_1'], y_1)
assert_array_equal(builder._data['y_2'], y_2)
self.assertIsInstance(area.x_range, DataRange1d)
self.assertIsInstance(area.y_range, Range1d)
assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)
assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)
self.assertEqual(builder._source._data, builder._data)
|
bsd-3-clause
|
phobson/statsmodels
|
statsmodels/tsa/statespace/tools.py
|
1
|
49901
|
"""
Statespace Tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.linalg import solve_sylvester
from statsmodels.tools.data import _is_using_pandas
from . import _statespace
has_find_best_blas_type = True
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError: # pragma: no cover
has_find_best_blas_type = False
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return prefix, dtype, None
has_trmm = True
try:
from scipy.linalg.blas import dtrmm
except ImportError:
has_trmm = False
prefix_dtype_map = {
's': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128
}
prefix_statespace_map = {
's': _statespace.sStatespace, 'd': _statespace.dStatespace,
'c': _statespace.cStatespace, 'z': _statespace.zStatespace
}
prefix_kalman_filter_map = {
's': _statespace.sKalmanFilter, 'd': _statespace.dKalmanFilter,
'c': _statespace.cKalmanFilter, 'z': _statespace.zKalmanFilter
}
if has_trmm:
prefix_pacf_map = {
's': _statespace._scompute_coefficients_from_multivariate_pacf,
'd': _statespace._dcompute_coefficients_from_multivariate_pacf,
'c': _statespace._ccompute_coefficients_from_multivariate_pacf,
'z': _statespace._zcompute_coefficients_from_multivariate_pacf
}
prefix_sv_map = {
's': _statespace._sconstrain_sv_less_than_one,
'd': _statespace._dconstrain_sv_less_than_one,
'c': _statespace._cconstrain_sv_less_than_one,
'z': _statespace._zconstrain_sv_less_than_one
}
def companion_matrix(polynomial):
r"""
Create a companion matrix
Parameters
----------
polynomial : array_like or list
If an iterable, interpreted as the coefficients of the polynomial from
which to form the companion matrix. Polynomial coefficients are in
order of increasing degree, and may be either scalars (as in an AR(p)
model) or coefficient matrices (as in a VAR(p) model). If an integer,
        it is interpreted as the size of a companion matrix of a scalar
polynomial, where the polynomial coefficients are initialized to zeros.
If a matrix polynomial is passed, :math:`C_0` may be set to the scalar
value 1 to indicate an identity matrix (doing so will improve the speed
of the companion matrix creation).
Returns
-------
companion_matrix : array
Notes
-----
Given coefficients of a lag polynomial of the form:
.. math::
c(L) = c_0 + c_1 L + \dots + c_p L^p
returns a matrix of the form
.. math::
\begin{bmatrix}
\phi_1 & 1 & 0 & \cdots & 0 \\
\phi_2 & 0 & 1 & & 0 \\
\vdots & & & \ddots & 0 \\
& & & & 1 \\
\phi_n & 0 & 0 & \cdots & 0 \\
\end{bmatrix}
where some or all of the :math:`\phi_i` may be non-zero (if `polynomial` is
None, then all are equal to zero).
If the coefficients provided are scalars :math:`(c_0, c_1, \dots, c_p)`,
then the companion matrix is an :math:`n \times n` matrix formed with the
elements in the first column defined as
:math:`\phi_i = -\frac{c_i}{c_0}, i \in 1, \dots, p`.
If the coefficients provided are matrices :math:`(C_0, C_1, \dots, C_p)`,
each of shape :math:`(m, m)`, then the companion matrix is an
:math:`nm \times nm` matrix formed with the elements in the first column
defined as :math:`\phi_i = -C_0^{-1} C_i', i \in 1, \dots, p`.
It is important to understand the expected signs of the coefficients. A
typical AR(p) model is written as:
.. math::
y_t = a_1 y_{t-1} + \dots + a_p y_{t-p} + \varepsilon_t
This can be rewritten as:
.. math::
(1 - a_1 L - \dots - a_p L^p )y_t = \varepsilon_t \\
(1 + c_1 L + \dots + c_p L^p )y_t = \varepsilon_t \\
c(L) y_t = \varepsilon_t
The coefficients from this form are defined to be :math:`c_i = - a_i`, and
it is the :math:`c_i` coefficients that this function expects to be
provided.
"""
identity_matrix = False
if isinstance(polynomial, int):
n = polynomial
m = 1
polynomial = None
else:
n = len(polynomial) - 1
if n < 1:
raise ValueError("Companion matrix polynomials must include at"
" least two terms.")
if isinstance(polynomial, list) or isinstance(polynomial, tuple):
try:
# Note: can't use polynomial[0] because of the special behavior
# associated with matrix polynomials and the constant 1, see
# below.
m = len(polynomial[1])
except TypeError:
m = 1
# Check if we just have a scalar polynomial
if m == 1:
polynomial = np.asanyarray(polynomial)
# Check if 1 was passed as the first argument (indicating an
# identity matrix)
elif polynomial[0] == 1:
polynomial[0] = np.eye(m)
identity_matrix = True
else:
m = 1
polynomial = np.asanyarray(polynomial)
matrix = np.zeros((n * m, n * m), dtype=np.asanyarray(polynomial).dtype)
idx = np.diag_indices((n - 1) * m)
idx = (idx[0], idx[1] + m)
matrix[idx] = 1
if polynomial is not None and n > 0:
if m == 1:
matrix[:, 0] = -polynomial[1:] / polynomial[0]
elif identity_matrix:
for i in range(n):
matrix[i * m:(i + 1) * m, :m] = -polynomial[i+1].T
else:
inv = np.linalg.inv(polynomial[0])
for i in range(n):
matrix[i * m:(i + 1) * m, :m] = -np.dot(inv, polynomial[i+1]).T
return matrix
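# Editorial sketch (not part of the original module): a small, never-called
# illustration of ``companion_matrix`` for the scalar AR(2) lag polynomial
# 1 - 0.5 L - 0.2 L^2, i.e. coefficients (c_0, c_1, c_2) = (1, -0.5, -0.2).
def _example_companion_matrix():
    C = companion_matrix([1, -0.5, -0.2])
    # First column holds (0.5, 0.2); the superdiagonal entry is 1.
    assert C.shape == (2, 2)
    assert np.allclose(C[:, 0], [0.5, 0.2]) and C[0, 1] == 1
    return C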
def diff(series, k_diff=1, k_seasonal_diff=None, k_seasons=1):
r"""
Difference a series simply and/or seasonally along the zero-th axis.
Given a series (denoted :math:`y_t`), performs the differencing operation
.. math::
\Delta^d \Delta_s^D y_t
    where :math:`d =` `k_diff`, :math:`s =` `k_seasons`,
    :math:`D =` `k\_seasonal\_diff`, and :math:`\Delta` is the difference
operator.
Parameters
----------
series : array_like
The series to be differenced.
    k_diff : int, optional
The number of simple differences to perform. Default is 1.
    k_seasonal_diff : int or None, optional
The number of seasonal differences to perform. Default is no seasonal
differencing.
k_seasons : int, optional
The seasonal lag. Default is 1. Unused if there is no seasonal
differencing.
Returns
-------
differenced : array
The differenced array.
"""
pandas = _is_using_pandas(series, None)
differenced = np.asanyarray(series) if not pandas else series
# Seasonal differencing
if k_seasonal_diff is not None:
while k_seasonal_diff > 0:
if not pandas:
differenced = (
differenced[k_seasons:] - differenced[:-k_seasons]
)
else:
differenced = differenced.diff(k_seasons)[k_seasons:]
k_seasonal_diff -= 1
# Simple differencing
if not pandas:
differenced = np.diff(differenced, k_diff, axis=0)
else:
while k_diff > 0:
differenced = differenced.diff()[1:]
k_diff -= 1
return differenced
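# Editorial sketch (not part of the original module): simple and seasonal
# differencing of a short deterministic series.
def _example_diff():
    series = np.arange(12.)
    # First differences of a linear trend are constant (all ones here).
    simple = diff(series, k_diff=1)
    # One seasonal difference at lag 4 (and no simple differencing).
    seasonal = diff(series, k_diff=0, k_seasonal_diff=1, k_seasons=4)
    assert np.allclose(simple, 1.0)
    assert np.allclose(seasonal, 4.0)
    return simple, seasonal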
def is_invertible(polynomial, threshold=1.):
r"""
Determine if a polynomial is invertible.
Requires all roots of the polynomial lie inside the unit circle.
Parameters
----------
polynomial : array_like or tuple, list
Coefficients of a polynomial, in order of increasing degree.
For example, `polynomial=[1, -0.5]` corresponds to the polynomial
:math:`1 - 0.5x` which has root :math:`2`. If it is a matrix
polynomial (in which case the coefficients are coefficient matrices),
a tuple or list of matrices should be passed.
threshold : number
Allowed threshold for `is_invertible` to return True. Default is 1.
Notes
-----
If the coefficients provided are scalars :math:`(c_0, c_1, \dots, c_n)`,
then the corresponding polynomial is :math:`c_0 + c_1 L + \dots + c_n L^n`.
If the coefficients provided are matrices :math:`(C_0, C_1, \dots, C_n)`,
then the corresponding polynomial is :math:`C_0 + C_1 L + \dots + C_n L^n`.
There are three equivalent methods of determining if the polynomial
represented by the coefficients is invertible:
The first method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (1 - \lambda_1 L)
(1 - \lambda_2 L) \dots (1 - \lambda_n L)
In order for :math:`C(L)` to be invertible, it must be that each factor
:math:`(1 - \lambda_i L)` is invertible; the condition is then that
:math:`|\lambda_i| < 1`, where :math:`\lambda_i` is a root of the
polynomial.
The second method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (L - \zeta_1) (L - \zeta_2) \dots (L - \zeta_3)
The condition is now :math:`|\zeta_i| > 1`, where :math:`\zeta_i` is a root
of the polynomial with reversed coefficients and
:math:`\lambda_i = \frac{1}{\zeta_i}`.
Finally, a companion matrix can be formed using the coefficients of the
polynomial. Then the eigenvalues of that matrix give the roots of the
polynomial. This last method is the one actually used.
See Also
--------
companion_matrix
"""
# First method:
# np.all(np.abs(np.roots(np.r_[1, params])) < 1)
# Second method:
# np.all(np.abs(np.roots(np.r_[1, params][::-1])) > 1)
# Final method:
eigvals = np.linalg.eigvals(companion_matrix(polynomial))
return np.all(np.abs(eigvals) < threshold)
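# Editorial sketch (not part of the original module): 1 - 0.5 L has its root
# (2) outside the unit circle and is invertible; 1 - 2 L (root 0.5) is not.
def _example_is_invertible():
    assert is_invertible([1, -0.5])
    assert not is_invertible([1, -2.])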
def solve_discrete_lyapunov(a, q, complex_step=False):
r"""
Solves the discrete Lyapunov equation using a bilinear transformation.
Notes
-----
This is a modification of the version in Scipy (see
https://github.com/scipy/scipy/blob/master/scipy/linalg/_solvers.py)
which allows passing through the complex numbers in the matrix a
(usually the transition matrix) in order to allow complex step
differentiation.
"""
eye = np.eye(a.shape[0])
if not complex_step:
aH = a.conj().transpose()
aHI_inv = np.linalg.inv(aH + eye)
b = np.dot(aH - eye, aHI_inv)
c = 2*np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
return solve_sylvester(b.conj().transpose(), b, -c)
else:
aH = a.transpose()
aHI_inv = np.linalg.inv(aH + eye)
b = np.dot(aH - eye, aHI_inv)
c = 2*np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
return solve_sylvester(b.transpose(), b, -c)
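# Editorial sketch (not part of the original module): check that the returned
# solution satisfies the discrete Lyapunov equation A X A' - X + Q = 0 for a
# small stable (real) transition matrix.
def _example_solve_discrete_lyapunov():
    a = np.array([[0.5, 0.1],
                  [0.0, 0.3]])
    q = np.eye(2)
    x = solve_discrete_lyapunov(a, q)
    assert np.allclose(a.dot(x).dot(a.T) - x + q, 0)
    return x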
def constrain_stationary_univariate(unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
Returns
-------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
References
----------
.. [1] Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = unconstrained.shape[0]
y = np.zeros((n, n), dtype=unconstrained.dtype)
r = unconstrained/((1 + unconstrained**2)**0.5)
for k in range(n):
for i in range(k):
y[k, i] = y[k - 1, i] + r[k] * y[k - 1, k - i - 1]
y[k, k] = r[k]
return -y[n - 1, :]
def unconstrain_stationary_univariate(constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
Returns
-------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
References
----------
.. [1] Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = constrained.shape[0]
y = np.zeros((n, n), dtype=constrained.dtype)
y[n-1:] = -constrained
for k in range(n-1, 0, -1):
for i in range(k):
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
r = y.diagonal()
x = r / ((1 - r**2)**0.5)
return x
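# Editorial sketch (not part of the original module): the univariate
# constrain / unconstrain transformations are inverses, so a roundtrip should
# recover the optimizer parameters up to floating point error.
def _example_univariate_constraint_roundtrip():
    unconstrained = np.array([2.0, -1.0, 0.5])
    constrained = constrain_stationary_univariate(unconstrained)
    recovered = unconstrain_stationary_univariate(constrained)
    assert np.allclose(recovered, unconstrained)
    return constrained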
def _constrain_sv_less_than_one_python(unconstrained, order=None,
k_endog=None):
"""
Transform arbitrary matrices to matrices with singular values less than
one.
Parameters
----------
unconstrained : list
Arbitrary matrices. Should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
constrained : list
Partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
Notes
-----
Corresponds to Lemma 2.2 in Ansley and Kohn (1986). See
`constrain_stationary_multivariate` for more details.
There is a Cython implementation of this function that can be much faster,
but which requires SciPy 0.14.0 or greater. See
`constrain_stationary_multivariate` for details.
"""
from scipy import linalg
constrained = [] # P_s, s = 1, ..., p
if order is None:
order = len(unconstrained)
if k_endog is None:
k_endog = unconstrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
A = unconstrained[i]
B, lower = linalg.cho_factor(eye + np.dot(A, A.T), lower=True)
constrained.append(linalg.solve_triangular(B, A, lower=lower))
return constrained
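# Editorial sketch (not part of the original module): the transformed matrix
# returned by the pure-Python constraint should have all singular values
# strictly less than one.
def _example_constrain_sv_less_than_one():
    A = [np.array([[3.0, 0.5],
                   [0.2, -1.0]])]
    P = _constrain_sv_less_than_one_python(A)
    assert np.all(np.linalg.svd(P[0], compute_uv=False) < 1)
    return P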
def _compute_coefficients_from_multivariate_pacf_python(
partial_autocorrelations, error_variance, transform_variance=False,
order=None, k_endog=None):
"""
Transform matrices with singular values less than one to matrices
corresponding to a stationary (or invertible) process.
Parameters
----------
partial_autocorrelations : list
Partial autocorrelation matrices. Should be a list of length `order`,
where each element is an array sized `k_endog` x `k_endog`.
error_variance : array
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
is not transformed by it (when `transform_variance` is False). The
error term variance is required input when transformation is used
either to force an autoregressive component to be stationary or to
force a moving average component to be invertible.
transform_variance : boolean, optional
Whether or not to transform the error variance term. This option is
not typically used, and the default is False.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
coefficient_matrices : list
Transformed coefficient matrices leading to a stationary VAR
representation.
Notes
-----
Corresponds to Lemma 2.1 in Ansley and Kohn (1986). See
`constrain_stationary_multivariate` for more details.
There is a Cython implementation of this function that can be much faster,
but which requires SciPy 0.14.0 or greater. See
`constrain_stationary_multivariate` for details.
"""
from scipy import linalg
if order is None:
order = len(partial_autocorrelations)
if k_endog is None:
k_endog = partial_autocorrelations[0].shape[0]
# If we want to keep the provided variance but with the constrained
# coefficient matrices, we need to make a copy here, and then after the
# main loop we will transform the coefficients to match the passed variance
if not transform_variance:
initial_variance = error_variance
# Need to make the input variance large enough that the recursions
        # don't lead to zero-matrices due to roundoff error, which would cause
# exceptions from the Cholesky decompositions.
# Note that this will still not always ensure positive definiteness,
        # and, for sufficiently large k_endog and order, an exception may still be raised
error_variance = np.eye(k_endog) * (order + k_endog)**10
forward_variances = [error_variance] # \Sigma_s
backward_variances = [error_variance] # \Sigma_s^*, s = 0, ..., p
autocovariances = [error_variance] # \Gamma_s
# \phi_{s,k}, s = 1, ..., p
# k = 1, ..., s+1
forwards = []
# \phi_{s,k}^*
backwards = []
error_variance_factor = linalg.cholesky(error_variance, lower=True)
forward_factors = [error_variance_factor]
backward_factors = [error_variance_factor]
# We fill in the entries as follows:
# [1,1]
# [2,2], [2,1]
# [3,3], [3,1], [3,2]
# ...
# [p,p], [p,1], ..., [p,p-1]
# the last row, correctly ordered, is then used as the coefficients
for s in range(order): # s = 0, ..., p-1
prev_forwards = forwards
prev_backwards = backwards
forwards = []
backwards = []
# Create the "last" (k = s+1) matrix
# Note: this is for k = s+1. However, below we then have to fill
# in for k = 1, ..., s in order.
# P L*^{-1} = x
# x L* = P
# L*' x' = P'
forwards.append(
linalg.solve_triangular(
backward_factors[s], partial_autocorrelations[s].T,
lower=True, trans='T'))
forwards[0] = np.dot(forward_factors[s], forwards[0].T)
# P' L^{-1} = x
# x L = P'
# L' x' = P
backwards.append(
linalg.solve_triangular(
forward_factors[s], partial_autocorrelations[s],
lower=True, trans='T'))
backwards[0] = np.dot(backward_factors[s], backwards[0].T)
# Update the variance
# Note: if s >= 1, this will be further updated in the for loop
# below
# Also, this calculation will be re-used in the forward variance
tmp = np.dot(forwards[0], backward_variances[s])
autocovariances.append(tmp.copy().T)
# Create the remaining k = 1, ..., s matrices,
# only has an effect if s >= 1
for k in range(s):
forwards.insert(k, prev_forwards[k] - np.dot(
forwards[-1], prev_backwards[s-(k+1)]))
backwards.insert(k, prev_backwards[k] - np.dot(
backwards[-1], prev_forwards[s-(k+1)]))
autocovariances[s+1] += np.dot(autocovariances[k+1],
prev_forwards[s-(k+1)].T)
# Create forward and backwards variances
forward_variances.append(
forward_variances[s] - np.dot(tmp, forwards[s].T)
)
backward_variances.append(
backward_variances[s] -
np.dot(
np.dot(backwards[s], forward_variances[s]),
backwards[s].T
)
)
# Cholesky factors
forward_factors.append(
linalg.cholesky(forward_variances[s+1], lower=True)
)
backward_factors.append(
linalg.cholesky(backward_variances[s+1], lower=True)
)
# If we do not want to use the transformed variance, we need to
# adjust the constrained matrices, as presented in Lemma 2.3, see above
variance = forward_variances[-1]
if not transform_variance:
# Here, we need to construct T such that:
# variance = T * initial_variance * T'
# To do that, consider the Cholesky of variance (L) and
# input_variance (M) to get:
# L L' = T M M' T' = (TM) (TM)'
# => L = T M
# => L M^{-1} = T
initial_variance_factor = np.linalg.cholesky(initial_variance)
transformed_variance_factor = np.linalg.cholesky(variance)
transform = np.dot(initial_variance_factor,
np.linalg.inv(transformed_variance_factor))
inv_transform = np.linalg.inv(transform)
for i in range(order):
forwards[i] = (
np.dot(np.dot(transform, forwards[i]), inv_transform)
)
return forwards, variance
def constrain_stationary_multivariate_python(unconstrained, error_variance,
transform_variance=False,
prefix=None):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation for a vector autoregression.
Parameters
----------
unconstrained : array or list
Arbitrary matrices to be transformed to stationary coefficient matrices
of the VAR. If a list, should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`. If an array, should be
the matrices horizontally concatenated and sized
`k_endog` x `k_endog * order`.
error_variance : array
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
is not transformed by it (when `transform_variance` is False). The
error term variance is required input when transformation is used
either to force an autoregressive component to be stationary or to
force a moving average component to be invertible.
transform_variance : boolean, optional
Whether or not to transform the error variance term. This option is
not typically used, and the default is False.
prefix : {'s','d','c','z'}, optional
The appropriate BLAS prefix to use for the passed datatypes. Only
use if absolutely sure that the prefix is correct or an error will
result.
Returns
-------
constrained : array or list
Transformed coefficient matrices leading to a stationary VAR
representation. Will match the type of the passed `unconstrained`
variable (so if a list was passed, a list will be returned).
Notes
-----
In the notation of [1]_, the arguments `(variance, unconstrained)` are
written as :math:`(\Sigma, A_1, \dots, A_p)`, where :math:`p` is the order
of the vector autoregression, and is here determined by the length of
the `unconstrained` argument.
There are two steps in the constraining algorithm.
First, :math:`(A_1, \dots, A_p)` are transformed into
:math:`(P_1, \dots, P_p)` via Lemma 2.2 of [1]_.
Second, :math:`(\Sigma, P_1, \dots, P_p)` are transformed into
:math:`(\Sigma, \phi_1, \dots, \phi_p)` via Lemmas 2.1 and 2.3 of [1]_.
If `transform_variance=True`, then only Lemma 2.1 is applied in the second
step.
While this function can be used even in the univariate case, it is much
slower, so in that case `constrain_stationary_univariate` is preferred.
References
----------
.. [1] Ansley, Craig F., and Robert Kohn. 1986.
"A Note on Reparameterizing a Vector Autoregressive Moving Average Model
to Enforce Stationarity."
Journal of Statistical Computation and Simulation 24 (2): 99-106.
.. [2] Ansley, Craig F, and Paul Newbold. 1979.
"Multivariate Partial Autocorrelations."
In Proceedings of the Business and Economic Statistics Section, 349-53.
American Statistical Association
"""
use_list = type(unconstrained) == list
if not use_list:
k_endog, order = unconstrained.shape
order //= k_endog
unconstrained = [
unconstrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
order = len(unconstrained)
k_endog = unconstrained[0].shape[0]
# Step 1: convert from arbitrary matrices to those with singular values
# less than one.
sv_constrained = _constrain_sv_less_than_one_python(
unconstrained, order, k_endog)
# Step 2: convert matrices from our "partial autocorrelation matrix" space
# (matrices with singular values less than one) to the space of stationary
# coefficient matrices
constrained, var = _compute_coefficients_from_multivariate_pacf_python(
sv_constrained, error_variance, transform_variance, order, k_endog)
if not use_list:
constrained = np.concatenate(constrained, axis=1).reshape(
k_endog, k_endog * order)
return constrained, var
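# Editorial sketch (not part of the original module): constrain an arbitrary
# coefficient matrix to a stationary VAR(1) and verify stationarity through
# the companion-matrix eigenvalues.
def _example_constrain_stationary_multivariate():
    unconstrained = [np.array([[3.0, 1.0],
                               [0.5, -2.0]])]
    error_variance = np.eye(2)
    constrained, variance = constrain_stationary_multivariate_python(
        unconstrained, error_variance)
    # companion_matrix expects (C_0, C_1, ...) = (1, -phi_1, ...)
    companion = companion_matrix([1] + [-c for c in constrained])
    assert np.all(np.abs(np.linalg.eigvals(companion)) < 1)
    return constrained, variance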
# Conditionally use the Cython versions of the multivariate constraint if
# possible (i.e. if Scipy >= 0.14.0 is available.)
if has_trmm:
def constrain_stationary_multivariate(unconstrained, variance,
transform_variance=False,
prefix=None):
use_list = type(unconstrained) == list
if use_list:
unconstrained = np.concatenate(unconstrained, axis=1)
k_endog, order = unconstrained.shape
order //= k_endog
if order < 1:
raise ValueError('Must have order at least 1')
if k_endog < 1:
raise ValueError('Must have at least 1 endogenous variable')
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[unconstrained, variance])
dtype = prefix_dtype_map[prefix]
unconstrained = np.asfortranarray(unconstrained, dtype=dtype)
variance = np.asfortranarray(variance, dtype=dtype)
# Step 1: convert from arbitrary matrices to those with singular values
# less than one.
# sv_constrained = _constrain_sv_less_than_one(unconstrained, order,
# k_endog, prefix)
sv_constrained = prefix_sv_map[prefix](unconstrained, order, k_endog)
# Step 2: convert matrices from our "partial autocorrelation matrix"
# space (matrices with singular values less than one) to the space of
# stationary coefficient matrices
constrained, variance = prefix_pacf_map[prefix](
sv_constrained, variance, transform_variance, order, k_endog)
constrained = np.array(constrained, dtype=dtype)
variance = np.array(variance, dtype=dtype)
if use_list:
constrained = [
constrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
return constrained, variance
constrain_stationary_multivariate.__doc__ = (
constrain_stationary_multivariate_python.__doc__)
else:
constrain_stationary_multivariate = (
constrain_stationary_multivariate_python)
def _unconstrain_sv_less_than_one(constrained, order=None, k_endog=None):
"""
Transform matrices with singular values less than one to arbitrary
matrices.
Parameters
----------
constrained : list
The partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
unconstrained : list
Unconstrained matrices. A list of length `order`, where each element is
an array sized `k_endog` x `k_endog`.
Notes
-----
Corresponds to the inverse of Lemma 2.2 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
"""
from scipy import linalg
unconstrained = [] # A_s, s = 1, ..., p
if order is None:
order = len(constrained)
if k_endog is None:
k_endog = constrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
P = constrained[i]
# B^{-1} B^{-1}' = I - P P'
B_inv, lower = linalg.cho_factor(eye - np.dot(P, P.T), lower=True)
# A = BP
# B^{-1} A = P
unconstrained.append(linalg.solve_triangular(B_inv, P, lower=lower))
return unconstrained
def _compute_multivariate_sample_acovf(endog, maxlag):
"""
Computer multivariate sample autocovariances
Parameters
----------
endog : array_like
Sample data on which to compute sample autocovariances. Shaped
        `nobs` x `k_endog`.
    maxlag : integer
        Maximum lag for which to calculate sample autocovariances.
Returns
-------
sample_autocovariances : list
A list of the first `maxlag` sample autocovariance matrices. Each
matrix is shaped `k_endog` x `k_endog`.
Notes
-----
This function computes the forward sample autocovariances:
.. math::
\hat \Gamma(s) = \frac{1}{n} \sum_{t=1}^{n-s}
(Z_t - \bar Z) (Z_{t+s} - \bar Z)'
See page 353 of Wei (1990). This function is primarily implemented for
checking the partial autocorrelation functions below, and so is quite slow.
References
----------
.. [1] Wei, William. 1990.
Time Series Analysis : Univariate and Multivariate Methods.
Boston: Pearson.
"""
# Get the (demeaned) data as an array
endog = np.array(endog)
if endog.ndim == 1:
endog = endog[:, np.newaxis]
endog -= np.mean(endog, axis=0)
# Dimensions
nobs, k_endog = endog.shape
sample_autocovariances = []
for s in range(maxlag + 1):
sample_autocovariances.append(np.zeros((k_endog, k_endog)))
for t in range(nobs - s):
sample_autocovariances[s] += np.outer(endog[t], endog[t+s])
sample_autocovariances[s] /= nobs
return sample_autocovariances
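# Editorial sketch (not part of the original module): at lag zero the sample
# autocovariance equals the (biased) sample covariance of the demeaned data.
def _example_multivariate_sample_acovf():
    rng = np.random.RandomState(0)
    endog = rng.standard_normal((100, 2))
    acovf = _compute_multivariate_sample_acovf(endog, maxlag=2)
    demeaned = endog - endog.mean(axis=0)
    assert np.allclose(acovf[0], demeaned.T.dot(demeaned) / len(endog))
    return acovf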
def _compute_multivariate_acovf_from_coefficients(
coefficients, error_variance, maxlag=None,
forward_autocovariances=False):
"""
Compute multivariate autocovariances from vector autoregression coefficient
matrices
Parameters
----------
coefficients : array or list
The coefficients matrices. If a list, should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`. If
an array, should be the coefficient matrices horizontally concatenated
and sized `k_endog` x `k_endog * order`.
error_variance : array
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`.
maxlag : integer, optional
The maximum autocovariance to compute. Default is `order`-1. Can be
zero, in which case it returns the variance.
forward_autocovariances : boolean, optional
Whether or not to compute forward autocovariances
:math:`E(y_t y_{t+j}')`. Default is False, so that backward
autocovariances :math:`E(y_t y_{t-j}')` are returned.
Returns
-------
autocovariances : list
A list of the first `maxlag` autocovariance matrices. Each matrix is
shaped `k_endog` x `k_endog`.
Notes
-----
Computes
..math::
\Gamma(j) = E(y_t y_{t-j}')
for j = 1, ..., `maxlag`, unless `forward_autocovariances` is specified,
in which case it computes:
..math::
E(y_t y_{t+j}') = \Gamma(j)'
Coefficients are assumed to be provided from the VAR model:
.. math::
y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t
Autocovariances are calculated by solving the associated discrete Lyapunov
equation of the state space representation of the VAR process.
"""
from scipy import linalg
# Convert coefficients to a list of matrices, for use in
# `companion_matrix`; get dimensions
if type(coefficients) == list:
order = len(coefficients)
k_endog = coefficients[0].shape[0]
else:
k_endog, order = coefficients.shape
order //= k_endog
coefficients = [
coefficients[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
if maxlag is None:
maxlag = order-1
# Start with VAR(p): w_{t+1} = phi_1 w_t + ... + phi_p w_{t-p+1} + u_{t+1}
# Then stack the VAR(p) into a VAR(1) in companion matrix form:
# z_{t+1} = F z_t + v_t
companion = companion_matrix(
[1] + [-coefficients[i] for i in range(order)]
).T
# Compute the error variance matrix for the stacked form: E v_t v_t'
selected_variance = np.zeros(companion.shape)
selected_variance[:k_endog, :k_endog] = error_variance
# Compute the unconditional variance of z_t: E z_t z_t'
stacked_cov = linalg.solve_discrete_lyapunov(companion, selected_variance)
# The first (block) row of the variance of z_t gives the first p-1
# autocovariances of w_t: \Gamma_i = E w_t w_t+i with \Gamma_0 = Var(w_t)
# Note: these are okay, checked against ArmaProcess
autocovariances = [
stacked_cov[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(min(order, maxlag+1))
]
for i in range(maxlag - (order-1)):
stacked_cov = np.dot(companion, stacked_cov)
autocovariances += [
stacked_cov[:k_endog, -k_endog:]
]
if forward_autocovariances:
for i in range(len(autocovariances)):
autocovariances[i] = autocovariances[i].T
return autocovariances
def _compute_multivariate_sample_pacf(endog, maxlag):
"""
    Compute multivariate sample partial autocorrelations
Parameters
----------
endog : array_like
Sample data on which to compute sample autocovariances. Shaped
`nobs` x `k_endog`.
maxlag : integer
Maximum lag for which to calculate sample partial autocorrelations.
Returns
-------
sample_pacf : list
A list of the first `maxlag` sample partial autocorrelation matrices.
Each matrix is shaped `k_endog` x `k_endog`.
"""
sample_autocovariances = _compute_multivariate_sample_acovf(endog, maxlag)
return _compute_multivariate_pacf_from_autocovariances(
sample_autocovariances)
def _compute_multivariate_pacf_from_autocovariances(autocovariances,
order=None, k_endog=None):
"""
Compute multivariate partial autocorrelations from autocovariances.
Parameters
----------
autocovariances : list
Autocorrelations matrices. Should be a list of length `order` + 1,
where each element is an array sized `k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
pacf : list
List of first `order` multivariate partial autocorrelations.
Notes
-----
Note that this computes multivariate partial autocorrelations.
Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
Notes
-----
Computes sample partial autocorrelations if sample autocovariances are
given.
"""
from scipy import linalg
if order is None:
order = len(autocovariances)-1
if k_endog is None:
k_endog = autocovariances[0].shape[0]
# Now apply the Ansley and Kohn (1986) algorithm, except that instead of
# calculating phi_{s+1, s+1} = L_s P_{s+1} {L_s^*}^{-1} (which requires
# the partial autocorrelation P_{s+1} which is what we're trying to
# calculate here), we calculate it as in Ansley and Newbold (1979), using
# the autocovariances \Gamma_s and the forwards and backwards residual
# variances \Sigma_s, \Sigma_s^*:
# phi_{s+1, s+1} = [ \Gamma_{s+1}' - \phi_{s,1} \Gamma_s' - ... -
# \phi_{s,s} \Gamma_1' ] {\Sigma_s^*}^{-1}
# Forward and backward variances
forward_variances = [] # \Sigma_s
backward_variances = [] # \Sigma_s^*, s = 0, ..., p
# \phi_{s,k}, s = 1, ..., p
# k = 1, ..., s+1
forwards = []
# \phi_{s,k}^*
backwards = []
forward_factors = [] # L_s
backward_factors = [] # L_s^*, s = 0, ..., p
# Ultimately we want to construct the partial autocorrelation matrices
# Note that this is "1-indexed" in the sense that it stores P_1, ... P_p
# rather than starting with P_0.
partial_autocorrelations = []
# We fill in the entries of phi_{s,k} as follows:
# [1,1]
# [2,2], [2,1]
# [3,3], [3,1], [3,2]
# ...
# [p,p], [p,1], ..., [p,p-1]
# the last row, correctly ordered, should be the same as the coefficient
# matrices provided in the argument `constrained`
for s in range(order): # s = 0, ..., p-1
prev_forwards = list(forwards)
prev_backwards = list(backwards)
forwards = []
backwards = []
# Create forward and backwards variances Sigma_s, Sigma*_s
forward_variance = autocovariances[0].copy()
backward_variance = autocovariances[0].T.copy()
for k in range(s):
forward_variance -= np.dot(prev_forwards[k],
autocovariances[k+1])
backward_variance -= np.dot(prev_backwards[k],
autocovariances[k+1].T)
forward_variances.append(forward_variance)
backward_variances.append(backward_variance)
# Cholesky factors
forward_factors.append(
linalg.cholesky(forward_variances[s], lower=True)
)
backward_factors.append(
linalg.cholesky(backward_variances[s], lower=True)
)
# Create the intermediate sum term
if s == 0:
# phi_11 = \Gamma_1' \Gamma_0^{-1}
# phi_11 \Gamma_0 = \Gamma_1'
# \Gamma_0 phi_11' = \Gamma_1
forwards.append(linalg.cho_solve(
(forward_factors[0], True), autocovariances[1]).T)
# backwards.append(forwards[-1])
# phi_11_star = \Gamma_1 \Gamma_0^{-1}
# phi_11_star \Gamma_0 = \Gamma_1
# \Gamma_0 phi_11_star' = \Gamma_1'
backwards.append(linalg.cho_solve(
(backward_factors[0], True), autocovariances[1].T).T)
else:
# G := \Gamma_{s+1}' -
# \phi_{s,1} \Gamma_s' - .. - \phi_{s,s} \Gamma_1'
tmp_sum = autocovariances[s+1].T.copy()
for k in range(s):
tmp_sum -= np.dot(prev_forwards[k], autocovariances[s-k].T)
# Create the "last" (k = s+1) matrix
# Note: this is for k = s+1. However, below we then have to
# fill in for k = 1, ..., s in order.
# phi = G Sigma*^{-1}
# phi Sigma* = G
# Sigma*' phi' = G'
# Sigma* phi' = G'
# (because Sigma* is symmetric)
forwards.append(linalg.cho_solve(
(backward_factors[s], True), tmp_sum.T).T)
# phi = G' Sigma^{-1}
# phi Sigma = G'
# Sigma' phi' = G
# Sigma phi' = G
# (because Sigma is symmetric)
backwards.append(linalg.cho_solve(
(forward_factors[s], True), tmp_sum).T)
# Create the remaining k = 1, ..., s matrices,
# only has an effect if s >= 1
for k in range(s):
forwards.insert(k, prev_forwards[k] - np.dot(
forwards[-1], prev_backwards[s-(k+1)]))
backwards.insert(k, prev_backwards[k] - np.dot(
backwards[-1], prev_forwards[s-(k+1)]))
# Partial autocorrelation matrix: P_{s+1}
# P = L^{-1} phi L*
# L P = (phi L*)
partial_autocorrelations.append(linalg.solve_triangular(
forward_factors[s], np.dot(forwards[s], backward_factors[s]),
lower=True))
return partial_autocorrelations
def _compute_multivariate_pacf_from_coefficients(constrained, error_variance,
order=None, k_endog=None):
"""
    Compute multivariate partial autocorrelations from the coefficient
    matrices and error variance of a VAR process.
Parameters
----------
constrained : array or list
The coefficients matrices. If a list, should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`. If
an array, should be the coefficient matrices horizontally concatenated
and sized `k_endog` x `k_endog * order`.
error_variance : array
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
pacf : list
List of first `order` multivariate partial autocorrelations.
Notes
-----
Note that this computes multivariate partial autocorrelations.
Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
Coefficients are assumed to be provided from the VAR model:
.. math::
y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t
"""
if type(constrained) == list:
order = len(constrained)
k_endog = constrained[0].shape[0]
else:
k_endog, order = constrained.shape
order //= k_endog
# Get autocovariances for the process; these are defined to be
# E z_t z_{t-j}'
# However, we want E z_t z_{t+j}' = (E z_t z_{t-j}')'
_acovf = _compute_multivariate_acovf_from_coefficients
autocovariances = [
autocovariance.T for autocovariance in
_acovf(constrained, error_variance, maxlag=order)]
return _compute_multivariate_pacf_from_autocovariances(autocovariances)
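# Illustrative sketch (not part of the original module; the coefficient values
# are arbitrary assumptions): partial autocorrelations implied by a stationary
# bivariate VAR(1).
#
#     import numpy as np
#     coefficients = [np.array([[0.5, 0.1],
#                               [0.0, 0.3]])]           # A_1, k_endog x k_endog
#     error_variance = np.eye(2)
#     pacf = _compute_multivariate_pacf_from_coefficients(coefficients,
#                                                         error_variance)
#     # expected: a list with a single 2 x 2 partial autocorrelation matrix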
def unconstrain_stationary_multivariate(constrained, error_variance):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array or list
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer. If a list, should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`. If an array, should be
the coefficient matrices horizontally concatenated and sized
`k_endog` x `k_endog * order`.
error_variance : array
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
        it is not transformed by it (when `transform_variance` is False).
Returns
-------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component. Will match the type of the passed `constrained`
variable (so if a list was passed, a list will be returned).
Notes
-----
Uses the list representation internally, even if an array is passed.
References
----------
.. [1] Ansley, Craig F., and Robert Kohn. 1986.
"A Note on Reparameterizing a Vector Autoregressive Moving Average Model
to Enforce Stationarity."
Journal of Statistical Computation and Simulation 24 (2): 99-106.
"""
from scipy import linalg
use_list = type(constrained) == list
if not use_list:
k_endog, order = constrained.shape
order //= k_endog
constrained = [
constrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
else:
order = len(constrained)
k_endog = constrained[0].shape[0]
# Step 1: convert matrices from the space of stationary
# coefficient matrices to our "partial autocorrelation matrix" space
# (matrices with singular values less than one)
partial_autocorrelations = _compute_multivariate_pacf_from_coefficients(
constrained, error_variance, order, k_endog)
# Step 2: convert from arbitrary matrices to those with singular values
# less than one.
unconstrained = _unconstrain_sv_less_than_one(
partial_autocorrelations, order, k_endog)
if not use_list:
unconstrained = np.concatenate(unconstrained, axis=1)
return unconstrained, error_variance
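# Illustrative sketch (not part of the original module; values are arbitrary
# assumptions): unconstraining the coefficient matrix of a stationary
# bivariate VAR(1) so it can be handed to an unconstrained optimizer.
#
#     import numpy as np
#     constrained = [np.array([[0.5, 0.1],
#                              [0.0, 0.3]])]
#     error_variance = np.eye(2)
#     unconstrained, _ = unconstrain_stationary_multivariate(constrained,
#                                                            error_variance)
#     # `unconstrained` is again a list with one 2 x 2 matrix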
def validate_matrix_shape(name, shape, nrows, ncols, nobs):
"""
Validate the shape of a possibly time-varying matrix, or raise an exception
Parameters
----------
name : str
The name of the matrix being validated (used in exception messages)
shape : array_like
The shape of the matrix to be validated. May be of size 2 or (if
the matrix is time-varying) 3.
nrows : int
The expected number of rows.
ncols : int
The expected number of columns.
nobs : int
The number of observations (used to validate the last dimension of a
time-varying matrix)
Raises
------
ValueError
If the matrix is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [2, 3]:
raise ValueError('Invalid value for %s matrix. Requires a'
' 2- or 3-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the matrix
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
if not shape[1] == ncols:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' columns, got %d' % (name, ncols, shape[1]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 2 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s matrix: time-varying'
' matrices cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 3 and nobs is not None and not shape[-1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' matrix. Requires shape (*,*,%d), got %s' %
(name, nobs, str(shape)))
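# Illustrative sketch (not part of the original module): validating the shape
# of a hypothetical time-varying "design" matrix with 1 row, 3 columns and
# nobs = 100.
#
#     validate_matrix_shape('design', shape=(1, 3, 100), nrows=1, ncols=3,
#                           nobs=100)   # passes silently
#     validate_matrix_shape('design', shape=(2, 3), nrows=1, ncols=3,
#                           nobs=100)   # raises ValueError (wrong row count)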
def validate_vector_shape(name, shape, nrows, nobs):
"""
Validate the shape of a possibly time-varying vector, or raise an exception
Parameters
----------
name : str
The name of the vector being validated (used in exception messages)
shape : array_like
The shape of the vector to be validated. May be of size 1 or (if
the vector is time-varying) 2.
nrows : int
The expected number of rows (elements of the vector).
nobs : int
The number of observations (used to validate the last dimension of a
time-varying vector)
Raises
------
ValueError
If the vector is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [1, 2]:
raise ValueError('Invalid value for %s vector. Requires a'
' 1- or 2-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the vector
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s vector: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 1 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s vector: time-varying'
' vectors cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 2 and not shape[1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' vector. Requires shape (*,%d), got %s' %
(name, nobs, str(shape)))
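# Illustrative sketch (not part of the original module): validating the shape
# of a hypothetical time-varying "obs_intercept" vector with 2 rows and
# nobs = 50.
#
#     validate_vector_shape('obs_intercept', shape=(2, 50), nrows=2, nobs=50)
#     # passes silently; shape=(3,) would instead raise ValueError because
#     # 3 rows were given where 2 were required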
|
bsd-3-clause
|
Djabbz/scikit-learn
|
examples/model_selection/plot_train_error_vs_test_error.py
|
349
|
2577
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
|
bsd-3-clause
|
glennq/scikit-learn
|
examples/svm/plot_rbf_parameters.py
|
26
|
8016
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility class to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
bsd-3-clause
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/matplotlib/tri/trirefine.py
|
8
|
14593
|
"""
Mesh refinement for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
    Derived classes must implement:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns :
- a refined triangulation
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
- *z* array of field values (to refine) defined at the base
triangulation nodes
- *triinterpolator* is a
:class:`~matplotlib.tri.TriInterpolator` (optional)
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
"""
Uniform mesh refinement by recursive subdivisions.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation`
The encapsulated triangulation (to be refined)
"""
# See Also
# --------
# :class:`~matplotlib.tri.CubicTriInterpolator` and
# :class:`~matplotlib.tri.TriAnalyzer`.
# """
def __init__(self, triangulation):
TriRefiner.__init__(self, triangulation)
def refine_triangulation(self, return_tri_index=False, subdiv=3):
"""
        Computes a uniformly refined triangulation *refi_triangulation* of
the encapsulated :attr:`triangulation`.
This function refines the encapsulated triangulation by splitting each
father triangle into 4 child sub-triangles built on the edges midside
nodes, recursively (level of recursion *subdiv*).
In the end, each triangle is hence divided into ``4**subdiv``
child triangles.
The default value for *subdiv* is 3 resulting in 64 refined
subtriangles for each triangle of the initial triangulation.
Parameters
----------
return_tri_index : boolean, optional
            Whether an index table indicating the father triangle index of
            each point will be returned. Default value False.
subdiv : integer, optional
            Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_triangulation : :class:`~matplotlib.tri.Triangulation`
The returned refined triangulation
found_index : array-like of integers
            For each point of *refi_triangulation*, the index of the triangle
            of the initial triangulation that contains it.
Returned only if *return_tri_index* is set to True.
"""
refi_triangulation = self._triangulation
ntri = refi_triangulation.triangles.shape[0]
# Computes the triangulation ancestors numbers in the reference
# triangulation.
ancestors = np.arange(ntri, dtype=np.int32)
for _ in range(subdiv):
refi_triangulation, ancestors = self._refine_triangulation_once(
refi_triangulation, ancestors)
refi_npts = refi_triangulation.x.shape[0]
refi_triangles = refi_triangulation.triangles
# Now we compute found_index table if needed
if return_tri_index:
# We have to initialize found_index with -1 because some nodes
# may very well belong to no triangle at all, e.g., in case of
# Delaunay Triangulation with DuplicatePointWarning.
found_index = - np.ones(refi_npts, dtype=np.int32)
tri_mask = self._triangulation.mask
if tri_mask is None:
found_index[refi_triangles] = np.repeat(ancestors,
3).reshape(-1, 3)
else:
                # There is a subtlety here: we want to avoid, whenever
                # possible, assigning a refined point to a masked ancestor
                # triangle (which would result in artifacts in plots).
# So we impose the numbering from masked ancestors first,
# then overwrite it with unmasked ancestor numbers.
ancestor_mask = tri_mask[ancestors]
found_index[refi_triangles[ancestor_mask, :]
] = np.repeat(ancestors[ancestor_mask],
3).reshape(-1, 3)
found_index[refi_triangles[~ancestor_mask, :]
] = np.repeat(ancestors[~ancestor_mask],
3).reshape(-1, 3)
return refi_triangulation, found_index
else:
return refi_triangulation
def refine_field(self, z, triinterpolator=None, subdiv=3):
"""
Refines a field defined on the encapsulated triangulation.
Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
values of the field at the node of the refined triangulation).
Parameters
----------
z : 1d-array-like of length ``n_points``
Values of the field to refine, defined at the nodes of the
encapsulated triangulation. (``n_points`` is the number of points
in the initial triangulation)
triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
Interpolator used for field interpolation. If not specified,
a :class:`~matplotlib.tri.CubicTriInterpolator` will
be used.
subdiv : integer, optional
Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_tri : :class:`~matplotlib.tri.Triangulation` object
The returned refined triangulation
refi_z : 1d array of length: *refi_tri* node count.
The returned interpolated field (at *refi_tri* nodes)
Examples
--------
The main application of this method is to plot high-quality
iso-contours on a coarse triangular grid (e.g., triangulation built
from relatively sparse test data):
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
"""
if triinterpolator is None:
interp = matplotlib.tri.CubicTriInterpolator(
self._triangulation, z)
else:
if not isinstance(triinterpolator,
matplotlib.tri.TriInterpolator):
raise ValueError("Expected a TriInterpolator object")
interp = triinterpolator
refi_tri, found_index = self.refine_triangulation(
subdiv=subdiv, return_tri_index=True)
refi_z = interp._interpolate_multikeys(
refi_tri.x, refi_tri.y, tri_index=found_index)[0]
return refi_tri, refi_z
@staticmethod
def _refine_triangulation_once(triangulation, ancestors=None):
"""
This function refines a matplotlib.tri *triangulation* by splitting
each triangle into 4 child-masked_triangles built on the edges midside
nodes.
        The masked triangles, if present, are also split, but their children
        are returned masked.
If *ancestors* is not provided, returns only a new triangulation:
child_triangulation.
        If the array-like key table *ancestors* is given, it shall be of shape
(ntri,) where ntri is the number of *triangulation* masked_triangles.
In this case, the function returns
(child_triangulation, child_ancestors)
child_ancestors is defined so that the 4 child masked_triangles share
the same index as their father: child_ancestors.shape = (4 * ntri,).
"""
x = triangulation.x
y = triangulation.y
# According to tri.triangulation doc:
# neighbors[i,j] is the triangle that is the neighbor
# to the edge from point index masked_triangles[i,j] to point
# index masked_triangles[i,(j+1)%3].
neighbors = triangulation.neighbors
triangles = triangulation.triangles
npts = np.shape(x)[0]
ntri = np.shape(triangles)[0]
if ancestors is not None:
ancestors = np.asarray(ancestors)
if np.shape(ancestors) != (ntri,):
raise ValueError(
"Incompatible shapes provide for triangulation"
".masked_triangles and ancestors: {0} and {1}".format(
np.shape(triangles), np.shape(ancestors)))
# Initiating tables refi_x and refi_y of the refined triangulation
# points
# hint: each apex is shared by 2 masked_triangles except the borders.
borders = np.sum(neighbors == -1)
added_pts = (3*ntri + borders) // 2
refi_npts = npts + added_pts
refi_x = np.zeros(refi_npts)
refi_y = np.zeros(refi_npts)
# First part of refi_x, refi_y is just the initial points
refi_x[:npts] = x
refi_y[:npts] = y
# Second part contains the edge midside nodes.
# Each edge belongs to 1 triangle (if border edge) or is shared by 2
# masked_triangles (interior edge).
        # We first build two arrays of length 3*ntri of edge starting nodes
        # (edge_elems, edge_apexes); we then extract only the masters to
        # avoid overlaps.
# The so-called 'master' is the triangle with biggest index
# The 'slave' is the triangle with lower index
# (can be -1 if border edge)
# For slave and master we will identify the apex pointing to the edge
# start
edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32)]))
edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32)*2]))
edge_neighbors = neighbors[edge_elems, edge_apexes]
mask_masters = (edge_elems > edge_neighbors)
# Identifying the "masters" and adding to refi_x, refi_y vec
masters = edge_elems[mask_masters]
apex_masters = edge_apexes[mask_masters]
x_add = (x[triangles[masters, apex_masters]] +
x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
y_add = (y[triangles[masters, apex_masters]] +
y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
refi_x[npts:] = x_add
refi_y[npts:] = y_add
# Building the new masked_triangles ; each old masked_triangles hosts
# 4 new masked_triangles
# there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
# 3 new_pt_midside
new_pt_corner = triangles
# What is the index in refi_x, refi_y of point at middle of apex iapex
# of elem ielem ?
# If ielem is the apex master: simple count, given the way refi_x was
# built.
# If ielem is the apex slave: yet we do not know ; but we will soon
# using the neighbors table.
new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
cum_sum = npts
for imid in range(3):
mask_st_loc = (imid == apex_masters)
n_masters_loc = np.sum(mask_st_loc)
elem_masters_loc = masters[mask_st_loc]
new_pt_midside[:, imid][elem_masters_loc] = np.arange(
n_masters_loc, dtype=np.int32) + cum_sum
cum_sum += n_masters_loc
# Now dealing with slave elems.
# for each slave element we identify the master and then the inode
        # once slave_masters is identified, slave_masters_apex is such that:
# neighbors[slaves_masters, slave_masters_apex] == slaves
mask_slaves = np.logical_not(mask_masters)
slaves = edge_elems[mask_slaves]
slaves_masters = edge_neighbors[mask_slaves]
diff_table = np.abs(neighbors[slaves_masters, :] -
np.outer(slaves, np.ones(3, dtype=np.int32)))
slave_masters_apex = np.argmin(diff_table, axis=1)
slaves_apex = edge_apexes[mask_slaves]
new_pt_midside[slaves, slaves_apex] = new_pt_midside[
slaves_masters, slave_masters_apex]
# Builds the 4 child masked_triangles
child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
child_triangles[0::4, :] = np.vstack([
new_pt_corner[:, 0], new_pt_midside[:, 0],
new_pt_midside[:, 2]]).T
child_triangles[1::4, :] = np.vstack([
new_pt_corner[:, 1], new_pt_midside[:, 1],
new_pt_midside[:, 0]]).T
child_triangles[2::4, :] = np.vstack([
new_pt_corner[:, 2], new_pt_midside[:, 2],
new_pt_midside[:, 1]]).T
child_triangles[3::4, :] = np.vstack([
new_pt_midside[:, 0], new_pt_midside[:, 1],
new_pt_midside[:, 2]]).T
child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
# Builds the child mask
if triangulation.mask is not None:
child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
if ancestors is None:
return child_triangulation
else:
return child_triangulation, np.repeat(ancestors, 4)
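# Illustrative usage sketch (not part of this module; the point coordinates
# are arbitrary assumptions): refining a small triangulation and a scalar
# field defined on its nodes.
#
#     import numpy as np
#     from matplotlib.tri import Triangulation, UniformTriRefiner
#
#     x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
#     y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
#     tri = Triangulation(x, y)               # Delaunay triangulation of 5 points
#     z = np.cos(1.5 * x) * np.sin(1.5 * y)   # field values at the nodes
#     refiner = UniformTriRefiner(tri)
#     refi_tri, found_index = refiner.refine_triangulation(
#         return_tri_index=True, subdiv=2)    # each triangle split into 4**2 parts
#     refi_tri2, refi_z = refiner.refine_field(z, subdiv=2)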
|
mit
|
veryberry/addons-yelizariev
|
sugarcrm_migration/import_sugarcrm.py
|
16
|
44410
|
# -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base, create_childs
from openerp.addons.import_framework.mapper import *
import subprocess
def fix_email(text):
return text.replace('\r', '<br>')
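# Note (added for clarity): fix_email converts carriage returns coming from
# SugarCRM text fields into HTML line breaks, e.g.
# fix_email('line one\rline two') -> 'line one<br>line two'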
class import_sugarcrm(import_base):
TABLE_USER = 'users'
TABLE_ACCOUNT = 'accounts'
TABLE_ACCOUNT_LEAD = 'accounts_leads'
TABLE_ACCOUNT_TAG = 'accounts_tags_'
TABLE_CONTACT = 'contacts'
TABLE_CONTACT_COMPANY = 'contacts_companies_'
TABLE_CONTACT_TAG = 'contacts_tags_'
TABLE_CASE = 'cases'
TABLE_CASE_TAG = 'cases_tags_'
#TABLE_EMPLOYEE = 'Employees'
#TABLE_OPPORTUNITY = 'Opportunities'
#TABLE_LEAD = 'Leads'
#TABLE_STAGE = 'crm_stage'
#TABLE_ATTENDEE = 'calendar_attendee'
#TABLE_CALL = 'Calls'
#TABLE_MEETING = 'Meetings'
#TABLE_TASK = 'Tasks'
#TABLE_PROJECT = 'Project'
#TABLE_PROJECT_TASK = 'ProjectTask'
#TABLE_BUG = 'Bugs'
TABLE_NOTE = 'Notes'
TABLE_NOTE_INTERNAL = 'notes_internal'
TABLE_EMAIL = 'emails'
#TABLE_COMPAIGN = 'Campaigns'
#TABLE_DOCUMENT = 'Documents'
#TABLE_HISTORY_ATTACHMNET = 'history_attachment'
def initialize(self):
self.db = MySQLdb.connect(host=self.context.get('db_host'),
port=int(self.context.get('db_port')),
user=self.context.get('db_user'),
passwd=self.context.get('db_passwd'),
db=self.context.get('db_name'),
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor
)
db_dump_fies = self.context.get('db_dump_fies')
if db_dump_fies:
cur = self.db.cursor()
for f in db_dump_fies:
_logger.info('load dump %s' % f)
fd = open(f, 'r')
subprocess.Popen(['mysql',
'-u', self.context.get('db_user'),
'-p{}'.format(self.context.get('db_passwd')),
'-h', self.context.get('db_host'),
'-P', self.context.get('db_port'),
self.context.get('db_name')], stdin=fd).wait()
cur.close()
def finalize(self):
pass
def finalize_note(self):
mail_message_obj = self.pool['mail.message']
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model_tmp','=','mail.message')])
for a in self.pool['ir.attachment'].read(self.cr, self.uid, ids, ['id', 'res_id_tmp'], context=self.context):
if not a['res_id_tmp']:
continue
mail_message_obj.write(self.cr, self.uid, [a['res_id_tmp']],
{'attachment_ids':[(4, a['id'])]})
def get_data(self, table):
cur = self.db.cursor()
query = "SELECT * FROM %s" % table
#query = query + ' order by rand()' # for debug
cur.execute(query)
res = cur.fetchall()
cur.close()
return list(res)
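    # Illustrative note (assumption based on the DictCursor configured in
    # initialize()): self.get_data('users') returns a list of dicts, one per
    # row of the SugarCRM `users` table, keyed by column name.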
def get_mapping(self):
res = [
self.get_mapping_user(),
self.get_mapping_account(),
self.get_mapping_contact(),
self.get_mapping_case(),
self.get_mapping_email(),
self.get_mapping_note_internal(),
self.get_mapping_note(),
]
return res
def merge_table_email(self, df, id_on='id'):
#mysql> select bean_module, count(*) from email_addr_bean_rel group by bean_module;
#+-------------+----------+
#| bean_module | count(*) |
#+-------------+----------+
#| Contacts | 1048 |
#| Leads | 31 |
#| Prospects | 20391 |
#| Users | 33 |
#+-------------+----------+
#4 rows in set (0.21 sec)
t1 = merge(df,
DataFrame(self.get_data('email_addr_bean_rel')),
how='left',
left_on=id_on,
suffixes=('', '_email_addr_bean_rel'),
right_on='bean_id')
t2 = merge(t1,
DataFrame(self.get_data('email_addresses')),
how='left',
left_on = 'email_address_id',
suffixes=('', '_email_addresses'),
right_on = 'id')
return t2
def table_user(self):
t1 = self.merge_table_email(DataFrame(self.get_data('users')))
return t1
def get_mapping_user(self):
return {
'name': self.TABLE_USER,
'table': self.table_user,
'models':[{
'model' : 'res.users',
'fields': {
'id': xml_id(self.TABLE_USER, 'id'),
'active': lambda record: not record['deleted'], # status == 'Active'
'name': concat('first_name', 'last_name'),
'login': value('user_name', fallback='last_name'),
'password' : 'user_hash',
'company_id/id': const('base.main_company'),
'alias_name': value('user_name', fallback='last_name', lower=True),
'email': 'email_address',
}
}]
}
def table_account(self):
t1 = merge(DataFrame(self.get_data('accounts')),
DataFrame(self.get_data('accounts_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:100] # for debug
return t1
def get_hook_tag(self, field_name):
def f(external_values):
res = []
value = external_values.get(field_name)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
v = do_clean_sugar(v)
if v:
res.append({field_name:v})
return res
return f
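    # Illustrative sketch (assumption, not in the original module): for an
    # external record such as {'status_c': 'Live,Prospect'}, the hook returned
    # by get_hook_tag('status_c') yields one record per comma-separated tag,
    # e.g. [{'status_c': 'Live'}, {'status_c': 'Prospect'}], assuming
    # do_clean_sugar() only strips SugarCRM markup/whitespace.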
def tag(self, model, xml_id_prefix, field_name):
parent = xml_id_prefix + field_name
return {'model':model,
'hook':self.get_hook_tag(field_name),
'fields': {
'id': xml_id(parent, field_name),
'name': field_name,
'parent_id/id':const('sugarcrm_migration.'+parent),
}
}
def context_partner(self):
# see module description
return {"skip_addr_sync":True}
def get_mapping_account(self):
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%sfirst_name%s'%(prefix, suffix),
'%slast_name%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_ACCOUNT + '_%s%s'%(prefix, suffix), 'id'),
'name': concat('%sfirst_name%s'%(prefix, suffix), '%slast_name%s'%(prefix, suffix)),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'fax': '%sfax%s'%(prefix, suffix),
'email': '%semail%s'%(prefix, suffix),
'parent_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'function': '%sjob_title%s'%(prefix, suffix),
'customer': const('1'),
'supplier': const('0'),
},
}
partner_list = [
partner('finance_', ''),
partner('pa_', '_primary_c'),
partner('pa_', '_secondary_c'),
partner('', '_primary_c'),
partner('', '_secondary_c'),
partner('', '_quantenary_c'),
partner('', '_other_c'),
]
tag_list = [
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'initial_source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'private_sector_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'rtw_organisation_type_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sales_funnel_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'shenley_holdings_company_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'status_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_customer_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sister_company_c'),
]
return {
'name': self.TABLE_ACCOUNT,
'table': self.table_account,
'dependencies' : [self.TABLE_USER],
'models': tag_list + [
# company
{
'model' : 'res.partner',
'context':self.context_partner,
'fields' :
{
'id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'is_company': const('1'),
'date': fixdate('date_entered'),
'active': lambda record: not record['deleted'],
'user_id/.id': user_by_login('account_manager_2_c'),
'website': first('website', 'website_c'),
'phone':'company_phone_c',
'email':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'city': 'company_city_c',
'zip': 'company_post_code_c',
#'state_id': 'company_region_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'country_id/.id': country_by_name('europe_c'),
'opt_out': mapper_int('unsubscribe_c'),
'customer': const('1'),
'supplier': const('0'),
'category_id/id': tags_from_fields(self.TABLE_ACCOUNT_TAG, ['initial_source_of_referral_c', 'private_sector_new_c', 'rtw_organisation_type_c', 'sales_funnel_c', 'shenley_holdings_company_new_c', 'source_of_referral_c', 'status_c', 'introduced_by_c', 'introduced_by_customer_c', 'sister_company_c',]),
'comment': ppconcat('website_c'),
}},
# realted lead
{
'model' : 'crm.lead',
'fields': {
'id': xml_id(self.TABLE_ACCOUNT_LEAD, 'id'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'phone':first('phone_office', 'telephone_c', 'company_phone_c'),
'email_from':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'probability': map_val('sales_funnel_c', self.map_lead_probability, 0),
'stage_id/id': map_val('status_c', self.map_lead_stage, 'crm.stage_lead1'),
'type': map_val('status_c', self.map_lead_type, 'lead'),
'section_id/id': const('sales_team.section_sales_department'),
}
}
] + partner_list # related contacts
}
map_lead_probability = {
'Lost': 0,
'Proposal Sent': 50,
'Prospect Identified': 1,
'Prospect Qualified': 20,
'Sales Won': 100,
'Scheduled': 100, #in sugarcrm: 150,
'Suspect': 0,
}
#mysql> select sales_funnel_c, count(*) from accounts_cstm group by sales_funnel_c;
#+---------------------+----------+
#| sales_funnel_c | count(*) |
#+---------------------+----------+
#| NULL | 4322 |
#| | 144 |
#| Lost | 1 |
#| Proposal Sent | 3 |
#| Prospect Identified | 5 |
#| Prospect Qualified | 20 |
#| Sales Won | 2 |
#| Scheduled | 1 |
#| Suspect | 62 |
map_lead_stage = {
'': 'crm.stage_lead7', # Lost
'Archived': 'crm.stage_lead2', # Dead
'Dorment': 'crm.stage_lead4', # Proposition
'Live Contact': 'crm.stage_lead6', # Won
'Pipeline': 'crm.stage_lead5', # Negotiation
'Prospect': 'crm.stage_lead1', # New
}
map_lead_type = {
'Dorment': 'opportunity',
'Live Contact': 'opportunity',
'Pipeline': 'opportunity',
}
#mysql> select status_c, count(*) from accounts_cstm group by status_c;
#+---------------+----------+
#| status_c | count(*) |
#+---------------+----------+
#| NULL | 210 |
#| | 655 |
#| Archived | 84 |
#| Dorment | 101 |
#| Live Contract | 73 |
#| Pipeline | 390 |
#| Prospect | 3047 |
#+---------------+----------+
def table_contact(self):
t1 = merge(DataFrame(self.get_data('contacts')),
DataFrame(self.get_data('contacts_cstm')),
left_on='id',
right_on='id_c'
)
t2 = self.merge_table_email(t1)
#t2 = t2[:10] # for debug
return t2
def get_mapping_contact(self):
tag_list = [
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_introducer_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ambassador_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_other_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'england_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ethnicity_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'europe_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'first_language_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'gender_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'other_languages_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'religion_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'specialism_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_new_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'trainer_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'training_experience_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'willing_to_travel_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'skill_set_c'),
]
def company(field_name):
return {'model':'res.partner',
'context':self.context_partner,
'hook':self.get_hook_ignore_empty(field_name),
'fields': {
'id': xml_id(self.TABLE_CONTACT_COMPANY, field_name),
'name': field_name,
'is_company': const('1'),
'customer': const('0'),
'supplier': const('1'),
}
}
return {
'name': self.TABLE_CONTACT,
'table': self.table_contact,
'dependencies' : [self.TABLE_USER],
'models':tag_list + [company('company_name_c')] + [{
'model' : 'res.partner',
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CONTACT, 'id'),
'name': concat('title', 'first_name', 'last_name'),
'parent_id/id': xml_id(self.TABLE_CONTACT_COMPANY, 'company_name_c'),
'create_date': 'date_entered',
'write_date': 'date_modified',
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'city': 'city_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'zip': 'company_post_code_c',
'phone':first('company_phone_c', 'home_phone_c', 'phone_home', 'phone_work', 'phone_other', 'home_telephone_c', 'business_telephone_c'),
'mobile':first('phone_mobile', 'personal_mobile_phone_c'),
'email':first('email_c', 'email_address', 'personal_email_c', 'business_email_c', 'other_email_c', 'email_2_c'),
'website': first('website', 'website_c'),
'fax': first('phone_fax', 'company_fax_c'),
'customer': const('0'),
'supplier': const('1'),
'category_id/id': tags_from_fields(self.TABLE_CONTACT_TAG, ['agreed_commission_c', 'agreed_introducer_commission_c', 'ambassador_c', 'consultant_type_c', 'consultant_type_other_c', 'england_c', 'ethnicity_c', 'europe_c', 'first_language_c', 'gender_c', 'other_languages_c', 'religion_c', 'role_c', 'role_type_c', 'skill_set_c', 'specialism_c', 'status_live_c', 'status_live_new_c', 'trainer_type_c', 'training_experience_c', 'willing_to_travel_c', ]),
'comment': ppconcat(
'description',
'phone_home',
'phone_mobile',
'phone_work',
'phone_other',
'phone_fax',
'personal_email_c',
'business_email_c',
'other_email_c',
'home_telephone_c',
'business_telephone_c',
'personal_mobile_phone_c',
'personal_telephone_c',
'home_phone_c',
'mobile_phone_c',
'other_phone_c',
'email_c',
'email_2_c',
'company_phone_c',
'company_mobile_phone_c',
'company_fax_c',
'company_phone_other_c',
'company_email_c',
'prg_email_issued_c',
'email_address_permanent_c',
'prg_email_c',
'cjsm_email_address_c',
)
}
}]
}
def table_case(self):
t1 = merge(DataFrame(self.get_data('cases')),
DataFrame(self.get_data('cases_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:10] # for debug
return t1
case_priority_mapping = {
'P1': '0',
'P2': '1',
'P3': '2'
}
case_state_mapping = {
'Awaiting Payment':'awaiting_payment',
'Cancelled':'cancelled',
'Completed':'close',
'Deferred':'pending',
'Live':'open',
'Lost':'lost',
'Pipeline':'pipeline_reactive',
'Pipeline - Proactive':'pipeline_proactive',
'Provisional':'draft',
'To be Invoiced':'to_be_invoiced',
}
def field_estimated_close_date_c(self, external_values):
estimated_close_date_c = external_values.get('estimated_close_date_c')
date = external_values.get('end_date_c')
return ''
def finalize_case(self):
ids = self.pool['account.analytic.account'].search(self.cr, self.uid, [('user_id_tmp', '!=', False)])
for r in self.pool['account.analytic.account'].read(self.cr, self.uid, ids, ['id', 'user_id_tmp']):
project_id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(r['id']))], context=self.context)
self.pool['project.project'].write(self.cr, self.uid, project_id, {'user_id':r['user_id_tmp'][0]}, context=self.context)
def get_mapping_case(self):
#mysql> select case_status_c, count(*) from cases_cstm group by case_status_c;
#+----------------------+----------+
#| case_status_c | count(*) |
#+----------------------+----------+
#| NULL | 2 |
#| | 40 |
#| Awaiting Payment | 10 |
#| Cancelled | 182 |
#| Completed | 339 |
#| Deferred | 125 |
#| Live | 25 |
#| Lost | 419 |
#| Pipeline | 60 |
#| Pipeline - Proactive | 73 |
#| Provisional | 2 |
#| To be Invoiced | 7 |
#+----------------------+----------+
def partner_participant(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scase_participant%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scase_participant%s'%(prefix, suffix),
'phone': '%sparticipant_phone%s'%(prefix, suffix),
'function': '%sparticipant_role%s'%(prefix, suffix),
'participate_in_contract_ids/id': xml_id(self.TABLE_CASE, 'id'),
'customer': const('0'),
'supplier': const('0'),
},
}
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scontact%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scontact%s'%(prefix, suffix),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'function': '%srole%s'%(prefix, suffix),
'customer': const('0'),
'supplier': const('0'),
},
}
partner_participant_list = [
partner_participant('', '_c'),
partner_participant('', '_2_c'),
partner_participant('', '_3_c'),
]
partner_list = [
partner('primary_', '_c'),
partner('secondary_', '_c'),
]
tag_list = [
self.tag('contract.category', self.TABLE_CASE_TAG, 'business_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'probability_of_closing_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'production_funnel_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_area_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'reason_lost_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'source_of_referral_c'),
]
return {
'name': self.TABLE_CASE,
'table': self.table_case,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
#self.TABLE_LEAD
],
'models': []+
tag_list+
partner_list+
[{
'model' : 'account.analytic.account',
'context': lambda : {'active_test':False},
'finalize': self.finalize_case,
'fields': {
'id': xml_id(self.TABLE_CASE, 'id'),
'name': concat('case_number_c', 'case_number', 'name', delimiter=' * '),
'type': const('contract'),
'use_tasks': const('1'),
'user_id_tmp/.id': user_by_login('case_manager_c'),
'support_manager_id/.id': user_by_login('support_case_manager_c'),
'notetaker_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id4_c', default=None),
'proof_reader_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id2_c', default=None),
'consultant_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id_c', default=None),
'business_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('secondary_', '_c')), 'id', default=None),
'commissioning_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('primary_', '_c')), 'id', default=None),
'category_id/id': tags_from_fields(self.TABLE_CASE_TAG, ['business_type_c', 'probability_of_closing_c', 'production_funnel_c', 'product_area_c', 'product_type_c', 'reason_lost_c', 'source_of_referral_c',]),
'create_date': 'date_entered',
'state': map_val('case_status_c', self.case_state_mapping, 'draft'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'account_id'),
'date_start':'end_date_c',
'date':call(self.field_estimated_close_date_c),
'description': ppconcat(
'invoiced_value_of_case_c',
),
}
}] +
partner_participant_list
}
def table_filter_modules(self, t, field_name='bean_module'):
newt = t[(t[field_name] == 'Accounts')|
(t[field_name] == 'Cases')|
(t[field_name] == 'Contacts')|
(t[field_name] == 'Notes')|
(t[field_name] == 'Emails')
]
return newt
def table_email(self):
t1 = merge(DataFrame(self.get_data('emails')),
DataFrame(self.get_data('emails_text')),
how='left',
left_on='id',
right_on='email_id'
)
t2 = merge(t1,
DataFrame(self.get_data('emails_beans')),
how='left',
left_on='id',
right_on='email_id',
suffixes = ('', '_emails_beans')
)
t3 = self.table_filter_modules(t2)
#t3 = t3[:100] # for debug
return t3
map_to_model = {
'Accounts': 'res.partner',
'Cases': 'project.project',
'Contacts': 'res.partner',
'Prospects': 'TODO',
'Emails': 'mail.message',
#'Notes': 'ir.attachment',
}
map_to_table = {
'Accounts': TABLE_ACCOUNT,
'Cases': TABLE_CASE,
'Contacts': TABLE_CONTACT,
'Prospects': 'TODO',
'Emails': TABLE_EMAIL,
#'Notes': TABLE_NOTE,
}
#mysql> select parent_type, count(*) from notes group by parent_type;
#+-------------+----------+
#| parent_type | count(*) |
#+-------------+----------+
#| NULL | 604 |
#| Accounts | 6385 |
#| Cases | 12149 |
#| Contacts | 41 |
#| Emails | 12445 |
#| Leads | 355 |
#| Meetings | 2 |
#+-------------+----------+
#7 rows in set (0.30 sec)
#
def get_mapping_email(self):
# mysql> select bean_module, count(*) from emails_beans group by bean_module;
# +---------------+----------+
# | bean_module | count(*) |
# +---------------+----------+
# | Accounts | 182 |
# | Cases | 1746 |
# | Contacts | 493 |
# | Leads | 102 |
# | Opportunities | 1 |
# | Prospects | 16819 |
# +---------------+----------+
# 6 rows in set (0.56 sec)
return {
'name': self.TABLE_EMAIL,
'table': self.table_email,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
self.TABLE_CASE,
#self.TABLE_LEAD,
#self.TABLE_OPPORTUNITY,
#self.TABLE_MEETING,
#self.TABLE_CALL
],
'models':[{
'model' : 'mail.message',
'hook': self.hook_email,
'fields': {
'id': xml_id(self.TABLE_EMAIL, 'id'),
'type':const('email'),
#mysql> select type, count(*) from emails group by type;
#+----------+----------+
#| type | count(*) |
#+----------+----------+
#| archived | 17119 |
#| draft | 8 |
#| inbound | 3004 |
#| out | 75 |
#+----------+----------+
#4 rows in set (0.76 sec)
'email_from': 'from_addr_name',
'reply_to': 'reply_to_addr',
#'same_thread': 'TODO',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'partner_ids' #many2many
#attachment_ids' #many2many
#'parent_id': 'TODO',
'model': 'model',
'res_id': 'res_id',
#record_name
'subject':'name',
'date':'date_sent',
'message_id': 'message_id',
'body': call(lambda vals, html, txt: fix_email(html or txt or ''),
value('description_html'), value('description')),
'subtype_id/id':const('mail.mt_comment'),
'notified_partner_ids/.id': emails2partners('to_addrs'),
#'state' : const('received'),
#'email_to': 'to_addrs_names',
#'email_cc': 'cc_addrs_names',
#'email_bcc': 'bcc_addrs_names',
#'partner_id/.id': 'partner_id/.id',
#'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def table_note(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t.dropna(subset=['filename'])
#t = t[:10] # for debug
return t
def table_note_internal(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t[(t['parent_type'] != 'Emails')]
#t = t[:100] # for debug
return t
def get_id_model(self, external_values, field_name='parent_id', parent_field_name='parent_type'):
id = res_id(map_val(parent_field_name, self.map_to_table), field_name)
id.set_parent(self)
id = id(external_values)
model = map_val(parent_field_name, self.map_to_model)
model = model(external_values)
if model=='project.project':
id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(id))], context=self.context)
if isinstance(id, list):
id=id[0]
return str(id),model
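    # Illustrative note (assumption): for external_values such as
    # {'parent_type': 'Cases', 'parent_id': '<sugar case uuid>'}, get_id_model()
    # resolves the imported analytic account for that case, looks up the
    # matching project.project record and returns (str(project_id),
    # 'project.project').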
def hook_email(self, external_values):
id,model = self.get_id_model(external_values, field_name='bean_id', parent_field_name='bean_module')
external_values['res_id']=id
external_values['model']=model
return external_values
def hook_note(self, external_values):
parent_type = external_values.get('parent_type')
contact_id = external_values.get('contact_id')
if parent_type == 'Accounts' and contact_id:
external_values['parent_type'] = 'Contacts'
id,model = self.get_id_model(external_values, field_name='contact_id')
if id:
#print 'note Accounts fixed to Contacts'
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
external_values['parent_type'] = parent_type
id,model = self.get_id_model(external_values)
if not id:
#print 'Note not found', parent_type, external_values.get('parent_id')
return None
else:
#print 'Note FOUND', parent_type, external_values.get('parent_id')
pass
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
map_note_to_table = {
'Emails': TABLE_EMAIL
}
def get_mapping_note(self):
return {
'name': self.TABLE_NOTE,
'table': self.table_note,
'dependencies' : [self.TABLE_EMAIL,
self.TABLE_NOTE_INTERNAL,
],
'models':[{
'model': 'ir.attachment',
'context': lambda : {'active_test':False, 'quick_import':True},
'hook': self.hook_note,
'finalize': self.finalize_note,
'fields': {
'id': xml_id(self.TABLE_NOTE, 'id'),
'name':'filename',
'datas_fname':'filename',
'res_model': 'res_model',
'res_id': 'res_id',
'res_model_tmp': const('mail.message'),
'res_id_tmp': res_id(map_val('parent_type', self.map_note_to_table, default=self.TABLE_NOTE_INTERNAL), 'id'),
'store_fname': call(lambda external_values, id_value: 'sugarcrm_files/' + id_value,
value('id')),
'type':const('binary'),
#'description': 'description',
'description': const(''),
'create_date': 'date_entered',
'create_uid/id': xml_id(self.TABLE_USER, 'create_by'),
'company_id/id': const('base.main_company'),
}
}]
}
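    # Non-email notes are additionally imported as mail.message records so the
    # note text can appear in the thread of the related document; the
    # attachments above reference them through res_id_tmp.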
def get_mapping_note_internal(self):
return {
'name': self.TABLE_NOTE_INTERNAL,
'table': self.table_note_internal,
'dependencies' : [self.TABLE_EMAIL,
],
'models':[{
'model': 'mail.message',
'hook': self.hook_note,
'fields': {
'id': xml_id(self.TABLE_NOTE_INTERNAL, 'id'),
'subject':concat('name', 'filename', 'date_entered', delimiter=' * '),
'body': call(lambda vals, body: fix_email(body or ''),
value('description')),
'model': 'res_model',
'res_id': 'res_id',
'type':const('email'),
'date': 'date_entered',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'subtype_id/id':const('mail.mt_comment'),
}
}]
}
def get_mapping_history_attachment(self):
# is not used
        return {
'name': self.TABLE_HISTORY_ATTACHMNET,
'model' : 'ir.attachment',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_LEAD, self.TABLE_OPPORTUNITY, self.TABLE_MEETING, self.TABLE_CALL, self.TABLE_EMAIL],
'hook' : import_history,
'models':[{
'fields': {
'name':'name',
'user_id/id': ref(self.TABLE_USER, 'created_by'),
'description': ppconcat('description', 'description_html'),
'res_id': 'res_id',
'res_model': 'model',
'partner_id/.id' : 'partner_id/.id',
'datas' : 'datas',
'datas_fname' : 'datas_fname'
}
}]
        }
    def get_mapping_bug(self):
# is not used
return {
'name': self.TABLE_BUG,
'model' : 'project.issue',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'name': concat('bug_number', 'name', delimiter='-'),
'project_id/id': call(get_bug_project_id, 'sugarcrm_bugs'),
'categ_id/id': call(get_category, 'project.issue', value('type')),
'description': ppconcat('description', 'source', 'resolution', 'work_log', 'found_in_release', 'release_name', 'fixed_in_release_name', 'fixed_in_release'),
'priority': get_project_issue_priority,
'state': map_val('status', project_issue_state),
'assigned_to/id' : ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def get_mapping_project(self):
# is not used
return {
'name': self.TABLE_PROJECT,
'model' : 'project.project',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_project,
'models':[{
'fields': {
'name': 'name',
'date_start': 'estimated_start_date',
'date': 'estimated_end_date',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/.id': 'partner_id/.id',
'contact_id/.id': 'contact_id/.id',
'state': map_val('status', project_state)
}
}]
}
def get_mapping_project_task(self):
# is not used
return {
'name': self.TABLE_PROJECT_TASK,
'model' : 'project.task',
'dependencies' : [self.TABLE_USER, self.TABLE_PROJECT],
'models':[{
'fields': {
'name': 'name',
'date_start': 'date_start',
'date_end': 'date_finish',
'project_id/id': ref(self.TABLE_PROJECT, 'project_id'),
'planned_hours': 'estimated_effort',
'priority': get_project_task_priority,
'description': ppconcat('description','milestone_flag', 'project_task_id', 'task_number', 'percent_complete'),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': 'partner_id/id',
'contact_id/id': 'contact_id/id',
'state': map_val('status', project_task_state)
}
}]
}
def get_mapping_task(self):
# is not used
return {
'name': self.TABLE_TASK,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_task,
'models':[{
'fields': {
'name': 'name',
'date': 'date',
'date_deadline': 'date_deadline',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'categ_id/id': call(get_category, 'crm.meeting', const('Tasks')),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': ref(self.TABLE_CONTACT,'contact_id'),
'state': map_val('status', task_state)
}
}]
}
def get_mapping_call(self):
# is not used
return {
'name': self.TABLE_CALL,
'model' : 'crm.phonecall',
'dependencies' : [self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD],
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'categ_id/id': call(get_category, 'crm.phonecall', value('direction')),
'opportunity_id/id': related_ref(self.TABLE_OPPORTUNITY),
'description': ppconcat('description'),
'state': map_val('status', call_state)
}
}]
}
def get_mapping_meeting(self):
# is not used
return {
'name': self.TABLE_MEETING,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD, self.TABLE_TASK],
'hook': import_meeting,
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'location': 'location',
'attendee_ids/id':'attendee_ids/id',
'alarm_id/id': call(get_alarm_id, value('reminder_time')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'state': map_val('status', meeting_state)
}
}]
}
def get_mapping_opportunity(self):
# is not used
return {
'name': self.TABLE_OPPORTUNITY,
'model' : 'crm.lead',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT,self.TABLE_COMPAIGN],
'hook' : import_opp,
'models':[{
'fields': {
'name': 'name',
'probability': 'probability',
'partner_id/id': refbyname(self.TABLE_ACCOUNT, 'account_name', 'res.partner'),
'title_action': 'next_step',
'partner_address_id/id': 'partner_address_id/id',
'planned_revenue': 'amount',
'date_deadline': 'date_closed',
'user_id/id' : ref(self.TABLE_USER, 'assigned_user_id'),
'stage_id/id' : get_opportunity_status,
'type' : const('opportunity'),
'categ_id/id': call(get_category, 'crm.lead', value('opportunity_type')),
'email_from': 'email_from',
'state': map_val('status', opp_state),
'description' : 'description',
}
}]
}
def get_mapping_compaign(self):
# is not used
return {
'name': self.TABLE_COMPAIGN,
'model' : 'crm.case.resource.type',
'models':[{
'fields': {
'name': 'name',
}
}]
}
def get_mapping_employee(self):
# is not used
return {
'name': self.TABLE_EMPLOYEE,
'model' : 'hr.employee',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'resource_id/id': get_ressource,
'name': concat('first_name', 'last_name'),
'work_phone': 'phone_work',
'mobile_phone': 'phone_mobile',
'user_id/id': ref(self.TABLE_USER, 'id'),
'address_home_id/id': get_user_address,
'notes': ppconcat('messenger_type', 'messenger_id', 'description'),
'job_id/id': get_job_id,
'work_email' : 'email1',
'coach_id/id_parent' : 'reports_to_id',
}
}]
}
|
lgpl-3.0
|
TomAugspurger/pandas
|
pandas/tests/series/methods/test_reindex_like.py
|
8
|
1245
|
from datetime import datetime
import numpy as np
from pandas import Series
import pandas._testing as tm
def test_reindex_like(datetime_series):
other = datetime_series[::2]
tm.assert_series_equal(
datetime_series.reindex(other.index), datetime_series.reindex_like(other)
)
# GH#7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method="pad")
expected = Series([5, np.nan], index=[day1, day3])
tm.assert_series_equal(result, expected)
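# reindex_like(other, method="nearest") behaves like reindex(other.index,
# method="nearest"): each target label takes the value of the nearest existing
# label, optionally bounded by a scalar or per-label tolerance.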
def test_reindex_like_nearest():
ser = Series(np.arange(10, dtype="int64"))
target = [0.1, 0.9, 1.5, 2.0]
other = ser.reindex(target, method="nearest")
expected = Series(np.around(target).astype("int64"), target)
result = ser.reindex_like(other, method="nearest")
tm.assert_series_equal(expected, result)
result = ser.reindex_like(other, method="nearest", tolerance=1)
tm.assert_series_equal(expected, result)
result = ser.reindex_like(other, method="nearest", tolerance=[1, 2, 3, 4])
tm.assert_series_equal(expected, result)
|
bsd-3-clause
|
yantrabuddhi/FreeCAD
|
src/Mod/Plot/InitGui.py
|
18
|
2920
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
class PlotWorkbench(Workbench):
"""Workbench of Plot module."""
from plotUtils import Paths
import PlotGui
Icon = 'Icon.svg'
MenuText = "Plot"
ToolTip = ("The Plot module is used to edit/save output plots performed "
"by other tools")
def Initialize(self):
from PySide import QtCore, QtGui
cmdlst = ["Plot_SaveFig",
"Plot_Axes",
"Plot_Series",
"Plot_Grid",
"Plot_Legend",
"Plot_Labels",
"Plot_Positions"]
self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot edition tools")), cmdlst)
self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot")), cmdlst)
try:
import matplotlib
except ImportError:
from PySide import QtCore, QtGui
msg = QtGui.QApplication.translate(
"plot_console",
"matplotlib not found, Plot module will be disabled",
None,
QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintMessage(msg + '\n')
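# Register the workbench so FreeCAD shows "Plot" in its workbench selector;
# Gui (FreeCADGui) is injected into InitGui.py by FreeCAD at startup.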
Gui.addWorkbench(PlotWorkbench())
|
lgpl-2.1
|
Myasuka/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
eteq/emcee
|
document/plots/oned.py
|
16
|
2164
|
from __future__ import print_function
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as pl
import h5py
from multiprocessing import Pool
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import emcee
# import acor
def lnprobfn(p, icov):
return -0.5 * np.dot(p, np.dot(icov, p))
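# Draw a random symmetric positive-definite covariance matrix: the normalized
# sum of (ndim + dof) outer products of standard-normal vectors, i.e. a
# Wishart-like sample that is almost surely full rank for dof >= 1.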
def random_cov(ndim, dof=1):
v = np.random.randn(ndim * (ndim + dof)).reshape((ndim + dof, ndim))
return (sum([np.outer(v[i], v[i]) for i in range(ndim + dof)])
/ (ndim + dof))
_rngs = {}
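# Cache one RandomState per worker PID (seeded from the PID and the current
# time) so that forked workers do not all reuse the parent's random stream.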
def _worker(args):
i, outfn, nsteps = args
pid = os.getpid()
_random = _rngs.get(pid, np.random.RandomState(int(int(pid)
+ time.time())))
_rngs[pid] = _random
ndim = int(np.ceil(2 ** (7 * _random.rand())))
nwalkers = 2 * ndim + 2
# nwalkers += nwalkers % 2
    print(ndim, nwalkers)
cov = random_cov(ndim)
icov = np.linalg.inv(cov)
ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn,
args=[icov])
ens_samp.random_state = _random.get_state()
pos, lnprob, state = ens_samp.run_mcmc(np.random.randn(nwalkers * ndim)
.reshape([nwalkers, ndim]), nsteps)
proposal = np.diag(cov.diagonal())
mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn,
args=[icov])
mh_samp.random_state = state
mh_samp.run_mcmc(np.random.randn(ndim), nsteps)
    f = h5py.File(outfn, "a")
f["data"][i, :] = np.array([ndim, np.mean(ens_samp.acor),
np.mean(mh_samp.acor)])
f.close()
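# Experiment sketch: for `niter` random Gaussian targets of random dimension
# (log-uniform up to 2**7), record the mean autocorrelation time of the
# ensemble sampler and of plain Metropolis-Hastings, then plot both against
# dimension.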
def oned():
nsteps = 10000
niter = 10
nthreads = 2
outfn = os.path.join(os.path.split(__file__)[0], "gauss_scaling.h5")
    print(outfn)
f = h5py.File(outfn, "w")
f.create_dataset("data", (niter, 3), "f")
f.close()
pool = Pool(nthreads)
pool.map(_worker, [(i, outfn, nsteps) for i in range(niter)])
    f = h5py.File(outfn, "r")
data = f["data"][...]
f.close()
pl.clf()
pl.plot(data[:, 0], data[:, 1], "ks", alpha=0.5)
pl.plot(data[:, 0], data[:, 2], ".k", alpha=0.5)
pl.savefig(os.path.join(os.path.split(__file__)[0], "gauss_scaling.png"))
if __name__ == "__main__":
oned()
|
mit
|