repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
alexsavio/scikit-learn | sklearn/gaussian_process/gpr.py | 13 | 18747 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to the standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
is equivalent to adding a WhiteKernel with c=alpha. Allowing the noise
level to be specified directly as a parameter is mainly for convenience
and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values becomes zero. This parameter should be set to
True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled by default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used for drawing random numbers, e.g., the initial
theta values when the optimizer is restarted. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, its standard
deviation (return_std=True) or covariance (return_cov=True) can also
be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self.y_train_mean + y_mean  # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
| bsd-3-clause |
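
The `GaussianProcessRegressor` defined above is used like any other scikit-learn estimator: construct it with a kernel, call `fit`, then `predict` (optionally with uncertainty estimates) or `sample_y`. A minimal usage sketch, assuming scikit-learn >= 0.18 and a made-up 1-D toy dataset:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# Hypothetical toy data: noisy observations of sin(x)
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = np.sin(X).ravel() + 0.1 * rng.randn(20)

# The kernel values given here are only initial guesses; fit() optimizes
# the hyperparameters by maximizing the log-marginal likelihood.
kernel = C(1.0) * RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=1e-2, n_restarts_optimizer=5)
gpr.fit(X, y)

X_test = np.linspace(0, 5, 100)[:, np.newaxis]
y_mean, y_std = gpr.predict(X_test, return_std=True)  # posterior mean and std
samples = gpr.sample_y(X_test, n_samples=3)           # draws from the posterior
print(gpr.kernel_)                                    # kernel with tuned hyperparameters
print(gpr.log_marginal_likelihood_value_)
```
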
rseubert/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow these defaults to be adapted to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
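
Since both the cache file name and the download URL are derived from `mldata_filename`, it helps to see the normalization on its own. Below is an illustrative re-statement of that two-line helper, with the outputs the regex above produces shown as comments (note that mldata.org has since been taken offline, so `fetch_mldata` only works against a local cache or a mock):

```python
import re

def mldata_filename(dataname):
    # lower-case, turn spaces into dashes, strip parentheses and dots
    dataname = dataname.lower().replace(' ', '-')
    return re.sub(r'[().]', '', dataname)

print(mldata_filename('Whistler Daily Snowfall'))  # whistler-daily-snowfall
print(mldata_filename('datasets-UCI iris'))        # datasets-uci-iris
print(mldata_filename('MNIST (original)'))         # mnist-original
```
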
jeffreyliu3230/osf.io | tasks.py | 9 | 23940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given by its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
`invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug"):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
run(bin_prefix(cmd))
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False):
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if not addons:
return
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
except subprocess.CalledProcessError as err:
raise err
# logger.warn("Error when running git describe")
return {}
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
| apache-2.0 |
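
Nearly every entry point in the tasks file above follows the same pattern: a plain function decorated with `@task`, shelling out through `run` (optionally via `bin_prefix` so the current virtualenv's binaries are used). A stripped-down sketch of that pattern, assuming the same pre-1.0 `invoke` API the file uses (module-level `run`, no context argument); the task names here are hypothetical:

```python
# Hypothetical example mirroring the pattern of the tasks.py above
import os
import sys

from invoke import task, run

def bin_prefix(cmd):
    """Prefix a command with the directory of the current Python binary."""
    return os.path.join(os.path.dirname(sys.executable), cmd)

@task(aliases=['hi'])
def greet(name='world', loud=False):
    """Print a greeting: `invoke greet --name=OSF --loud`."""
    msg = 'hello {}'.format(name)
    run('echo {}'.format(msg.upper() if loud else msg), echo=True)

@task
def lint():
    """Run flake8 from the active virtualenv."""
    run(bin_prefix('flake8 .'), echo=True)
```
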
bluemonk482/tdparse | src/sklearnSVM.py | 1 | 3651 | import os, time
from argparse import ArgumentParser
import numpy as np
from sklearn import svm, metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit, GroupKFold
from sklearn.datasets import load_svmlight_file
from sklearn.externals import joblib
from utilities import readfeats, readfeats_sklearn, twoclass_fscore, frange, writingfile
# from liblinear import scaling
def macro_averaged_precision(y_true, y_predicted):
p = metrics.precision_score(y_true, y_predicted, average='macro')
return p
def predict(clf, x_train, y_train, x_test, y_test):
y_predicted = clf.predict(x_test)
print 'Macro-F1 score: ', metrics.f1_score(y_test, y_predicted, average='macro')
print 'Accuracy score: ', metrics.accuracy_score(y_test, y_predicted)
print "Macro-F1 score (2 classes):", (metrics.f1_score(y_test, y_predicted, average=None)[0]+metrics.f1_score(y_test, y_predicted, average=None)[-1])/2
return y_predicted
def CV(x_train, y_train):
c=[]
crange=frange(0.00001,1,10)
c.extend([i for i in crange])
crange=frange(0.00003,3,10)
c.extend([i for i in crange])
crange=frange(0.00005,5,10)
c.extend([i for i in crange])
crange=frange(0.00007,7,10)
c.extend([i for i in crange])
crange=frange(0.00009,10,10)
c.extend([i for i in crange])
c.sort() #Cost parameter values; use a bigger search space for better performance
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0).split(x_train, y_train)
# ids = readfeats('../data/election/output/id_train') # only for election data
# cv = GroupKFold(n_splits=5).split(x_train, y_train, ids)
clf = svm.LinearSVC()
param_grid = [{'C': c}]
twoclass_f1_macro = metrics.make_scorer(twoclass_fscore, greater_is_better=True)
precision_macro = metrics.make_scorer(macro_averaged_precision, greater_is_better=True)
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=cv, verbose=0, scoring='f1_macro')
grid_search.fit(x_train, y_train)
print("Best parameters set:")
print '\n'
print(grid_search.best_estimator_)
print '\n'
print(grid_search.best_score_)
print(grid_search.best_params_)
print '\n'
return grid_search.best_estimator_
def save_model(clf, filepath):
joblib.dump(clf, filepath)
def main(output_dir):
trfile = '../data/'+output_dir+'/train.scale'
tfile = '../data/'+output_dir+'/test.scale'
pfile = '../data/'+output_dir+'/predresults'
truefile = '../data/'+output_dir+'/y_test'
# print "scaling features"
# scaling(output_dir)
print "loading features for training"
x_train, y_train = readfeats_sklearn(trfile)
print "loading features for testing"
x_test, y_test = readfeats_sklearn(tfile)
print "cross-validation"
clf = CV(x_train, y_train) # Comment this if parameter tuning is not desired
# print "training classifier"
# clf = svm.LinearSVC(C=1, class_weight='balanced') # Manually select C-parameter for training SVM
# clf.fit(x_train, y_train)
# print "saving trained model"
# save_model(clf, '../models/sklearn_saved.model')
print "evaluation"
preds = predict(clf, x_train, y_train, x_test, y_test)
print "writing labels"
writingfile(pfile, preds)
if __name__ == "__main__":
start = time.clock()
parser = ArgumentParser()
parser.add_argument("--data", dest="d", help="Output folder name", default='election')
args = parser.parse_args()
output_dir = args.d + '/output'
main(output_dir)
print "\n"
print "Time taken:", time.clock() - start | mit |
rohangoel96/IRCLogParser | IRCLogParser/lib/analysis/user.py | 1 | 18354 | import networkx as nx
import re
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import text
from nltk.stem.wordnet import WordNetLemmatizer
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from time import time
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import lib.util as util
sys.path.append('../lib')
import lib.config as config
import ext.common_english_words as common_english_words
import ext.extend_stop_words as custom_stop_words
def nick_change_graph(log_dict, DAY_BY_DAY_ANALYSIS=False):
""" creates a graph which tracks the nick changes of the users
where each edge has a time stamp denoting the time
at which the nick was changed by the user
Args:
log_dict (str): Dictionary of logs created using reader.py
Returns:
list of the day_to_day nick changes if config.DAY_BY_DAY_ANALYSIS=True or else an aggregate nick change graph for the
given time period.
"""
rem_time = None #remembers the time of the last message of the file parsed before the current file
nick_change_day_list = []
aggregate_nick_change_graph = nx.MultiDiGraph() # graph for nick changes in the whole time span (not day to day)
for day_content_all_channels in log_dict.values():
for day_content in day_content_all_channels:
day_log = day_content["log_data"]
today_nick_change_graph = nx.MultiDiGraph() #using networkx
current_line_no = -1
for line in day_log:
current_line_no = current_line_no + 1
if(line[0] == '=' and "changed the topic of" not in line): #excluding the condition when user changes the topic. Search for only nick changes
nick1 = util.splice_find(line, "=", " is", 3)
nick2 = util.splice_find(line, "wn as", "\n", 5)
earlier_line_no = current_line_no
while earlier_line_no >= 0: #to find the line just before "=="" so as to find time of Nick Change
earlier_line_no = earlier_line_no - 1
if(day_log[earlier_line_no][0] != '='):
year, month, day = util.get_year_month_day(day_content)
util.build_graphs(nick1, nick2, day_log[earlier_line_no][1:6], year, month, day, today_nick_change_graph, aggregate_nick_change_graph)
break
if(earlier_line_no == -1):
today_nick_change_graph.add_edge(nick1, nick2, weight=rem_time)
aggregate_nick_change_graph.add_edge(nick1, nick2, weight = rem_time)
count = len(day_log) - 1 #setting up the rem_time for next file, by noting the last message sent on that file.
while(count >= 0):
if(day_log[count][0] != '='):
rem_time = day_log[count][1:6]
break
count = count-1
nick_change_day_list.append(today_nick_change_graph)
if DAY_BY_DAY_ANALYSIS:
return nick_change_day_list
else:
return aggregate_nick_change_graph
def top_keywords_for_nick(user_keyword_freq_dict, nick, threshold, min_words_spoken):
"""
outputs top keywords for a particular nick
Args:
user_keyword_freq_dict(dict): dictionary for each user having keywords and their frequency
nick(str) : user to do analysis on
threshold(float): threshold on normalised values to separate meaningful words
min_words_spoken(int): threshold on the minimum number of words spoken by a user to perform analysis on
Returns:
null
"""
keywords = None
for dicts in user_keyword_freq_dict:
if dicts['nick'] == nick:
keywords = dicts['keywords']
break
total_freq = 0.0
for freq_tuple in keywords:
total_freq += freq_tuple[1]
top_keywords = []
top_keywords_normal_freq = []
if total_freq > min_words_spoken:
if keywords:
for keyword in keywords:
if keyword[2] >= threshold:
top_keywords.append(keyword[0].encode('ascii', 'ignore'))
top_keywords_normal_freq.append(keyword[2])
if len(top_keywords) == 0:
if config.DEBUGGER:
print "No word's normalised score crosses the value of", threshold
top_keywords = None
else:
if config.DEBUGGER:
print "No message sent by nick", nick
pass
else:
if config.DEBUGGER:
print "Not enough words spoken by", nick, "; spoke" ,int(total_freq), "words only, required", min_words_spoken
pass
return (top_keywords, top_keywords_normal_freq)
def keywords(log_dict, nicks, nick_same_list):
"""
Returns keywords for all users
Args:
log_dict (str): Dictionary of logs data created using reader.py
nicks(List) : list of nicknames created using nickTracker.py
nick_same_list : list of same_nick names created using nickTracker.py
Returns
keywords_filtered: filtered keywords for user
user_keyword_freq_dict: dictionary for each user having keywords and their frequency
user_words_dict: words for each user
nicks_for_stop_words: stop words
"""
user_words_dict = []
user_keyword_freq_dict = []
keywords_filtered = []
no_messages = 0
def get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list):
if(rec == nick_name):
if(nick_to_compare != nick_name):
nick_receiver = iter_nicks(nick_receiver, nicks, nick_same_list, nick_name)
return nick_receiver
def iter_nicks(nick_sender_receiver, nicks, nick_same_list, nick_comp):
for i in range(len(nicks)):
if nick_comp in nick_same_list[i]:
nick_sender_receiver = nick_same_list[i][0]
break
else:
nick_sender_receiver = nick_comp
return nick_sender_receiver
for day_content_all_channels in log_dict.values():
for day_content in day_content_all_channels:
day_log = day_content["log_data"]
for line in day_log:
flag_comma = 0
if(util.check_if_msg_line(line)):
m = re.search(r"\<(.*?)\>", line)
nick_to_compare = util.correctLastCharCR((m.group(0)[1:-1]))
nick_sender = ''
nick_sender = iter_nicks(nick_sender, nicks, nick_same_list, nick_to_compare)
nick_receiver = ''
for nick_name in nicks:
rec_list = [e.strip() for e in line.split(':')] #receiver list splited about :
util.rec_list_splice(rec_list)
if not rec_list[1]: #index 0 will contain time 14:02
break
rec_list = util.correct_last_char_list(rec_list)
for rec in rec_list:
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
if "," in rec_list[1]: #receiver list may of the form <Dhruv> Rohan, Ram :
flag_comma = 1
rec_list_2 = [e.strip() for e in rec_list[1].split(',')]
rec_list_2 = util.correct_last_char_list(rec_list_2)
for rec in rec_list_2:
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
if(flag_comma == 0): #receiver list can be <Dhruv> Rohan, Hi!
rec = util.splice_find(line, ">", ", ", 1)
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
#generating the words written by the sender
message = rec_list[1:]
no_messages += 1
correctedNickReciever = util.correct_nick_for_(nick_receiver)
if correctedNickReciever in message:
message.remove(correctedNickReciever)
lmtzr = WordNetLemmatizer()
#limit word size = 3, drop numbers.
word_list_temp = re.sub(r'\d+', '', " ".join(re.findall(r'\w{3,}', ":".join(message).replace(","," ")))).split(" ")
word_list = []
#remove punctuations
for word in word_list_temp:
word = word.lower()
word_list.append(word.replace("'",""))
word_list_lemmatized = []
try:
word_list_lemmatized = map(lmtzr.lemmatize, map(lambda x: lmtzr.lemmatize(x, 'v'), word_list))
except UnicodeDecodeError:
pass
fr = 1
for dic in user_words_dict:
if dic['sender'] == nick_sender:
dic['words'].extend(word_list_lemmatized)
fr = 0
if fr:
user_words_dict.append({'sender':nick_sender, 'words':word_list_lemmatized })
nicks_for_stop_words = []
stop_word_without_apostrophe = []
for l in nick_same_list:
nicks_for_stop_words.extend(l)
for dictonary in user_words_dict:
nicks_for_stop_words.append(dictonary['sender'])
nicks_for_stop_words.extend([x.lower() for x in nicks_for_stop_words])
for words in common_english_words.words:
stop_word_without_apostrophe.append(words.replace("'",""))
stop_words_extended = extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe)
count_vect = CountVectorizer(analyzer = 'word', stop_words=stop_words_extended, min_df = 1)
for dictonary in user_words_dict:
try:
matrix = count_vect.fit_transform(dictonary['words'])
freqs = [[word, matrix.getcol(idx).sum()] for word, idx in count_vect.vocabulary_.items()]
keywords = sorted(freqs, key = lambda x: -x[1])
total_freq = 0.0
for freq_tuple in keywords:
total_freq += freq_tuple[1]
for freq_tuple in keywords:
freq_tuple.append(round(freq_tuple[1]/float(total_freq), 5))
user_keyword_freq_dict.append({'nick':dictonary['sender'], 'keywords': keywords })
except ValueError:
pass
for data in user_keyword_freq_dict:
keywords, normal_scores = top_keywords_for_nick(user_keyword_freq_dict, data['nick'], config.KEYWORDS_THRESHOLD, config.KEYWORDS_MIN_WORDS)
if config.DEBUGGER:
print "Nick:", data['nick']
print "Keywords with normalised score > 0.01\n", keywords
print "Their Normal scores\n", normal_scores
print "\n"
if keywords:
keywords_filtered.append({'nick': data['nick'], 'keywords': keywords})
return keywords_filtered, user_keyword_freq_dict, user_words_dict, nicks_for_stop_words
def keywords_clusters(log_dict, nicks, nick_same_list):
"""
Uses `keywords` to form clusters of words after TF-IDF (optional).
Args:
log_dict (str): Dictionary of logs data created using reader.py
nicks(List) : list of nicknames created using nickTracker.py
nick_same_list : list of same_nick names created using nickTracker.py
Returns
null
"""
'''
AUTO TFIDF FROM JUST SENTENCES
'''
#http://scikit-learn.org/stable/auto_examples/text/document_clustering.html
#BUILDING CORPUS
keyword_dict_list, user_keyword_freq_dict, user_words_dict_list, nicks_for_stop_words = keywords(log_dict, nicks, nick_same_list)
corpus = []
def build_centroid(km):
if config.ENABLE_SVD:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
return order_centroids
for user_words_dict in user_words_dict_list:
corpus.append(" ".join(map(str,user_words_dict['words'])))
print "No. of users", len(corpus)
#TF_IDF
stop_word_without_apostrophe = []
for words in common_english_words.words:
stop_word_without_apostrophe.append(words.replace("'",""))
stop_words_extended = extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe)
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=stop_words_extended,
use_idf=True)
print "Extracting features from the training dataset using TF-IDF"
t0 = time()
tf_idf = vectorizer.fit_transform(corpus)
print("done in %fs" % (time() - t0))
print "n_samples: %d, n_features: %d \n" % tf_idf.shape
# LSA
if config.ENABLE_SVD:
print("============USING SVD==========")
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(100) #recommened value = 100
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
tf_idf = lsa.fit_transform(tf_idf)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
if not config.ENABLE_ELBOW_METHOD_FOR_K:
# CLUSTERING
km = KMeans(n_clusters=config.NUMBER_OF_CLUSTERS, init='k-means++',
random_state=3465, max_iter=100, n_init=8)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(tf_idf)
print("done in %0.3fs" % (time() - t0))
print("Top terms per cluster:")
order_centroids = build_centroid(km)
np.set_printoptions(threshold=np.nan)
terms = vectorizer.get_feature_names()
for i in range(config.NUMBER_OF_CLUSTERS):
print("Cluster %d:" % i)
for ind in order_centroids[i, :config.SHOW_N_WORDS_PER_CLUSTER]:
print terms[ind]+"\t"+str(round(km.cluster_centers_[i][ind], 2))
print ""
else:
print "============ELBOW METHOD ============="
sum_squared_errors_list = []
avg_sum_squared_errors_list = []
for i in xrange(1, config.CHECK_K_TILL + 1):
print "\n===>> K = ", i
km = KMeans(n_clusters=i, init='k-means++', max_iter=100, n_init=8)
t0 = time()
km.fit(tf_idf)
order_centroids = build_centroid(km)
distance_matrix_all_combination = cdist(tf_idf, km.cluster_centers_, 'euclidean')
# cIdx = np.argmin(distance_matrix_all_combination,axis=1)
distance_from_nearest_centroid = np.min(distance_matrix_all_combination, axis=1)
sum_squared_errors = sum(distance_from_nearest_centroid)
avg_sum_squared_errors = sum_squared_errors/tf_idf.shape[0]
print "Sum Squared Error =", sum_squared_errors
print "Avg Sum Squared Error =", avg_sum_squared_errors
sum_squared_errors_list.append(sum_squared_errors)
avg_sum_squared_errors_list.append(avg_sum_squared_errors)
print("Top terms per cluster:")
terms = vectorizer.get_feature_names()
for cluster_idx in range(i):
print("Cluster %d:" % cluster_idx)
for ind in order_centroids[cluster_idx, :config.SHOW_N_WORDS_PER_CLUSTER]:
print(' %s' % terms[ind])
print()
plt.plot(range(1, config.CHECK_K_TILL+1), sum_squared_errors_list, 'b*-')
# ax.plot(K[kIdx], avgWithinSS[kIdx], marker='o', markersize=12,
# markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average sum of squares')
plt.title('Elbow for KMeans clustering')
plt.show()
#NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION
print "NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION"
def extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe):
stop_words_extended = text.ENGLISH_STOP_WORDS.union(common_english_words.words).union(nicks_for_stop_words).union(stop_word_without_apostrophe).union(custom_stop_words.words).union(custom_stop_words.slangs)
return stop_words_extended | mit |
pprett/statsmodels | statsmodels/stats/outliers_influence.py | 1 | 27029 | # -*- coding: utf-8 -*-
"""Influence and Outlier Measures
Created on Sun Jan 29 11:16:09 2012
Author: Josef Perktold
License: BSD-3
"""
from collections import defaultdict
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
#influence measures
def reset_ramsey(res, degree=5):
'''Ramsey's RESET specification test for linear models
This is a general specification test, for additional non-linear effects
in a model.
Notes
-----
The test fits an auxiliary OLS regression where the design matrix, exog,
is augmented by powers 2 to degree of the fitted values. Then it performs
an F-test whether these additional terms are significant.
If the p-value of the f-test is below a threshold, e.g. 0.1, then this
indicates that there might be additional non-linear effects in the model
and that the linear model is mis-specified.
References
----------
http://en.wikipedia.org/wiki/Ramsey_RESET_test
'''
order = degree + 1
k_vars = res.model.exog.shape[1]
#vander without constant and x:
y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop the constant and linear columns
exog = np.column_stack((res.model.exog, y_fitted_vander))
res_aux = OLS(res.model.endog, exog).fit()
#r_matrix = np.eye(degree, exog.shape[1], k_vars)
r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
#df1 = degree - 1
#df2 = exog.shape[0] - degree - res.df_model (without constant)
return res_aux.f_test(r_matrix) #, r_matrix, res_aux
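# A minimal sketch of calling reset_ramsey on a fitted OLS model; the data are
# synthetic, degree=3 is an arbitrary illustrative choice, and this helper is not
# called anywhere in the module.
def _demo_reset_ramsey():
    rs = np.random.RandomState(0)
    exog = np.column_stack((np.ones(100), rs.randn(100, 2)))
    endog = exog[:, 1] + 0.5 * exog[:, 1] ** 2 + rs.randn(100)
    res = OLS(endog, exog).fit()
    # a small p-value suggests neglected non-linear terms
    return reset_ramsey(res, degree=3)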
def variance_inflation_factor(exog, exog_idx):
'''variance inflation factor, VIF, for one exogenous variable
The variance inflation factor is a measure for the increase of the
variance of the parameter estimates if an additional variable, given by
exog_idx is added to the linear regression. It is a measure for
multicollinearity of the design matrix, exog.
One recommendation is that if VIF is greater than 5, then the explanatory
variable given by exog_idx is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this.
Parameters
----------
exog : ndarray, (nobs, k_vars)
design matrix with all explanatory variables, as for example used in
regression
exog_idx : int
index of the exogenous variable in the columns of exog
Returns
-------
vif : float
variance inflation factor
Notes
-----
This function does not save the auxiliary regression.
See Also
--------
xxx : class for regression diagnostics TODO: doesn't exist yet
References
----------
http://en.wikipedia.org/wiki/Variance_inflation_factor
'''
k_vars = exog.shape[1]
x_i = exog[:, exog_idx]
mask = np.arange(k_vars) != exog_idx
x_noti = exog[:, mask]
r_squared_i = OLS(x_i, x_noti).fit().rsquared
vif = 1. / (1. - r_squared_i)
return vif
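# A minimal sketch of computing a VIF for every column of a design matrix; the
# random design with one nearly collinear column is purely illustrative and this
# helper is not called anywhere in the module.
def _demo_variance_inflation_factors():
    rs = np.random.RandomState(0)
    exog = rs.randn(50, 3)
    # duplicate the first column with a little noise to force a large VIF
    exog = np.column_stack((exog, exog[:, 0] + 0.1 * rs.randn(50)))
    return [variance_inflation_factor(exog, i) for i in range(exog.shape[1])]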
class OLSInfluence(object):
'''class to calculate outlier and influence measures for OLS result
Parameters
----------
results : Regression Results instance
currently assumes the results are from an OLS regression
Notes
-----
One part of the results can be calculated without any auxiliary regression
(some of these have the `_internal` postfix in the name). Other statistics
require leave-one-observation-out (LOOO) auxiliary regressions, and will be
slower (mainly results with the `_external` postfix in the name).
For the auxiliary LOOO regressions, only the required results are stored.
Using the LOO measures is currently only recommended if the data set
is not too large. One possible approach for LOOO measures would be to
identify possible problem observations with the _internal measures, and
then run the leave-one-observation-out only with observations that are
possible outliers. (However, this is not yet available in an automated way.)
This should be extended to general least squares.
The leave-one-variable-out (LOVO) auxiliary regressions are currently not
used.
'''
def __init__(self, results):
#check which model is allowed
try:
self.results = results._results # don't use wrapped results
except: # we got unwrapped results
self.results = results
self.nobs, self.k_vars = results.model.exog.shape
self.endog = results.model.endog
self.exog = results.model.exog
self.model_class = results.model.__class__
self.sigma_est = np.sqrt(results.mse_resid)
self.aux_regression_exog = {}
self.aux_regression_endog = {}
@cache_readonly
def hat_matrix_diag(self):
'''(cached attribute) diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model class
'''
return (self.exog * self.results.model.pinv_wexog.T).sum(1)
@cache_readonly
def resid_press(self):
'''(cached attribute) PRESS residuals
'''
hii = self.hat_matrix_diag
return self.results.resid / (1 - hii)
@cache_readonly
def influence(self):
'''(cached attribute) influence measure
matches the influence measure that gretl reports
u * h / (1 - h)
where u are the residuals and h is the diagonal of the hat_matrix
'''
hii = self.hat_matrix_diag
return self.results.resid * hii / (1 - hii)
@cache_readonly
def hat_diag_factor(self):
'''(cached attribute) factor of diagonal of hat_matrix used in influence
this might be useful for internal reuse
h / (1 - h)
'''
hii = self.hat_matrix_diag
return hii / (1 - hii)
@cache_readonly
def ess_press(self):
'''(cached attribute) error sum of squares of PRESS residuals
'''
return np.dot(self.resid_press, self.resid_press)
@cache_readonly
def resid_studentized_internal(self):
'''(cached attribute) studentized residuals using variance from OLS
this uses sigma from original estimate
does not require leave one out loop
'''
return self.get_resid_studentized_external(sigma=None)
#return self.results.resid / self.sigma_est
@cache_readonly
def resid_studentized_external(self):
'''(cached attribute) studentized residuals using LOOO variance
this uses sigma from leave-one-out estimates
requires leave one out loop for observations
'''
sigma_looo = np.sqrt(self.sigma2_not_obsi)
return self.get_resid_studentized_external(sigma=sigma_looo)
def get_resid_studentized_external(self, sigma=None):
'''calculate studentized residuals
Parameters
----------
sigma : None or float
estimate of the standard deviation of the residuals. If None, then
the estimate from the regression results is used.
Returns
-------
stzd_resid : ndarray
studentized residuals
Notes
-----
studentized residuals are defined as ::
resid / sigma / np.sqrt(1 - hii)
where resid are the residuals from the regression, sigma is an
estimate of the standard deviation of the residuals, and hii is the
diagonal of the hat_matrix.
'''
hii = self.hat_matrix_diag
if sigma is None:
sigma2_est = self.results.mse_resid
#can be replaced by different estimators of sigma
sigma = np.sqrt(sigma2_est)
return self.results.resid / sigma / np.sqrt(1 - hii)
@cache_readonly
def dffits_internal(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_internal
uses original results, no nobs loop
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dffits(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_external,
uses results from leave-one-observation-out loop
It is recommended that observations with abs(dffits) larger than a
threshold of 2 * sqrt(k / n), where k is the number of parameters, should
be investigated.
Returns
-------
dffits: float
dffits_threshold : float
References
----------
`Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dfbetas(self):
'''(cached attribute) dfbetas
uses results from leave-one-observation-out loop
'''
dfbetas = self.results.params - self.params_not_obsi#[None,:]
dfbetas /= np.sqrt(self.sigma2_not_obsi[:,None])
dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params))
return dfbetas
@cache_readonly
def sigma2_not_obsi(self):
'''(cached attribute) error variance for all LOOO regressions
This is 'mse_resid' from each auxiliary regression.
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['mse_resid'])
@cache_readonly
def params_not_obsi(self):
'''(cached attribute) parameter estimates for all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['params'])
@cache_readonly
def det_cov_params_not_obsi(self):
'''(cached attribute) determinant of cov_params of all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['det_cov_params'])
@cache_readonly
def cooks_distance(self):
'''(cached attribute) Cooks distance
uses original results, no nobs loop
'''
hii = self.hat_matrix_diag
#Eubank p.93, 94
cooks_d2 = self.resid_studentized_internal**2 / self.k_vars
cooks_d2 *= hii / (1 - hii)
from scipy import stats
#alpha = 0.1
#print stats.f.isf(1-alpha, n_params, res.df_modelwc)
pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)
return cooks_d2, pvals
@cache_readonly
def cov_ratio(self):
'''(cached attribute) covariance ratio between LOOO and original
This uses determinant of the estimate of the parameter covariance
from leave-one-out estimates.
requires leave one out loop for observations
'''
#don't use inplace division / because then we change original
cov_ratio = (self.det_cov_params_not_obsi
/ np.linalg.det(self.results.cov_params()))
return cov_ratio
@cache_readonly
def resid_var(self):
'''(cached attribute) estimate of variance of the residuals
::
sigma2 = sigma2_OLS * (1 - hii)
where hii is the diagonal of the hat matrix
'''
#TODO:check if correct outside of ols
return self.results.mse_resid * (1 - self.hat_matrix_diag)
@cache_readonly
def resid_std(self):
'''(cached attribute) estimate of standard deviation of the residuals
See Also
--------
resid_var
'''
return np.sqrt(self.resid_var)
def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True):
'''regression results from LOVO auxiliary regression with cache
The result instances are stored, which could use a large amount of
memory if the datasets are large. There are too many combinations to
store them all, except for small problems.
Parameters
----------
drop_idx : int
index of exog that is dropped from the regression
endog_idx : 'endog' or int
If 'endog', then the endogenous variable of the result instance
is regressed on the exogenous variables, excluding the one at
drop_idx. If endog_idx is an integer, then the exog with that
index is regressed with OLS on all other exogenous variables.
(The latter is the auxiliary regression for the variance inflation
factor.)
this needs more thought, memory versus speed
not yet used in any other parts, not sufficiently tested
'''
#check the cache first; if the auxiliary regression is missing, compute it below
if endog_idx == 'endog':
stored = self.aux_regression_endog
if drop_idx in stored:
return stored[drop_idx]
x_i = self.results.model.endog
else:
#nested dictionary, one sub-dictionary of results per endog_idx
stored = self.aux_regression_exog.setdefault(endog_idx, {})
if drop_idx in stored:
return stored[drop_idx]
x_i = self.exog[:, endog_idx]
mask = np.arange(self.k_vars) != drop_idx
x_noti = self.exog[:, mask]
res = OLS(x_i, x_noti).fit()
if store:
stored[drop_idx] = res
return res
def _get_drop_vari(self, attributes):
'''regress endog on exog without one of the variables
This uses a k_vars loop, only attributes of the OLS instance are stored.
Parameters
----------
attributes : list of strings
These are the names of the attributes of the auxiliary OLS results
instance that are stored and returned.
not yet used
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
endog = self.results.model.endog
exog = self.exog
cv_iter = LeaveOneOut(self.k_vars)
res_loo = defaultdict(list)
for inidx, outidx in cv_iter:
for att in attributes:
res_i = self.model_class(endog, exog[:,inidx]).fit()
res_loo[att].append(getattr(res_i, att))
return res_loo
@cache_readonly
def _res_looo(self):
'''collect required results from the LOOO loop
all results will be attached.
currently only 'params', 'mse_resid', 'det_cov_params' are stored
regresses endog on exog dropping one observation at a time
this uses a nobs loop, only attributes of the OLS instance are stored.
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
get_det_cov_params = lambda res: np.linalg.det(res.cov_params())
endog = self.endog
exog = self.exog
params = np.zeros_like(exog)
mse_resid = np.zeros_like(endog)
det_cov_params = np.zeros_like(endog)
cv_iter = LeaveOneOut(self.nobs)
for inidx, outidx in cv_iter:
res_i = self.model_class(endog[inidx], exog[inidx]).fit()
params[outidx] = res_i.params
mse_resid[outidx] = res_i.mse_resid
det_cov_params[outidx] = get_det_cov_params(res_i)
return dict(params=params, mse_resid=mse_resid,
det_cov_params=det_cov_params)
def summary_frame(self):
"""
Creates a DataFrame with all available influence results.
Returns
-------
frame : DataFrame
A DataFrame with all results.
Notes
-----
The resultant DataFrame contains six variables in addition to the
DFBETAS. These are:
* cooks_d : Cook's Distance defined in `Influence.cooks_distance`
* standard_resid : Standardized residuals defined in
`Influence.resid_studentized_internal`
* hat_diag : The diagonal of the projection, or hat, matrix defined in
`Influence.hat_matrix_diag`
* dffits_internal : DFFITS statistics using internally Studentized
residuals defined in `Influence.dffits_internal`
* dffits : DFFITS statistics using externally Studentized residuals
defined in `Influence.dffits`
* student_resid : Externally Studentized residuals defined in
`Influence.resid_studentized_external`
"""
from pandas import DataFrame
# row and column labels
data = self.results.model._data
row_labels = data.row_labels
beta_labels = ['dfb_' + i for i in data.xnames]
# grab the results
summary_data = DataFrame(dict(
cooks_d = self.cooks_distance[0],
standard_resid = self.resid_studentized_internal,
hat_diag = self.hat_matrix_diag,
dffits_internal = self.dffits_internal[0],
student_resid = self.resid_studentized_external,
dffits = self.dffits[0],
),
index = row_labels)
#NOTE: if we don't give columns, order of above will be arbitrary
dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
index=row_labels)
return dfbeta.join(summary_data)
def summary_table(self, float_fmt="%6.3f"):
'''create a summary table with all influence and outlier measures
This currently does not distinguish between statistics that can be
calculated from the original regression results and those for which a
leave-one-observation-out loop is needed.
Returns
-------
res : SimpleTable instance
SimpleTable instance with the results, can be printed
Notes
-----
This also attaches table_data to the instance.
'''
#print self.dfbetas
# table_raw = [ np.arange(self.nobs),
# self.endog,
# self.fittedvalues,
# self.cooks_distance(),
# self.resid_studentized_internal,
# self.hat_matrix_diag,
# self.dffits_internal,
# self.resid_studentized_external,
# self.dffits,
# self.dfbetas
# ]
table_raw = [ ('obs', np.arange(self.nobs)),
('endog', self.endog),
('fitted\nvalue', self.results.fittedvalues),
("Cook's\nd", self.cooks_distance[0]),
("student.\nresidual", self.resid_studentized_internal),
('hat diag', self.hat_matrix_diag),
('dffits \ninternal', self.dffits_internal[0]),
("ext.stud.\nresidual", self.resid_studentized_external),
('dffits', self.dffits[0]),
('dfbeta\nslope', self.dfbetas[:,1]) #skip needs to partially unravel
]
colnames, data = zip(*table_raw) #unzip
data = np.column_stack(data)
self.table_data = data
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
return SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
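# A minimal sketch of driving OLSInfluence from a fitted OLS result; the data are
# synthetic, only a few of the available measures are pulled out, and this helper
# is not called anywhere in the module.
def _demo_ols_influence():
    rs = np.random.RandomState(0)
    exog = np.column_stack((np.ones(40), rs.randn(40, 2)))
    endog = np.dot(exog, [1., 2., -1.]) + rs.randn(40)
    infl = OLSInfluence(OLS(endog, exog).fit())
    cooks_d, cooks_pvals = infl.cooks_distance
    return infl.hat_matrix_diag, infl.resid_studentized_internal, cooks_d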
def summary_table(res, alpha=0.05):
'''generate summary table of outlier and influence similar to SAS
Parameters
----------
alpha : float
significance level for confidence interval
Returns
-------
st : SimpleTable instance
table with results that can be printed
data : ndarray
calculated measures and statistics for the table
ss2 : list of strings
column_names for table (Note: rows of table are observations)
'''
from scipy import stats
from statsmodels.sandbox.regression.predstd import wls_prediction_std
infl = OLSInfluence(res)
#standard error for predicted mean
#Note: using hat_matrix only works for fitted values
predict_mean_se = np.sqrt(infl.hat_matrix_diag*res.mse_resid)
tppf = stats.t.isf(alpha/2., res.df_resid)
predict_mean_ci = np.column_stack([
res.fittedvalues - tppf * predict_mean_se,
res.fittedvalues + tppf * predict_mean_se])
#standard error for predicted observation
predict_se, predict_ci_low, predict_ci_upp = wls_prediction_std(res)
predict_ci = np.column_stack((predict_ci_low, predict_ci_upp))
#standard deviation of residual
resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag))
table_sm = np.column_stack([
np.arange(res.nobs) + 1,
res.model.endog,
res.fittedvalues,
predict_mean_se,
predict_mean_ci[:,0],
predict_mean_ci[:,1],
predict_ci[:,0],
predict_ci[:,1],
res.resid,
resid_se,
infl.resid_studentized_internal,
infl.cooks_distance[0]
])
#colnames, data = zip(*table_raw) #unzip
data = table_sm
ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue', 'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp', 'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual', 'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"]
colnames = ss2
#self.table_data = data
#data = np.column_stack(data)
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
st = SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
return st, data, ss2
if __name__ == '__main__':
import statsmodels.api as sm
data = np.array('''\
64 57 8
71 59 10
53 49 6
67 62 11
55 51 8
58 50 7
77 55 10
57 48 9
56 42 10
51 42 6
76 61 12
68 57 9'''.split(), float).reshape(-1,3)
varnames = 'weight height age'.split()
endog = data[:,0]
exog = sm.add_constant(data[:,2], prepend=True)
res_ols = sm.OLS(endog, exog).fit()
hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1)
x = res_ols.model.exog
hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T)))
from numpy.testing import assert_almost_equal
assert_almost_equal(hh, hh_check, decimal=13)
res = res_ols #alias
#http://en.wikipedia.org/wiki/PRESS_statistic
#predicted residuals, leave one out predicted residuals
resid_press = res.resid / (1-hh)
ess_press = np.dot(resid_press, resid_press)
sigma2_est = res.mse_resid #can be replaced by different estimators of sigma
sigma_est = np.sqrt(sigma2_est)
resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh)
#http://en.wikipedia.org/wiki/DFFITS:
dffits = resid_studentized * np.sqrt(hh / (1 - hh))
nobs, k_vars = res.model.exog.shape
#Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS)
dffits_threshold = 2 * np.sqrt(k_vars/nobs)
res_ols.df_modelwc = res_ols.df_model + 1
n_params = res.model.exog.shape[1]
#http://en.wikipedia.org/wiki/Cook%27s_distance
cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2
#or
#Eubank p.93, 94
cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh)
#threshold if normal, also Wikipedia
from scipy import stats
alpha = 0.1
#df looks wrong
print stats.f.isf(1-alpha, n_params, res.df_resid)
print stats.f.sf(cooks_d, n_params, res.df_resid)
print 'Cooks Distance'
print cooks_d
print cooks_d2
doplot = 0
if doplot:
import matplotlib.pyplot as plt
fig = plt.figure()
# ax = fig.add_subplot(3,1,1)
# plt.plot(andrew_results.weights, 'o', label='rlm weights')
# plt.legend(loc='lower left')
ax = fig.add_subplot(3,1,2)
plt.plot(cooks_d, 'o', label="Cook's distance")
plt.legend(loc='upper left')
ax2 = fig.add_subplot(3,1,3)
plt.plot(resid_studentized, 'o', label='studentized_resid')
plt.plot(dffits, 'o', label='DFFITS')
leg = plt.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5) #, fontsize='small')
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize='small') # the legend text fontsize
print reset_ramsey(res, degree=3)
#note, constant in the first column (prepend=True above)
for i in range(1):
print variance_inflation_factor(res.model.exog, i)
infl = OLSInfluence(res_ols)
print infl.resid_studentized_external
print infl.resid_studentized_internal
print infl.summary_table()
print summary_table(res, alpha=0.05)[0]
'''
>>> res.resid
array([ 4.28571429, 4. , 0.57142857, -3.64285714,
-4.71428571, 1.92857143, 10. , -6.35714286,
-11. , -1.42857143, 1.71428571, 4.64285714])
>>> infl.hat_matrix_diag
array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034,
0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429,
0.33613445, 0.08403361])
>>> infl.resid_press
array([ 4.76635514, 4.53333333, 0.8 , -4.56315789,
-5.24299065, 2.31818182, 11.33333333, -6.94036697,
-12.46666667, -2. , 2.58227848, 5.06880734])
>>> infl.ess_press
465.98646628086374
'''
| bsd-3-clause |
rcomer/iris | lib/iris/tests/test_quickplot.py | 3 | 8026 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests the high-level plotting interface.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris.tests.test_plot as test_plot
import iris
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
# Caches _load_theta so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = "result"
if not cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
@cache
def _load_theta():
path = tests.get_data_path(("PP", "COLPEX", "theta_and_orog_subset.pp"))
theta = iris.load_cube(path, "air_potential_temperature")
# Improve the unit
theta.units = "K"
return theta
@tests.skip_data
@tests.skip_plot
class TestQuickplotCoordinatesGiven(test_plot.TestPlotCoordinatesGiven):
def setUp(self):
tests.GraphicsTest.setUp(self)
filename = tests.get_data_path(
("PP", "COLPEX", "theta_and_orog_subset.pp")
)
self.cube = test_plot.load_cube_once(
filename, "air_potential_temperature"
)
self.draw_module = iris.quickplot
self.contourf = test_plot.LambdaStr(
"iris.quickplot.contourf",
lambda cube, *args, **kwargs: iris.quickplot.contourf(
cube, *args, **kwargs
),
)
self.contour = test_plot.LambdaStr(
"iris.quickplot.contour",
lambda cube, *args, **kwargs: iris.quickplot.contour(
cube, *args, **kwargs
),
)
self.points = test_plot.LambdaStr(
"iris.quickplot.points",
lambda cube, *args, **kwargs: iris.quickplot.points(
cube, c=cube.data, *args, **kwargs
),
)
self.plot = test_plot.LambdaStr(
"iris.quickplot.plot",
lambda cube, *args, **kwargs: iris.quickplot.plot(
cube, *args, **kwargs
),
)
self.results = {
"yx": (
[self.contourf, ["grid_latitude", "grid_longitude"]],
[self.contourf, ["grid_longitude", "grid_latitude"]],
[self.contour, ["grid_latitude", "grid_longitude"]],
[self.contour, ["grid_longitude", "grid_latitude"]],
[self.points, ["grid_latitude", "grid_longitude"]],
[self.points, ["grid_longitude", "grid_latitude"]],
),
"zx": (
[self.contourf, ["model_level_number", "grid_longitude"]],
[self.contourf, ["grid_longitude", "model_level_number"]],
[self.contour, ["model_level_number", "grid_longitude"]],
[self.contour, ["grid_longitude", "model_level_number"]],
[self.points, ["model_level_number", "grid_longitude"]],
[self.points, ["grid_longitude", "model_level_number"]],
),
"tx": (
[self.contourf, ["time", "grid_longitude"]],
[self.contourf, ["grid_longitude", "time"]],
[self.contour, ["time", "grid_longitude"]],
[self.contour, ["grid_longitude", "time"]],
[self.points, ["time", "grid_longitude"]],
[self.points, ["grid_longitude", "time"]],
),
"x": ([self.plot, ["grid_longitude"]],),
"y": ([self.plot, ["grid_latitude"]],),
}
@tests.skip_data
@tests.skip_plot
class TestLabels(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.theta = _load_theta()
def _slice(self, coords):
"""Returns the first cube containing the requested coordinates."""
for cube in self.theta.slices(coords):
break
return cube
def _small(self):
# Use a restricted size so we can make out the detail
cube = self._slice(["model_level_number", "grid_longitude"])
return cube[:5, :5]
def test_contour(self):
qplt.contour(self._small())
self.check_graphic()
qplt.contourf(
self._small(), coords=["model_level_number", "grid_longitude"]
)
self.check_graphic()
def test_contourf(self):
qplt.contourf(self._small())
cube = self._small()
iplt.orography_at_points(cube)
self.check_graphic()
qplt.contourf(
self._small(), coords=["model_level_number", "grid_longitude"]
)
self.check_graphic()
qplt.contourf(
self._small(), coords=["grid_longitude", "model_level_number"]
)
self.check_graphic()
def test_contourf_axes_specified(self):
# Check that the contourf function does not modify the matplotlib
# pyplot state machine.
# Create a figure and axes to be used by contourf
plt.figure()
axes1 = plt.axes()
# Create test figure and axes which will be the new results
# of plt.gcf and plt.gca.
plt.figure()
axes2 = plt.axes()
# Add a title to the test axes.
plt.title("This should not be changed")
# Draw the contourf on a specific axes.
qplt.contourf(self._small(), axes=axes1)
# Ensure that the correct axes got the appropriate title.
self.assertEqual(axes2.get_title(), "This should not be changed")
self.assertEqual(axes1.get_title(), "Air potential temperature")
# Check that the axes labels were set correctly.
self.assertEqual(axes1.get_xlabel(), "Grid longitude / degrees")
self.assertEqual(axes1.get_ylabel(), "Altitude / m")
def test_contourf_nameless(self):
cube = self._small()
cube.standard_name = None
cube.attributes["STASH"] = ""
qplt.contourf(cube, coords=["grid_longitude", "model_level_number"])
self.check_graphic()
def test_pcolor(self):
qplt.pcolor(self._small())
self.check_graphic()
def test_pcolormesh(self):
qplt.pcolormesh(self._small())
# cube = self._small()
# iplt.orography_at_bounds(cube)
self.check_graphic()
def test_pcolormesh_str_symbol(self):
pcube = self._small().copy()
pcube.coords("level_height")[0].units = "centimeters"
qplt.pcolormesh(pcube)
self.check_graphic()
def test_map(self):
cube = self._slice(["grid_latitude", "grid_longitude"])
qplt.contour(cube)
self.check_graphic()
# check that adding 360 to the longitude points gives an almost identical result
lon = cube.coord("grid_longitude")
lon.points = lon.points + 360
qplt.contour(cube)
self.check_graphic()
def test_alignment(self):
cube = self._small()
qplt.contourf(cube)
# qplt.outline(cube)
qplt.points(cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestTimeReferenceUnitsLabels(tests.GraphicsTest):
def setUp(self):
super().setUp()
path = tests.get_data_path(("PP", "aPProt1", "rotatedMHtimecube.pp"))
self.cube = iris.load_cube(path)[:, 0, 0]
def test_reference_time_units(self):
# units should not be displayed for a reference time
qplt.plot(self.cube.coord("time"), self.cube)
plt.gcf().autofmt_xdate()
self.check_graphic()
def test_not_reference_time_units(self):
# units should be displayed for other time coordinates
qplt.plot(self.cube.coord("forecast_period"), self.cube)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
VasLem/KinectPainting | OptGridSearchCV.py | 1 | 7990 | '''
An optimized method for GridSearchCV, which iteratively performs grid search
and reduces the span of the parameters after each iteration. Made to make the
life of an engineer less boring.
'''
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
def optGridSearchCV(classifier, xtrain, ytrain, parameters, reduction_ratio=2,
iter_num=3, scoring='f1_macro', fold_num=5, first_rand=False,
n_jobs=1,verbose=1,only_rand=False, only_brute=False):
'''
The local optimum resides inside the parameter space, with bounds defined
by the min and max of each parameter. Thus, if no prior knowledge exists,
a recommended way to run this function is to set the min and max of each
parameter to the widest allowed bounds.
<classifier>: initialized classifier object
<xtrain>: features of samples, with shape (n_samples, n_features)
<ytrain>: labels of samples
<parameters>: dictionary of parameters, same with GridSearchCV <params>
type
<reduction_ratio>: the scale of relative reduction of the span of the
numeric parameters
<iter_num>: number of iterations to take place
<fold_num>: number of folds for CrossValidation
<first_rand> : True to perform random parameter picking (normally
distributed) first and brute parameter picking (using linspace) second.
If False, the order of the two methods is reversed
<only_rand> : True to perform only random picking
<only_brute> : True to perform only brute picking
'''
def print_params(parameters, preset=''):
'''
print parameters in pandas form, if allowed
'''
try:
from pandas import DataFrame
if isinstance(parameters, list):
params = DataFrame(parameters)
else:
try:
params = DataFrame.from_dict(parameters)
except ValueError:
params = DataFrame([parameters])
print(params)
except ImportError:
print(preset+str(parameters))
def reduce_list(params, best_params):
'''
Reduce a list of parameter dictionaries to the single parameter dictionary
which corresponds to the <best_params> found by <GridSearchCV>
'''
best_keys = set(best_params.keys())
for count, dic in enumerate(params):
if best_keys == set(dic.keys()):
return dic, count
raise Exception
def update_parameters(prev_parameters, best_parameters, num_of_samples,
rate=2, israndom=True):
'''
Each new parameter has the same number of values as the previous one and
its values are inside the bounds set by the min and max values of the
old parameter. Furthermore, the best value from the previous parameter
is kept inside the new parameter.
<num_of_samples>: dictionary with keys from the best_parameters.
<prev_parameters>: previous parameters, which hold all tested values
<best_parameters>: parameters found to provide the best score (using
GridSearchCV)
<israndom>: whether to perform random or brute method
<rate>: rate of parameters span relative reduction
'''
rate = float(rate)
new_parameters = {}
for key in best_parameters:
if (not isinstance(best_parameters[key], str) and
not isinstance(best_parameters[key], bool) and
not best_parameters[key] is None):
if israndom:
center = best_parameters[key]
std = np.std(prev_parameters[key]) / float(rate)
pick = np.random.normal(loc=center, scale=std,
size=100 * num_of_samples[key])
pick = pick[(pick >=
np.min(prev_parameters[key]))*
(pick <= np.max(prev_parameters[key]))]
new_parameters[key] = pick[
:(num_of_samples[key]-1)]
else:
center = best_parameters[key]
rang = np.max(prev_parameters[
key]) - np.min(prev_parameters[key])
rang = [max(center - rang /
float(rate), min(prev_parameters[key])),
min(center + rang /
float(rate), max(prev_parameters[key]))]
new_parameters[key] = np.linspace(
rang[0], rang[1], num_of_samples[key]-1)
if isinstance(best_parameters[key], int):
new_parameters[key] = new_parameters[key].astype(int)
new_parameters[key] = new_parameters[key].tolist()
new_parameters[key] += [best_parameters[key]]
else:
new_parameters[key] = [best_parameters[key]]
return new_parameters
num_of_samples = {}
if not isinstance(parameters, list):
num_of_samples = {}
for key in parameters:
num_of_samples[key] = len(parameters[key])
best_scores = []
best_params = []
best_estimators = []
rand_flags = [first_rand, not first_rand]
if only_brute:
rand_flags = [False]
if only_rand:
rand_flags = [True]
for it_count in range(iter_num):
for rand_flag in rand_flags:
if verbose==2:
print('Parameters to test on:')
print_params(parameters,'\t')
try:
grids = GridSearchCV(
classifier,
parameters,
scoring=scoring,
cv=fold_num,
n_jobs=n_jobs, verbose=verbose)
grids.fit(xtrain, ytrain)
best_scores.append(grids.best_score_)
best_params.append(grids.best_params_)
best_estimators.append(grids.best_estimator_)
grids_params = grids.best_params_
except ValueError:
print('Invalid parameters')
raise
best_params = parameters
if rand_flag == rand_flags[1]:
print('Iteration Number: ' + str(it_count))
print('\tBest Classifier Params:')
print_params(best_params[-1],'\t\t')
print('\tBest Score:' + str(best_scores[-1]))
if isinstance(parameters, list):
parameters, _ = reduce_list(parameters, grids_params)
for key in parameters:
num_of_samples[key] = len(parameters[key])
if rand_flag == rand_flags[1] and it_count == iter_num - 1:
break
print('Reducing Parameters using '+ ['random' if rand_flag else
'brute'][0] + ' method')
parameters = update_parameters(parameters, grids_params, num_of_samples,
rate=reduction_ratio,
israndom=rand_flag)
return best_params, best_scores, best_estimators
def example():
'''
An example of usage
'''
parameters = [{'C': [1, 10, 100, 1000], 'tol': [0.001, 0.0001],
'class_weight': [None, 'balanced']},
{'C': [1, 10, 100, 1000], 'multi_class': ['crammer_singer'],
'tol': [0.001, 0.0001]}]
xtrain = np.random.random((100, 20))
xtrain[xtrain < 0] = 0
ytrain = (np.random.random(100) > 0.5).astype(int)
lsvc = LinearSVC()
optGridSearchCV(lsvc, xtrain, ytrain, parameters, reduction_ratio=2,
iter_num=3, scoring='f1_macro', fold_num=5, first_rand=False,
n_jobs=4)
if __name__ == '__main__':
example()
| bsd-3-clause |
elemhsb/mallorca | sw/airborne/test/ahrs/ahrs_utils.py | 15 | 5172 | #! /usr/bin/env python
# $Id$
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#import os
#from optparse import OptionParser
#import scipy
#from scipy import optimize
import shlex, subprocess
from pylab import *
from array import array
import numpy
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print "\nBuilding ahrs"
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_"+ahrs_type] + build_opt
# print args
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print " # "+i,
print
print "Running simulation"
print " using traj " + str(traj_nb)
p = subprocess.Popen(args=["./run_ahrs_on_synth",str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print " "+i,
# print "\n"
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')
]
pos_data_type = [ ('x0_true', 'float32'), ('y0_true', 'float32'), ('z0_true', 'float32'),
('x1_true', 'float32'), ('y1_true', 'float32'), ('z1_true', 'float32'),
('x2_true', 'float32'), ('y2_true', 'float32'), ('z2_true', 'float32'),
('x3_true', 'float32'), ('y3_true', 'float32'), ('z3_true', 'float32'),
]
mydescr = numpy.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print " "+line,
else:
fields = line.strip().split(' ');
# print fields
for i, number in enumerate(fields):
data[i].append(number)
print
for i in xrange(len(mydescr)):
data[i] = cast[mydescr[i]](data[i])
return numpy.rec.array(data, dtype=mydescr)
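# A minimal sketch of the column-wise packing used above to turn whitespace
# separated text lines into a numpy record array; the field names and values are
# made up and this helper is not called anywhere in the script.
def _demo_pack_records():
    descr = numpy.dtype([('time', 'float32'), ('phi_true', 'float32')])
    lines = ["0.00 1.00", "0.01 1.05"]
    columns = [[] for _ in descr.names]
    for line in lines:
        for i, field in enumerate(line.strip().split(' ')):
            columns[i].append(field)
    columns = [numpy.array(col, dtype=descr[i]) for i, col in enumerate(columns)]
    return numpy.rec.array(columns, dtype=descr)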
def plot_simulation_results(plot_true_state, lsty, type, sim_res):
print "Plotting Results"
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=type)
ylabel('degres')
title('phi')
legend()
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_ahrs, lsty)
title('theta')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_ahrs, lsty)
title('psi')
subplot(3,3,4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
ylabel('degres/s')
title('p')
subplot(3,3,5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
title('q')
subplot(3,3,6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
title('r')
subplot(3,3,7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
ylabel('degres/s')
xlabel('time in s')
title('bp')
subplot(3,3,8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
xlabel('time in s')
title('bq')
subplot(3,3,9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
xlabel('time in s')
title('br')
if plot_true_state:
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_true, 'r--')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_true, 'r--')
subplot(3,3,4)
plot(sim_res.time, sim_res.p_true, 'r--')
subplot(3,3,5)
plot(sim_res.time, sim_res.q_true, 'r--')
subplot(3,3,6)
plot(sim_res.time, sim_res.r_true, 'r--')
subplot(3,3,7)
plot(sim_res.time, sim_res.bp_true, 'r--')
subplot(3,3,8)
plot(sim_res.time, sim_res.bq_true, 'r--')
subplot(3,3,9)
plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show();
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
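# A minimal sketch of consensus_score on two identical, hand-built one-bicluster
# sets; real inputs usually come from estimators exposing rows_ and columns_, and
# this helper is not called anywhere in the module.
def _demo_consensus_score():
    rows = np.array([[True, True, False, False]])
    columns = np.array([[True, False, True, False]])
    # identical bicluster sets have a consensus score of 1.0
    return consensus_score((rows, columns), (rows, columns))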
| mit |
hhbyyh/spark | python/pyspark/sql/group.py | 24 | 12490 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
@since(1.6)
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
@since(2.3)
def apply(self, udf):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
.. note:: This function requires a full shuffle. all the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: Experimental
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
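# A minimal sketch of chaining groupBy with the aggregation methods above; it
# assumes an already created SparkSession passed in as `spark` and is not invoked
# anywhere in this module.
def _demo_grouped_data(spark):
    df = spark.createDataFrame([("a", 1), ("a", 3), ("b", 2)], ["key", "v"])
    return df.groupBy("key").agg({"v": "max"}).collect()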
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
SanPen/PracticalGridModeling | examples/substation.py | 1 | 1669 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 16:40:22 2017
@author: santi
"""
import pandas as pd
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
if __name__ == "__main__":
# load data
conn_df = pd.read_excel('substation.xlsx', 'Connectivity', index_col=0).fillna(0)
stat_df = pd.read_excel('substation.xlsx', 'States', index_col=0)
pos_df = pd.read_excel('substation.xlsx', 'Pos', index_col=0)
node_names = conn_df.columns.values
G = nx.Graph()
pos = dict()
lpos = dict()
# add nodes to the graph
for i in range(len(node_names)):
G.add_node(node_names[i])
x = pos_df.values[i, 0]
y = pos_df.values[i, 1]
pos[node_names[i]] = [x, y]
lpos[node_names[i]] = [x, y]
# add branches to the graph
for i, line in enumerate(conn_df.values):
if stat_df.values[i] > 0:
x, y = np.where(line > 0)[0] # works because there are only 2 values per line with a 1 in the excel file
n1 = node_names[x]
n2 = node_names[y]
G.add_edge(n1, n2)
# get the islands
islands = list(nx.connected_components(G))
sub_grids = list()
print('Islands:\n', islands, '\n\n')
for island in islands:
g = nx.subgraph(G, island)
sub_grids.append(g)
# plot
nx.draw(G, pos=pos, node_size=100, node_color='black')
for name in node_names:
x, y = lpos[name]
plt.text(x+1.5,y+1,s=name, bbox=dict(facecolor='white', alpha=0.5), horizontalalignment='center')
plt.show() | gpl-3.0 |
akionakamura/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet notice that glmnet divides it
    # by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
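# With the default arguments this returns, as a quick usage sketch:
#     X, y, X_test, y_test = build_dataset()
#     # X.shape == (50, 200), y.shape == (50,), only the first 10 weights non-zero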
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that they don't fall more than 1 position apart
    # in the grid of clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
moutai/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
shipci/sympy | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
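# A usage sketch: a test runner can silence both warnings with
#
#     from sympy.external import importtools
#     importtools.WARN_NOT_INSTALLED = False
#     importtools.WARN_OLD_VERSION = False
#
# before calling import_module().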
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # Otherwise we don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
scottw13/BET-1 | doc/conf.py | 1 | 8589 | # Copyright (C) 2014-2015 The BET Development Team
# -*- coding: utf-8 -*-
#
# BET documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 15 14:33:13 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.intersphinx']
intersphinx_cache_limit = 10  # days to keep cached inventories
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None),
'polyadcirc' : ('http://ut-chg.github.io/PolyADCIRC', None),
'matplotlib':('http://matplotlib.sourceforge.net', None),
'numpy':('http://docs.scipy.org/doc/numpy',None),
'np':('http://docs.scipy.org/doc/numpy',None),
'scipy':('http://docs.scipy.org/doc/scipy',None)
}
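# With the mapping above, cross-references such as :class:`numpy.ndarray` or
# :func:`scipy.optimize.minimize` resolve to the external projects' documentation
# (illustrative examples; any object present in the cached inventories works).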
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BET'
copyright = u'2014, The BET Development Team (Lindley Graham, Steven Mattis, Troy Butler)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BETdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BET.tex', u'BET Documentation',
u'The BET Development Team (Lindley Graham, Steven Mattis, Troy Butler)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bet', u'BET Documentation',
[u'The BET Development Team (Lindley Graham, Steven Mattis, Troy Butler)'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BET', u'BET Documentation',
u'The BET Development Team (Lindley Graham, Steven Mattis, Troy Butler)', 'BET', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-3.0 |
eramirem/astroML | book_figures/chapter7/fig_spec_reconstruction.py | 3 | 3410 | """
PCA Reconstruction of a spectrum
--------------------------------
Figure 7.6
The reconstruction of a particular spectrum from its eigenvectors. The input
spectrum is shown in gray, and the partial reconstruction for progressively
more terms is shown in black. The top panel shows only the mean of the set of
spectra. By the time 20 PCA components are added, the reconstruction is very
close to the input, as indicated by the expected total variance of 94%.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#------------------------------------------------------------
# Compute PCA components
# Eigenvalues can be computed using PCA as in the commented code below:
#from sklearn.decomposition import PCA
#pca = PCA()
#pca.fit(spectra)
#evals = pca.explained_variance_ratio_
#evals_cs = evals.cumsum()
# because the spectra have been reconstructed from masked values, this
# is not exactly correct in this case: we'll use the values computed
# in the file compute_sdss_pca.py
evals = data['evals'] ** 2
evals_cs = evals.cumsum()
evals_cs /= evals_cs[-1]
evecs = data['evecs']
spec_mean = spectra.mean(0)
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
coeff = np.dot(evecs, spec - spec_mean)
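# coeff[i] is the projection of the mean-subtracted spectrum onto the i-th
# eigenvector, so the rank-n reconstruction plotted below is
#     spec_mean + sum_{i < n} coeff[i] * evecs[i]
# which is what spec_mean + np.dot(coeff[:n], evecs[:n]) computes.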
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0, top=0.95, bottom=0.1, left=0.12, right=0.93)
for i, n in enumerate([0, 4, 8, 20]):
ax = fig.add_subplot(411 + i)
ax.plot(wavelengths, spec, '-', c='gray')
ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
if i < 3:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylim(-2, 21)
ax.set_ylabel('flux')
if n == 0:
text = "mean"
elif n == 1:
text = "mean + 1 component\n"
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
else:
text = "mean + %i components\n" % n
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
ax.text(0.02, 0.93, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
| bsd-2-clause |
kmunve/TSanalysis | Plotting/meteo_plots.py | 1 | 1137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from numpy import arange
"""
__author__: 'kmunve'
"""
def _temperature_plot(values, xticks=None, p_title=None, p_xlabel='Time', p_ylabel='Temperature'):
"""
TODO: add a check if the values are in Kelvin, Fahrenheit, or Celsius and adjust plot parameters accordingly.
TODO: rotate xlabels and change format to YYYY-MM-DD:HH
:param values:
    :param xticks:
:return:
"""
y = values
if xticks is None:
x = arange(len(y))
else:
x = xticks
# Create figure
plt.figure(figsize=(14,6))
ax = plt.axes()
# Set y limits
ax.set_ylim(-10, 25)
plt.plot(x, y, color='green', linewidth=2)
plt.axhline(0.0, color='grey', linestyle='--')
    if p_title is not None:
        plt.title(p_title)
    plt.xlabel(p_xlabel)
    plt.ylabel(p_ylabel)
def temperature_plot(values, xticks=None, p_title=None, p_xlabel='Time', p_ylabel='Temperature'):
"""
    Plot temperature values and invoke plt.show() for external use.
"""
_temperature_plot(values, xticks, p_title, p_xlabel, p_ylabel)
plt.show()
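# Minimal usage sketch with synthetic hourly temperatures:
if __name__ == "__main__":
    import numpy as np
    demo_values = 10.0 + 5.0 * np.sin(np.linspace(0.0, 2.0 * np.pi, 24))
    temperature_plot(demo_values, p_title='Hourly temperature')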
| mit |
glennq/scikit-learn | sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
dnjohnstone/hyperspy | hyperspy/defaults_parser.py | 1 | 10780 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os.path
import configparser
import logging
import traits.api as t
from matplotlib.cm import cmap_d
from hyperspy.misc.config_dir import config_path, os_name, data_path
from hyperspy.misc.ipython_tools import turn_logging_on, turn_logging_off
from hyperspy.ui_registry import add_gui_method
defaults_file = os.path.join(config_path, 'hyperspyrc')
eels_gos_files = os.path.join(data_path, 'EELS_GOS.tar.gz')
_logger = logging.getLogger(__name__)
def guess_gos_path():
if os_name == 'windows':
        # If DM is installed, use the GOS tables from the default
        # installation location in Windows
program_files = os.environ['PROGRAMFILES']
gos = 'Gatan\\DigitalMicrograph\\EELS Reference Data\\H-S GOS Tables'
gos_path = os.path.join(program_files, gos)
        # Else, use the default location in the .hyperspy folder
if os.path.isdir(gos_path) is False and \
'PROGRAMFILES(X86)' in os.environ:
program_files = os.environ['PROGRAMFILES(X86)']
gos_path = os.path.join(program_files, gos)
if os.path.isdir(gos_path) is False:
gos_path = os.path.join(config_path, 'EELS_GOS')
else:
gos_path = os.path.join(config_path, 'EELS_GOS')
return gos_path
if os.path.isfile(defaults_file):
    # Remove config file if obsolete
with open(defaults_file) as f:
if 'Not really' in f.readline():
# It is the old config file
defaults_file_exists = False
else:
defaults_file_exists = True
if not defaults_file_exists:
        # It actually exists, but is an obsolete, unsupported version of it,
        # so we delete it.
        _logger.info('Removing obsolete config file')
os.remove(defaults_file)
else:
defaults_file_exists = False
# Defaults template definition starts#####################################
# This "section" is all that has to be modified to add or remove sections and
# options from the defaults
# Due to https://github.com/enthought/traitsui/issues/23 the desc text as
# displayed in the tooltip gets "Specifies" prepended.
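# As a sketch, adding a new option is just adding another trait to one of the
# classes below, e.g. (hypothetical name):
#
#     my_new_flag = t.CBool(
#         False,
#         label='My new flag',
#         desc='If enabled, ...')
#
# template2config/config2template defined further down pick it up automatically.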
class GeneralConfig(t.HasTraits):
logger_on = t.CBool(
False,
label='Automatic logging (requires IPython)',
desc='If enabled, HyperSpy will store a log in the current directory '
'of all the commands typed')
show_progressbar = t.CBool(
True,
label='Show progress bar',
desc='If enabled, show a progress bar when available')
dtb_expand_structures = t.CBool(
True,
label='Expand structures in DictionaryTreeBrowser',
desc='If enabled, when printing DictionaryTreeBrowser (e.g. '
'metadata), long lists and tuples will be expanded and any '
'dictionaries in them will be printed similar to '
'DictionaryTreeBrowser, but with double lines')
logging_level = t.Enum(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', ],
desc='the log level of all hyperspy modules.')
parallel = t.CBool(
True,
desc='Use parallel threads for computations by default.'
)
nb_progressbar = t.CBool(
True,
desc='Attempt to use ipywidgets progressbar'
)
def _logger_on_changed(self, old, new):
if new is True:
turn_logging_on()
else:
turn_logging_off()
class EELSConfig(t.HasTraits):
eels_gos_files_path = t.Directory(
guess_gos_path(),
label='GOS directory',
desc='The GOS files are required to create the EELS edge components')
class GUIs(t.HasTraits):
enable_ipywidgets_gui = t.CBool(
True,
desc="Display ipywidgets in the Jupyter Notebook. "
"Requires installing hyperspy_gui_ipywidgets.")
enable_traitsui_gui = t.CBool(
True,
desc="Display traitsui user interface elements. "
"Requires installing hyperspy_gui_traitsui.")
warn_if_guis_are_missing = t.CBool(
True,
desc="Display warnings, if hyperspy_gui_ipywidgets or hyperspy_gui_traitsui are missing.")
class PlotConfig(t.HasTraits):
saturated_pixels = t.CFloat(0.05,
label='Saturated pixels',
desc='Set the default saturated pixels value '
'for plotting images.'
)
cmap_navigator = t.Enum(list(cmap_d.keys()),
label='Color map navigator',
desc='Set the default color map for the navigator.',
)
cmap_signal = t.Enum(list(cmap_d.keys()),
label='Color map signal',
desc='Set the default color map for the signal plot.',
)
dims_024_increase = t.Str('right',
label='Navigate right'
)
dims_024_decrease = t.Str('left',
label='Navigate left',
)
dims_135_increase = t.Str('down',
label='Navigate down',
)
dims_135_decrease = t.Str('up',
label='Navigate up',
)
modifier_dims_01 = t.Enum(['ctrl', 'alt', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
'ctrl+alt+shift'], label='Modifier key for 1st and 2nd dimensions') # 0 elem is default
modifier_dims_23 = t.Enum(['shift', 'alt', 'ctrl', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
'ctrl+alt+shift'], label='Modifier key for 3rd and 4th dimensions') # 0 elem is default
modifier_dims_45 = t.Enum(['alt', 'ctrl', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
'ctrl+alt+shift'], label='Modifier key for 5th and 6th dimensions') # 0 elem is default
class EDSConfig(t.HasTraits):
eds_mn_ka = t.CFloat(130.,
label='Energy resolution at Mn Ka (eV)',
desc='default value for FWHM of the Mn Ka peak in eV,'
'This value is used as a first approximation'
'of the energy resolution of the detector.')
eds_tilt_stage = t.CFloat(
0.,
label='Stage tilt',
desc='default value for the stage tilt in degree.')
eds_detector_azimuth = t.CFloat(
0.,
label='Azimuth angle',
desc='default value for the azimuth angle in degree. If the azimuth'
' is zero, the detector is perpendicular to the tilt axis.')
eds_detector_elevation = t.CFloat(
35.,
label='Elevation angle',
desc='default value for the elevation angle in degree.')
template = {
'General': GeneralConfig(),
'GUIs': GUIs(),
'EELS': EELSConfig(),
'EDS': EDSConfig(),
'Plot': PlotConfig(),
}
# Set the enums defaults
template['General'].logging_level = 'WARNING'
template['Plot'].cmap_navigator = 'gray'
template['Plot'].cmap_signal = 'gray'
# Defaults template definition ends ######################################
def template2config(template, config):
for section, traited_class in template.items():
config.add_section(section)
for key, item in traited_class.trait_get().items():
config.set(section, key, str(item))
def config2template(template, config):
for section, traited_class in template.items():
config_dict = {}
for name, value in config.items(section):
if value == 'True':
value = True
elif value == 'False':
value = False
if name == 'fine_structure_smoothing':
value = float(value)
config_dict[name] = value
traited_class.trait_set(True, **config_dict)
def dictionary_from_template(template):
dictionary = {}
for section, traited_class in template.items():
dictionary[section] = traited_class.get()
return dictionary
config = configparser.ConfigParser(allow_no_value=True)
template2config(template, config)
rewrite = False
if defaults_file_exists:
    # Parse the config file. It only copies to config the options that are
    # already defined. If the file contains any option that was not already
    # defined, the config file is rewritten because it is obsolete
config2 = configparser.ConfigParser(allow_no_value=True)
config2.read(defaults_file)
for section in config2.sections():
if config.has_section(section):
for option in config2.options(section):
if config.has_option(section, option):
config.set(section, option, config2.get(section, option))
else:
rewrite = True
else:
rewrite = True
if not defaults_file_exists or rewrite is True:
_logger.info('Writing the config file')
with open(defaults_file, "w") as df:
config.write(df)
# Use the traited classes to cast the content of the ConfigParser
config2template(template, config)
@add_gui_method(toolkey="hyperspy.Preferences")
class Preferences(t.HasTraits):
EELS = t.Instance(EELSConfig)
EDS = t.Instance(EDSConfig)
General = t.Instance(GeneralConfig)
GUIs = t.Instance(GUIs)
Plot = t.Instance(PlotConfig)
def save(self):
config = configparser.ConfigParser(allow_no_value=True)
template2config(template, config)
config.write(open(defaults_file, 'w'))
preferences = Preferences(
EELS=template['EELS'],
EDS=template['EDS'],
General=template['General'],
GUIs=template['GUIs'],
Plot=template['Plot'],
)
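# A usage sketch: settings can be changed programmatically and persisted, e.g.
#
#     preferences.General.show_progressbar = False
#     preferences.save()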
if preferences.General.logger_on:
turn_logging_on(verbose=0)
def file_version(fname):
with open(fname, 'r') as f:
for l in f.readlines():
if '__version__' in l:
return l[l.find('=') + 1:].strip()
return '0'
| gpl-3.0 |
themrmax/scikit-learn | doc/conf.py | 10 | 9807 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.8.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.13.3/reference'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# value: maximum width used when resizing the carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
krisht/Krishna-Thesis | Research/src/BrainNet.py | 1 | 43359 | from __future__ import print_function
import datetime
import itertools
import matplotlib
import os
import re
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "FreeSerif"
import numpy as np
import psutil
import random
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn import neighbors
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import normalize
from sklearn.manifold import TSNE
curr_time = datetime.datetime.now()
loss_mem = []
loss_mem_skip = []
def norm_op(vector, axisss):
#return normalize(vector, axis=axisss, norm='l2')
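# NOTE: the L2 normalization above is disabled; the input is simply rescaled by 1e5 and the axisss argument is unused.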
return vector * 10e4
def plot_embedding(X, y, epoch, accuracy, num_to_label, title):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
cmap = plt.get_cmap('gist_rainbow')
color_map = [cmap(1.*i/6) for i in range(6)]
legend_entry = []
for ii, c in enumerate(color_map):
legend_entry.append(matplotlib.patches.Patch(color=c, label=num_to_label[ii]))
plt.figure(figsize=(4.0, 4.0))
plt.scatter(X[:,0], X[:, 1], c=y, cmap=matplotlib.colors.ListedColormap(color_map), s=2)
plt.legend(handles=legend_entry)
plt.xticks([]), plt.yticks([])
#plt.title(title)
plt.savefig('./%s Results/%s_tSNE_plot_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight')
def compute_tSNE(X, y, epoch, accuracy, num_to_label, with_seizure=None, title="t-SNE Embedding of DCNN Clustering Network"):
tsne = TSNE(n_components=2, init='random', random_state=0)
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne, y, epoch=epoch, accuracy=accuracy, num_to_label=num_to_label, title=title)
if with_seizure is None:
np.savez('./%s Results/%s_tSNE_plot_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y)
elif with_seizure == 1:
np.savez('./%s Results/%s_tSNE_plot_with_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y)
elif with_seizure == 0:
np.savez('./%s Results/%s_tSNE_plot_without_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y)
elif with_seizure == 2:
np.savez('./%s Results/%s_tSNE_plot_with_only_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y)
def get_loss(loss_mem, loss_mem_skip):
plt.figure(figsize=(4.0, 4.0))
plt.plot(loss_mem_skip, 'ro-', markersize=2)
plt.xlabel("1000 Iterations")
plt.ylabel("Average Loss in 1000 Iterations")
plt.title("Iterations vs. Average Loss")
plt.savefig('./%s Results/%s_convergence_with_skip_plot.pdf' % (curr_time, curr_time), bbox_inches='tight')
plt.figure(figsize=(4.0, 4.0))
plt.plot(loss_mem, 'ro-', markersize=2)
plt.xlabel("1000 Iterations")
plt.ylabel("Average Loss in 1000 Iterations")
plt.title("Iterations vs. Average Loss")
plt.savefig('./%s Results/%s_convergence_plot.pdf' % (curr_time, curr_time), bbox_inches='tight')
def plot_confusion_matrix(cm, classes, normalize=True, cmap=plt.cm.Greys, accuracy = None, epoch=None, with_seizure=None, title = "Confusion Matrix on All Data"):
plt.figure(figsize=(4, 4))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
ax = plt.gca()
#plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
ax.yaxis.set_label_coords(-0.1,1.03)
h = ax.set_ylabel('True label', rotation=0, horizontalalignment='left')
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.nan_to_num(cm)
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{0:.2f}'.format(cm[i, j]), horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
#plt.tight_layout()
plt.xlabel('Predicted label')
#plt.title(title)
#plt.show()
if with_seizure is None:
plt.savefig('./%s Results/%s_confusion_matrix_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight')
elif with_seizure == 1:
plt.savefig('./%s Results/%s_confusion_matrix_with_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight')
elif with_seizure == 0:
plt.savefig('./%s Results/%s_confusion_matrix_without_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight')
elif with_seizure == 2:
plt.savefig('./%s Results/%s_confusion_matrix_with_only_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight')
class BrainNet:
def __init__(self, input_shape=[None, 71, 125], path_to_files='/media/krishna/DATA', l2_weight=0.05, num_output=64, num_classes=6, alpha=.5, validation_size=500, learning_rate=1e-3, batch_size=100, train_epoch=5, keep_prob=None, debug=True, restore_dir=None):
self.bckg_num = 0
self.artf_num = 1
self.eybl_num = 2
self.gped_num = 3
self.spsw_num = 4
self.pled_num = 5
self.path_to_files = path_to_files
self.num_to_class = dict()
self.num_to_class[0] = 'BCKG'
self.num_to_class[1] = 'ARTF'
self.num_to_class[2] = 'EYBL'
self.num_to_class[3] = 'GPED'
self.num_to_class[4] = 'SPSW'
self.num_to_class[5] = 'PLED'
self.count_of_triplets = dict()
self.DEBUG = debug
self.train_path = os.path.abspath(self.path_to_files + '/Train')
self.val_path = os.path.abspath(self.path_to_files + '/Validation')
path = os.path.abspath(self.path_to_files)
self.artf = np.load(os.path.abspath(self.train_path + '/artf_files.npy'))
self.bckg = np.load(os.path.abspath(self.train_path + '/bckg_files.npy'))
self.spsw = np.load(os.path.abspath(self.train_path + '/spsw_files.npy'))
self.pled = np.load(os.path.abspath(self.train_path + '/pled_files.npy'))
self.gped = np.load(os.path.abspath(self.train_path + '/gped_files.npy'))
self.eybl = np.load(os.path.abspath(self.train_path + '/eybl_files.npy'))
self.artf_val = np.load(os.path.abspath(self.val_path + '/artf_files.npy'))
self.bckg_val = np.load(os.path.abspath(self.val_path + '/bckg_files.npy'))
self.spsw_val = np.load(os.path.abspath(self.val_path + '/spsw_files.npy'))
self.pled_val = np.load(os.path.abspath(self.val_path + '/pled_files.npy'))
self.gped_val = np.load(os.path.abspath(self.val_path + '/gped_files.npy'))
self.eybl_val = np.load(os.path.abspath(self.val_path + '/eybl_files.npy'))
if path_to_files != '/media/krishna/DATA':
self.artf = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.artf])
self.bckg = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.bckg])
self.spsw = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.spsw])
self.pled = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.pled])
self.gped = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.gped])
self.eybl = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.eybl])
self.artf_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.artf_val])
self.bckg_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.bckg_val])
self.spsw_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.spsw_val])
self.pled_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.pled_val])
self.gped_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.gped_val])
self.eybl_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.eybl_val])
files_with_spsw = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.spsw])
files_with_gped = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.gped])
files_with_pled = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.pled])
files_with_bckg = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.bckg])
files_with_artf = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.artf])
files_with_eybl = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.eybl])
total_set = (files_with_spsw | files_with_gped | files_with_pled | files_with_bckg | files_with_artf | files_with_eybl)
self.files_without_seizures = total_set - files_with_spsw - files_with_pled - files_with_gped
self.files_with_seizures = total_set - self.files_without_seizures
print(self.files_with_seizures)
print(self.files_without_seizures)
self.sess = tf.Session()
self.num_classes = num_classes
self.num_output = num_output
self.input_shape = input_shape
self.batch_size = batch_size
self.alpha = alpha
self.train_epoch = train_epoch
self.learning_rate = learning_rate
self.keep_prob = keep_prob
self.validation_size = validation_size
self.l2_weight = l2_weight
self.inference_input = tf.placeholder(tf.float32, shape=input_shape)
self.inference_model = self.get_model(self.inference_input, reuse=False)
if restore_dir is not None:
if self.DEBUG:
print("Loading saved data...")
saver = tf.train.Saver()
saver.restore(self.sess, restore_dir)
if self.DEBUG:
print("Finished loading saved data...")
if not os.path.exists('./%s Results' % curr_time):
os.makedirs('./%s Results' % curr_time)
self.metadata_file = './%s Results/METADATA.txt' % curr_time
with open(self.metadata_file, 'w') as file:
file.write('DCNN Clustering Network\n')
#file.write('Normalization on\n')
file.write('Time of training: %s\n' % curr_time)
file.write('Input shape: %s\n' % input_shape)
file.write('Path to files: %s\n' % path_to_files)
file.write('L2 Regularization Weight: %s\n' % l2_weight)
file.write('Number of outputs: %s\n' % num_output)
file.write('Number of classes: %s\n' % num_classes)
file.write('Alpha value: %s\n' % alpha)
file.write('Validation Size: %s\n' % validation_size)
file.write('Learning rate: %s\n' % learning_rate)
file.write('Batch size: %s\n' % batch_size)
file.write('Number of Epochs: %s\n' % train_epoch)
file.write('Dropout probability: %s\n' % keep_prob)
file.write('Debug mode: %s\n' % debug)
file.write('Restore directory: %s\n' % restore_dir)
file.close()
def distance_metric(self, a, b, metric='cosine'):
if metric == 'cosine':
num = tf.reduce_sum(a*b, 1)
denom = tf.sqrt(tf.reduce_sum(a*a,1))*tf.sqrt(tf.reduce_sum(b*b, 1))
result = 1 - (num / denom)  # cosine distance = 1 - cosine similarity
return result
elif metric=='euclidean':
return tf.reduce_sum(tf.square(tf.subtract(a, b)), 1)
def triplet_loss(self, alpha):
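# Standard triplet loss: mean(max(d(anchor, positive) - d(anchor, negative) + alpha, 0)); squared Euclidean distances are used below, so anchors are pushed at least a margin alpha closer to positives than to negatives.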
self.anchor = tf.placeholder(tf.float32, shape=self.input_shape)
self.positive = tf.placeholder(tf.float32, shape=self.input_shape)
self.negative = tf.placeholder(tf.float32, shape=self.input_shape)
self.anchor_out = self.get_model(self.anchor, reuse=True)
self.positive_out = self.get_model(self.positive, reuse=True)
self.negative_out = self.get_model(self.negative, reuse=True)
with tf.variable_scope('triplet_loss'):
pos_dist = self.distance_metric(self.anchor_out, self.positive_out, metric='euclidean')
neg_dist = self.distance_metric(self.anchor_out, self.negative_out, metric='euclidean')
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
def get_triplets(self, size=10):
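# Draw `size` triplets: anchor and positive come from the same randomly chosen class, the negative from a different class; self.count_of_triplets tracks how often each class combination is sampled.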
A = []
P = []
N = []
for _ in range(size):
choices = ['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf']
neg_choices = list(choices)
choice = random.choice(choices)
neg_choices.remove(choice)
if choice == 'bckg':
a = np.load(random.choice(self.bckg))
p = np.load(random.choice(self.bckg))
elif choice == 'eybl':
a = np.load(random.choice(self.eybl))
p = np.load(random.choice(self.eybl))
elif choice == 'gped':
a = np.load(random.choice(self.gped))
p = np.load(random.choice(self.gped))
elif choice == 'spsw':
a = np.load(random.choice(self.spsw))
p = np.load(random.choice(self.spsw))
elif choice == 'pled':
a = np.load(random.choice(self.pled))
p = np.load(random.choice(self.pled))
else:
a = np.load(random.choice(self.artf))
p = np.load(random.choice(self.artf))
neg_choice = random.choice(neg_choices)
if neg_choice == 'bckg':
n = np.load(random.choice(self.bckg))
elif neg_choice == 'eybl':
n = np.load(random.choice(self.eybl))
elif neg_choice == 'gped':
n = np.load(random.choice(self.gped))
elif neg_choice == 'spsw':
n = np.load(random.choice(self.spsw))
elif neg_choice == 'pled':
n = np.load(random.choice(self.pled))
else:
n = np.load(random.choice(self.artf))
key = choice + choice + neg_choice
if key in self.count_of_triplets:
self.count_of_triplets[key]+=1
else:
self.count_of_triplets[key] = 1
a = norm_op(a, axisss=0)
p = norm_op(p, axisss=0)
n = norm_op(n, axisss=0)
A.append(a)
P.append(p)
N.append(n)
A = np.asarray(A)
P = np.asarray(P)
N = np.asarray(N)
return A, P, N
# End new stuff
#
def simple_model(self, inputs, reuse=False):
with slim.arg_scope([slim.layers.conv2d, slim.layers.fully_connected], weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), weights_regularizer=slim.l2_regularizer(self.l2_weight), reuse=reuse):
net = tf.expand_dims(inputs, dim=3)
net = slim.layers.conv2d(net, num_outputs=32, kernel_size=5, scope='conv1', trainable=True)
net = slim.layers.max_pool2d(net, kernel_size=5, scope='maxpool1')
net = slim.layers.conv2d(net, num_outputs=64, kernel_size=3, scope='conv2', trainable=True)
net = slim.layers.max_pool2d(net, kernel_size=3, scope='maxpool2')
net = slim.layers.conv2d(net, num_outputs=128, kernel_size=2, scope='conv3', trainable=True)
net = slim.layers.max_pool2d(net, kernel_size=2, scope='maxpool3')
net = slim.layers.conv2d(net, num_outputs=256, kernel_size=1, scope='conv4', trainable=True)
net = slim.layers.max_pool2d(net, kernel_size=2, scope='maxpool4')
net = slim.layers.conv2d(net, num_outputs=1024, kernel_size=4, scope='conv5', trainable=True)
net = slim.layers.max_pool2d(net, kernel_size=4, scope='maxpool5')
net = slim.layers.flatten(net, scope='flatten')
net = slim.layers.fully_connected(net, 1024, scope='fc1', trainable=True)
net = slim.layers.fully_connected(net, 512, scope='fc2', trainable=True)
net = slim.layers.fully_connected(net, 256, scope='fc3', trainable=True)
net = slim.layers.fully_connected(net, self.num_output, weights_regularizer=None, scope='output')
return net
def inception_v3(self, inputs, dropout_keep_prob=0.8, reuse=False, scope=''):
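# Inception-v3 style feature extractor adapted to the single-channel 71x125 input; the final fully connected layer produces a num_output-dimensional embedding rather than class logits.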
end_points = {}
with tf.name_scope(scope, 'inception_v3', [inputs]):
with slim.arg_scope([slim.layers.conv2d, slim.layers.fully_connected, slim.layers.batch_norm, slim.layers.dropout], weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), weights_regularizer=slim.l2_regularizer(self.l2_weight), reuse=reuse):
with slim.arg_scope([slim.layers.conv2d], stride=1, padding='VALID', reuse=reuse):
# 299 x 299 x 3
inputs = tf.expand_dims(inputs, dim=3)
end_points['conv0'] = slim.layers.conv2d(inputs, 32, kernel_size=3, stride=2, scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = slim.layers.conv2d(end_points['conv0'], 32, kernel_size=3, scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = slim.layers.conv2d(end_points['conv1'], 64, kernel_size=3, padding='SAME', scope='conv2')
# 147 x 147 x 64
#end_points['pool1'] = slim.layers.max_pool2d(end_points['conv2'], kernel_size=3, stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = slim.layers.conv2d(end_points['conv2'], 80, kernel_size=1, scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = slim.layers.conv2d(end_points['conv3'], 192, kernel_size=3, scope='conv4')
# 71 x 71 x 192.
#end_points['pool2'] = slim.layers.max_pool2d(end_points['conv4'], kernel_size=3, stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['conv4']
# Inception blocks
with slim.arg_scope([slim.layers.conv2d], stride=1, padding='SAME', reuse=reuse):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch5x5'):
branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch1x1/conv2')
branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch1x1/conv3')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 32, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch5x5'):
branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch5x5/conv1')
branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch5x5/conv2')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 64, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch5x5'):
branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch5x5/conv1')
branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch5x5/conv2')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 64, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = slim.layers.conv2d(net, 384, kernel_size=3, stride=2, padding='VALID', scope='branch3x3/conv1')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2')
branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, stride=2, padding='VALID', scope='branch3x3dbl/conv3')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.max_pool2d(net, kernel_size=3, stride=2, padding='VALID', scope='branch_pool/max_pool1')
net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch7x7'):
branch7x7 = slim.layers.conv2d(net, 128, kernel_size=1, scope='branch7x7/conv1')
branch7x7 = slim.layers.conv2d(branch7x7, 128, kernel_size=(1, 7), scope='branch7x7/conv2')
branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3')
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = slim.layers.conv2d(net, 128, kernel_size=1, scope='branch7x7dbl/conv1')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(7, 1), scope='branch7x7dbl/conv2')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(1, 7), scope='branch7x7dbl/conv3')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(7, 1), scope='branch7x7dbl/conv4')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch7x7'):
branch7x7 = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7/conv1')
branch7x7 = slim.layers.conv2d(branch7x7, 160, kernel_size=(1, 7), scope='branch7x7/conv2')
branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3')
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7dbl/conv1')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv2')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(1, 7), scope='branch7x7dbl/conv3')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv4')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv2')
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch7x7'):
branch7x7 = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7/conv1')
branch7x7 = slim.layers.conv2d(branch7x7, 160, kernel_size=(1, 7), scope='branch7x7/conv2')
branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3')
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7dbl/conv1')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv2')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(1, 7), scope='branch7x7dbl/conv3')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv4')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1')
with tf.variable_scope('branch7x7'):
branch7x7 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7/conv1')
branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(1, 7), scope='branch7x7/conv2')
branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3')
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7dbl/conv1')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(7, 1), scope='branch7x7dbl/conv2')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv3')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(7, 1), scope='branch7x7dbl/conv4')
branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1')
branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1')
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = slim.layers.avg_pool2d(aux_logits, kernel_size=5, stride=3, padding='VALID', scope='aux_logits/avg_pool1')
aux_logits = slim.layers.conv2d(aux_logits, 128, kernel_size=1, scope='aux_logits/proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = slim.layers.conv2d(aux_logits, 768, shape[1:3], padding='VALID', scope='aux_logits/conv2')
aux_logits = slim.layers.flatten(aux_logits, scope='aux_logits/flatten')
aux_logits = slim.layers.fully_connected(aux_logits, self.num_output, activation_fn=None, scope='aux_logits/fc1')
end_points['aux_logits'] = aux_logits
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch3x3/conv1')
branch3x3 = slim.layers.conv2d(branch3x3, 320, kernel_size=3, stride=2, padding='VALID', scope='branch3x3/conv2')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7x3/conv1')
branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=(1, 7), scope='branch7x7x3/conv2')
branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=(7, 1), scope='branch7x7x3/conv3')
branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=3, stride=2, padding='VALID', scope='branch7x7x3/conv4')
with tf.variable_scope('branch_pool'):
branch_pool = slim.layers.max_pool2d(net, kernel_size=3, stride=2, padding='VALID', scope='branch_pool/max_pool1')
net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool])
end_points['mixed_17x17x1280a'] = net
with tf.variable_scope('logits'):
shape = net.get_shape()
net = slim.layers.avg_pool2d(net, shape[1:3], stride=1, padding='VALID', scope='pool')
end_points['prev_layer'] = net
# 1 x 1 x 2048
#net = slim.layers.dropout(net, dropout_keep_prob, scope='dropout')
net = slim.layers.flatten(net, scope='flatten')
# 2048
logits = slim.layers.fully_connected(net, self.num_output, weights_regularizer=None, activation_fn=None, scope='logits')
# 1000
end_points['logits'] = logits
return end_points['logits']
def get_model(self, inputs, reuse=False, use_inception=True):
if not use_inception:
return self.simple_model(inputs, reuse=reuse)
else:
return self.inception_v3(inputs, reuse=reuse)
def train_model(self, outdir=None):
loss = self.triplet_loss(alpha=self.alpha)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.optim = self.optimizer.minimize(loss=loss)
self.sess.run(tf.global_variables_initializer())
count = 0
ii = 0
val_percentage = 0
val_conf_matrix = 0
epoch = -1
while True:
epoch += 1
ii = 0
count = 0
temp_count = 0
full_loss = 0
while ii <= self.batch_size:
ii += 1
a, p, n = self.get_triplets()
temploss = self.sess.run(loss, feed_dict={self.anchor: a, self.positive: p, self.negative: n})
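# Triplet batches that already satisfy the margin (zero loss) are skipped and counted separately; only violating batches reach the optimizer step below.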
if temploss == 0:
ii -= 1
count += 1
temp_count += 1
continue
full_loss += temploss
if ((ii + epoch * self.batch_size) % 1000 == 0):
loss_mem_skip.append(full_loss / (1000.0 + temp_count))
loss_mem.append(full_loss / (1000.0))
full_loss = 0
temp_count = 0
get_loss(loss_mem, loss_mem_skip)
_, a, p, n = self.sess.run([self.optim, self.anchor_out, self.positive_out, self.negative_out], feed_dict={self.anchor: a, self.positive: p, self.negative: n})
d1 = np.linalg.norm(p - a)
d2 = np.linalg.norm(n - a)
if self.DEBUG:
print("Epoch: %2d, Iter: %7d, IterSkip: %7d, Loss: %.4f, P_Diff: %.4f, N_diff: %.4f" % (epoch, ii, count, temploss, d1, d2))
val_percentage, val_conf_matrix = self.validate(epoch)
self.sess.close()
return epoch, val_percentage, val_conf_matrix
def get_sample(self, size=1, validation=False, with_seizure=None):
data_list = []
class_list = []
if not validation:
for ii in range(0, size):
if with_seizure is None:
choice = random.choice(['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf'])
if choice == 'bckg':
data_list.append(norm_op(np.load(random.choice(self.bckg)), axisss=0))
class_list.append(self.bckg_num)
elif choice == 'eybl':
data_list.append(norm_op(np.load(random.choice(self.eybl)), axisss=0))
class_list.append(self.eybl_num)
elif choice == 'gped':
data_list.append(norm_op(np.load(random.choice(self.gped)), axisss=0))
class_list.append(self.gped_num)
elif choice == 'spsw':
data_list.append(norm_op(np.load(random.choice(self.spsw)), axisss=0))
class_list.append(self.spsw_num)
elif choice == 'pled':
data_list.append(norm_op(np.load(random.choice(self.pled)), axisss=0))
class_list.append(self.pled_num)
else:
data_list.append(norm_op(np.load(random.choice(self.artf)), axisss=0))
class_list.append(self.artf_num)
elif with_seizure == 2:
choice = random.choice(['gped', 'spsw', 'pled'])
success = False
the_file = ''
class_num = None
while not success:
if choice == 'bckg':
the_file = random.choice(self.bckg)
class_num = self.bckg_num
elif choice == 'eybl':
the_file = random.choice(self.eybl)
class_num = self.eybl_num
elif choice == 'gped':
the_file = random.choice(self.gped)
class_num = self.gped_num
elif choice == 'spsw':
the_file = random.choice(self.spsw)
class_num = self.spsw_num
elif choice == 'pled':
the_file = random.choice(self.pled)
class_num = self.pled_num
else:
the_file = random.choice(self.artf)
class_num = self.artf_num
the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_'
if the_file_stripped in self.files_with_seizures:
success = True
#print(the_file, the_file_stripped, the_file_stripped in self.files_with_seizures)
data_list.append(norm_op(np.load(str(the_file)), axisss=0))
class_list.append(class_num)
elif with_seizure == 1:
choice = random.choice(['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf'])
success = False
the_file = ''
class_num = None
while not success:
if choice == 'bckg':
the_file = random.choice(self.bckg)
class_num = self.bckg_num
elif choice == 'eybl':
the_file = random.choice(self.eybl)
class_num = self.eybl_num
elif choice == 'gped':
the_file = random.choice(self.gped)
class_num = self.gped_num
elif choice == 'spsw':
the_file = random.choice(self.spsw)
class_num = self.spsw_num
elif choice == 'pled':
the_file = random.choice(self.pled)
class_num = self.pled_num
else:
the_file = random.choice(self.artf)
class_num = self.artf_num
the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_'
if the_file_stripped in self.files_with_seizures:
success = True
#print(the_file, the_file_stripped, the_file_stripped in self.files_with_seizures)
data_list.append(norm_op(np.load(str(the_file)), axisss=0))
class_list.append(class_num)
elif with_seizure == 0:
choice = random.choice(['bckg', 'eybl', 'artf'])
success = False
the_file = ''
class_num = None
while not success:
if choice == 'bckg':
the_file = random.choice(self.bckg)
class_num = self.bckg_num
elif choice == 'eybl':
the_file = random.choice(self.eybl)
class_num = self.eybl_num
elif choice == 'gped':
the_file = random.choice(self.gped)
class_num = self.gped_num
elif choice == 'spsw':
the_file = random.choice(self.spsw)
class_num = self.spsw_num
elif choice == 'pled':
the_file = random.choice(self.pled)
class_num = self.pled_num
else:
the_file = random.choice(self.artf)
class_num = self.artf_num
#print(the_file)
the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_'
if the_file_stripped in self.files_without_seizures:
success = True
#print(the_file, the_file_stripped, the_file_stripped in self.files_without_seizures)
data_list.append(norm_op(np.load(str(the_file)), axisss=0))
class_list.append(class_num)
else:
for ii in range(0, size):
choice = random.choice(['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf'])
if choice == 'bckg':
data_list.append(norm_op(np.load(random.choice(self.bckg_val)), axisss=0))
class_list.append(self.bckg_num)
elif choice == 'eybl':
data_list.append(norm_op(np.load(random.choice(self.eybl_val)), axisss=0))
class_list.append(self.eybl_num)
elif choice == 'gped':
data_list.append(norm_op(np.load(random.choice(self.gped_val)), axisss=0))
class_list.append(self.gped_num)
elif choice == 'spsw':
data_list.append(norm_op(np.load(random.choice(self.spsw_val)), axisss=0))
class_list.append(self.spsw_num)
elif choice == 'pled':
data_list.append(norm_op(np.load(random.choice(self.pled_val)), axisss=0))
class_list.append(self.pled_num)
else:
data_list.append(norm_op(np.load(random.choice(self.artf_val)), axisss=0))
class_list.append(self.artf_num)
return data_list, class_list
def validate(self, epoch):
inputs, classes = self.get_sample(size=self.validation_size, validation=True)
vector_inputs = self.sess.run(self.inference_model, feed_dict={self.inference_input: inputs})
del inputs
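# A k-nearest-neighbour classifier (k=31) is fit on embeddings of a labelled validation sample; the accuracies below measure its agreement on freshly drawn samples.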
tempClassifier = neighbors.KNeighborsClassifier(31)
tempClassifier.fit(vector_inputs, classes)
# All data (Files with Seizures & Files without Seizures)
val_inputs, val_classes = self.get_sample(size=self.validation_size)
vector_val_inputs = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs})
del val_inputs
pred_class = tempClassifier.predict(vector_val_inputs)
percentage = len([i for i, j in zip(val_classes, pred_class) if i == j]) * 100.0 / self.validation_size
if self.DEBUG:
print("Validation Results: %.3f%% of of %d correct" % (percentage, self.validation_size))
val_classes = list(map(lambda x: self.num_to_class[x], val_classes))
pred_class = list(map(lambda x: self.num_to_class[x], pred_class))
class_labels = [0, 1, 2, 3, 4, 5]
class_labels = list(map(lambda x: self.num_to_class[x], class_labels))
conf_matrix = confusion_matrix(val_classes, pred_class, labels=class_labels)
np.set_printoptions(precision=2)
np.save('./%s Results/%s_confusion_matrix_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage), conf_matrix)
plot_confusion_matrix(conf_matrix, classes=class_labels, epoch=epoch, accuracy=percentage)
print("All data: %s" % set(val_classes))
compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage, num_to_label=self.num_to_class)
# Files with Seizures
val_inputs_seizure, val_classes_seizure = self.get_sample(size=self.validation_size, with_seizure = 1)
vector_val_inputs_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_seizure})
del val_inputs_seizure
pred_class_seizure = tempClassifier.predict(vector_val_inputs_seizure)
percentage_seizure = len([i for i, j in zip(val_classes_seizure, pred_class_seizure) if i == j]) * 100.0 / self.validation_size
if self.DEBUG:
print("Validation Results: %.3f%% of of %d correct" % (percentage_seizure, self.validation_size))
val_classes_seizure = list(map(lambda x: self.num_to_class[x], val_classes_seizure))
pred_class_seizure = list(map(lambda x: self.num_to_class[x], pred_class_seizure))
class_labels_seizure = [0, 1, 2, 3, 4, 5]
class_labels_seizure = list(map(lambda x: self.num_to_class[x], class_labels_seizure))
conf_matrix_seizure = confusion_matrix(val_classes_seizure, pred_class_seizure, labels=class_labels_seizure)
np.set_printoptions(precision=2)
np.save('./%s Results/%s_confusion_matrix_with_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_seizure), conf_matrix_seizure)
plot_confusion_matrix(conf_matrix_seizure, classes=class_labels_seizure, epoch=epoch, accuracy=percentage_seizure, with_seizure=1, title = "Confusion Matrix on Files with Seizure")
print("With Seizure data: %s" % set(val_classes_seizure))
#compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_seizure, num_to_label=self.num_to_class)
# ONLY Seizures
val_inputs_only_seizure, val_classes_only_seizure = self.get_sample(size=self.validation_size, with_seizure = 2)
vector_val_inputs_only_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_only_seizure})
del val_inputs_only_seizure
pred_class_only_seizure = tempClassifier.predict(vector_val_inputs_only_seizure)
percentage_only_seizure = len([i for i, j in zip(val_classes_only_seizure, pred_class_only_seizure) if i == j]) * 100.0 / self.validation_size
if self.DEBUG:
print("Validation Results: %.3f%% of of %d correct" % (percentage_only_seizure, self.validation_size))
val_classes_only_seizure = list(map(lambda x: self.num_to_class[x], val_classes_only_seizure))
pred_class_only_seizure = list(map(lambda x: self.num_to_class[x], pred_class_only_seizure))
class_labels_only_seizure = [0, 1, 2, 3, 4, 5]
class_labels_only_seizure = list(map(lambda x: self.num_to_class[x], class_labels_only_seizure))
conf_matrix_only_seizure = confusion_matrix(val_classes_only_seizure, pred_class_only_seizure, labels=class_labels_only_seizure)
np.set_printoptions(precision=2)
np.save('./%s Results/%s_confusion_matrix_with_only_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_only_seizure), conf_matrix_only_seizure)
plot_confusion_matrix(conf_matrix_only_seizure, classes=class_labels_only_seizure, epoch=epoch, accuracy=percentage_only_seizure, with_seizure=2, title = "Confusion Matrix on Seizure Classes Only")
print("With only Seizure data: %s" % set(val_classes_only_seizure))
#compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_seizure, num_to_label=self.num_to_class)
# Files without Seizures
val_inputs_without_seizure, val_classes_without_seizure = self.get_sample(size=self.validation_size, with_seizure=0)
vector_val_inputs_without_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_without_seizure})
del val_inputs_without_seizure
pred_class_without_seizure = tempClassifier.predict(vector_val_inputs_without_seizure)
percentage_without_seizure = len([i for i, j in zip(val_classes_without_seizure, pred_class_without_seizure) if i == j]) * 100.0 / self.validation_size
if self.DEBUG:
print("Validation Results: %.3f%% of of %d correct" % (percentage_without_seizure, self.validation_size))
val_classes_without_seizure = list(map(lambda x: self.num_to_class[x], val_classes_without_seizure))
pred_class_without_seizure = list(map(lambda x: self.num_to_class[x], pred_class_without_seizure))
class_labels_without_seizure = [0, 1, 2, 3, 4, 5]
class_labels_without_seizure = list(map(lambda x: self.num_to_class[x], class_labels_without_seizure))
conf_matrix_without_seizure = confusion_matrix(val_classes_without_seizure, pred_class_without_seizure, labels=class_labels_without_seizure)
np.set_printoptions(precision=2)
np.save('./%s Results/%s_confusion_matrix_without_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_without_seizure), conf_matrix_without_seizure)
plot_confusion_matrix(conf_matrix_without_seizure, classes=class_labels_without_seizure, epoch=epoch, accuracy=percentage_without_seizure, with_seizure=0, title = "Confusion Matrix on Files without Seizure")
print("Without Seizure data: %s" % set(val_classes_without_seizure))
#compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_without_seizure, num_to_label=self.num_to_class)
self.count_of_triplets = dict()
return percentage, conf_matrix
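# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The data directory is
# an assumption; the constructor expects the preprocessed *_files.npy indices
# under Train/ and Validation/ as loaded in __init__.
#
# if __name__ == '__main__':
#     net = BrainNet(path_to_files='/path/to/preprocessed/EEG',
#                    batch_size=100, learning_rate=1e-3, debug=True)
#     net.train_model()  # trains and validates once per epoch until interrupted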
| mit |
homeslike/OpticalTweezer | scripts/p0.1_at0.05/vCOMhistogram.py | 28 | 2448 | import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from subprocess import call
from scipy.stats import norm
# proc = call("ls *.dat",shell=True)
# datetime = "170123_2033_"
datetime = sys.argv[1]+"_"
gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100)
gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100)
vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100)
vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100)
vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100)
N = 32
vSqd = []
for i in range(0,len(vCOMData_x)):
vSqd.append((vCOMData_x[i]*vCOMData_x[i]+vCOMData_y[i]*vCOMData_y[i]+vCOMData_z[i]*vCOMData_z[i])*0.5) # 0.5*|v_COM|^2 from all three components
vSqdMean = np.mean(vSqd)
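# With k_B = m = 1 (reduced units), equipartition gives T_COM = (2/3)*<|v|^2/2>, the value written to the statistics file below.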
histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True)
histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True)
histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True)
inTemp = np.mean(gasTempDataIn)
outTemp = np.mean(gasTempDataOut)
statistics = open(datetime+"statistics.dat","w")
statistics.write("GasIn: " + str(inTemp)+"\n")
statistics.write("GasOut: " + str(outTemp)+"\n")
statistics.write("T_COM: " + str(2./3. * vSqdMean)+"\n")
statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n")
statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n")
statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n")
statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n")
statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n")
statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n")
histogram_x_file = open(datetime+"histogram_vx.dat","w")
histogram_y_file = open(datetime+"histogram_vy.dat","w")
histogram_z_file = open(datetime+"histogram_vz.dat","w")
for i in range(0,len(histogram_x)):
histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n")
histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n")
histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n")
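# Added: close the output files explicitly so everything is flushed to disk.
statistics.close()
histogram_x_file.close()
histogram_y_file.close()
histogram_z_file.close()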
# plt.figure(1)
# plt.hist(vCOMData_x,bins=100)
# plt.figure(2)
# plt.hist(vCOMData_y,bins=100)
# plt.figure(3)
# plt.hist(vCOMData_z,bins=100)
# plt.show()
# plt.figure(1)
# plt.plot(vSqd)
# plt.plot((0,700),(vSqdMean,vSqdMean))
# plt.figure(2)
# plt.hist(vCOMData_x,bins=100,normed=True)
# plt.plot(x,gasInPDF)
# plt.show()
| mit |
lcharleux/numerical_analysis | doc/Optimisation/Example_code/brachi1d.py | 1 | 2925 | #------------------------------------------------------------------------
# FINDING THE FASTEST PATH BETWEEN TWO POINTS A AND B
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PACKAGES
from scipy import optimize as opt # Optimize
import numpy as np # Numpy
import matplotlib.pyplot as plt # Pyplot
from matplotlib import cm # Colormaps
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# POSITION OF THE POINTS AND PHYSICAL DATA
xa, xb = 0., 1.
ya, yb = 1., 0.
m = 1. # mass in kg
g = 10. # gravity in m*s**-2
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# TRAVEL TIME COMPUTATION
def temps(Y):
# Potential energy, assumed to be zero at A
Ep = m * g * (Y - Y[0])
# Kinetic energy
Ec = - Ep
# Velocity
V = (2. / m * Ec) **.5
# Average velocity on each element
Ve = (V[1:] + V[:-1]) / 2.
# Step size in X:
dx = X[1] - X[0]
# Length of each element
Le = ( ( Y[1:] - Y[:-1] )**2 + dx**2)**.5
# Travel time per element
te = Le / Ve
# Total travel time
t = te.sum()
return t
def add_AB(Yc):
Y = np.zeros([len(Yc) + 2])
Y[1:-1] = Yc
Y[0], Y[-1] = ya, yb
return Y
def temps2(Yc):
return temps(add_AB(Yc))
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PUTTING IT TO WORK
manual = False # Operating mode: True for manual, False for automatic
# MESH IN X
Np = 1 # Desired number of nodes
X = np.linspace(xa, xb, Np+2) # x coordinates of the nodes
# Y COORDINATES?
Y = np.linspace(ya, yb, Np+2) # Node altitudes
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PLOTTING
yp = np.linspace(-.5, .9, 100 )
t =np.zeros_like(yp)
for i in xrange(len(yp)):
Y[1] = yp[i]
t[i] = temps(Y)
plt.figure(0)
plt.clf()
plt.plot(yp, t)
plt.grid()
plt.xlabel("$y_0$")
plt.ylabel("$t_f$")
plt.savefig("brachi1d.pdf")
loc = np.where(t == t.min())
Y[1] = yp[loc]
Yl = np.linspace(ya, yb, Np+2) # Node altitudes (straight-line reference)
tl = temps(Yl)
plt.figure(1)
plt.clf()
plt.plot(X, Y, "og-", label = "$t_f = {0:.2f}$ s".format(t[loc][0]))
plt.plot(X, Yl, "ob-", label = "$t_f = {0:.2f}$ s".format(tl))
plt.grid()
plt.xlabel("Position, $x$")
plt.ylabel("Position, $y$")
plt.legend()
plt.savefig("brachi1d_sol.pdf")
| gpl-2.0 |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/backends/backend_wx.py | 69 | 77038 | from __future__ import division
"""
backend_wx.py
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue ([email protected])
Derived from original copyright work by John Hunter
([email protected])
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
"""
KNOWN BUGS -
- Mousewheel (on Windows) only works after menu button has been pressed
at least once
- Mousewheel on Linux (wxGTK linked against GTK 1.2) does not work at all
- Vertical text renders horizontally if you use a non TrueType font
on Windows. This is a known wxPython issue. Work-around is to ensure
that you use a TrueType font.
- Pcolor demo puts chart slightly outside bounding box (approx 1-2 pixels
to the bottom left)
- Outputting to bitmap more than 300dpi results in some text being incorrectly
scaled. Seems to be a wxPython bug on Windows or font point sizes > 60, as
font size is correctly calculated.
- Performance poorer than for previous direct rendering version
- TIFF output not supported on wxGTK. This is a wxGTK issue
- Text is not anti-aliased on wxGTK. This is probably a platform
configuration issue.
- If a second call is made to show(), no figure is generated (#866965)
Not implemented:
- Printing
Fixed this release:
- Bug #866967: Interactive operation issues fixed [JDH]
- Bug #866969: Dynamic update does not function with backend_wx [JOD]
Examples which work on this release:
---------------------------------------------------------------
| Windows 2000 | Linux |
| wxPython 2.3.3 | wxPython 2.4.2.4 |
--------------------------------------------------------------|
- alignment_test.py | TBE | OK |
- arctest.py | TBE | (3) |
- axes_demo.py | OK | OK |
- axes_props.py | OK | OK |
- bar_stacked.py | TBE | OK |
- barchart_demo.py | OK | OK |
- color_demo.py | OK | OK |
- csd_demo.py | OK | OK |
- dynamic_demo.py | N/A | N/A |
- dynamic_demo_wx.py | TBE | OK |
- embedding_in_gtk.py | N/A | N/A |
- embedding_in_wx.py | OK | OK |
- errorbar_demo.py | OK | OK |
- figtext.py | OK | OK |
- histogram_demo.py | OK | OK |
- interactive.py | N/A (2) | N/A (2) |
- interactive2.py | N/A (2) | N/A (2) |
- legend_demo.py | OK | OK |
- legend_demo2.py | OK | OK |
- line_styles.py | OK | OK |
- log_demo.py | OK | OK |
- logo.py | OK | OK |
- mpl_with_glade.py | N/A (2) | N/A (2) |
- mri_demo.py | OK | OK |
- mri_demo_with_eeg.py | OK | OK |
- multiple_figs_demo.py | OK | OK |
- pcolor_demo.py | OK | OK |
- psd_demo.py | OK | OK |
- scatter_demo.py | OK | OK |
- scatter_demo2.py | OK | OK |
- simple_plot.py | OK | OK |
- stock_demo.py | OK | OK |
- subplot_demo.py | OK | OK |
- system_monitor.py | N/A (2) | N/A (2) |
- text_handles.py | OK | OK |
- text_themes.py | OK | OK |
- vline_demo.py | OK | OK |
---------------------------------------------------------------
(2) - Script uses GTK-specific features - cannot not run,
but wxPython equivalent should be written.
(3) - Clipping seems to be broken.
"""
cvs_id = '$Id: backend_wx.py 6484 2008-12-03 18:38:03Z jdh2358 $'
import sys, os, os.path, math, StringIO, weakref, warnings
import numpy as npy
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback, pdb
_DEBUG_lvls = {1 : 'Low ', 2 : 'Med ', 3 : 'High', 4 : 'Error' }
try:
import wx
backend_version = wx.VERSION_STRING
except:
raise ImportError("Matplotlib backend_wx requires wxPython be installed")
#!!! this is the call that is causing the exception swallowing !!!
#wx.InitAllImageHandlers()
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print
pdb.pm() # jdh uncomment
class fake_stderr:
"""Wx does strange things with stderr, as it makes the assumption that there
is probably no console. This redirects stderr to the console, since we know
that there is one!"""
def write(self, msg):
print "Stderr: %s\n\r" % msg
#if _DEBUG < 5:
# sys.excepthook = debug_on_error
# WxLogger =wx.LogStderr()
# sys.stderr = fake_stderr
# Event binding code changed after version 2.5
if wx.VERSION_STRING >= '2.5':
def bind(actor,event,action,**kw):
actor.Bind(event,action,**kw)
else:
def bind(actor,event,action,id=None):
if id is not None:
event(actor, id, action)
else:
event(actor,action)
import matplotlib
from matplotlib import verbose
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureCanvasBase, FigureManagerBase, NavigationToolbar2, \
cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.artist import Artist
from matplotlib.cbook import exception_to_str, is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.text import _process_text_args, Text
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
# the True dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog =wx.MessageDialog(parent = parent,
message = msg,
caption = 'Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
#In wxPython, drawing is performed on a wxDC instance, which will
    #generally be mapped to the client area of the window displaying
#the plot. Under wxPython, the wxDC instance has a wx.Pen which
#describes the colour and weight of any lines drawn, and a wxBrush
#which describes the fill colour of any closed polygon.
fontweights = {
100 : wx.LIGHT,
200 : wx.LIGHT,
300 : wx.LIGHT,
400 : wx.NORMAL,
500 : wx.NORMAL,
600 : wx.NORMAL,
700 : wx.BOLD,
800 : wx.BOLD,
900 : wx.BOLD,
'ultralight' : wx.LIGHT,
'light' : wx.LIGHT,
'normal' : wx.NORMAL,
'medium' : wx.NORMAL,
'semibold' : wx.NORMAL,
'bold' : wx.BOLD,
'heavy' : wx.BOLD,
'ultrabold' : wx.BOLD,
'black' : wx.BOLD
}
fontangles = {
'italic' : wx.ITALIC,
'normal' : wx.NORMAL,
'oblique' : wx.SLANT }
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Would it be wise to agree on standard font names across all backends?
fontnames = { 'Sans' : wx.SWISS,
'Roman' : wx.ROMAN,
'Script' : wx.SCRIPT,
'Decorative' : wx.DECORATIVE,
'Modern' : wx.MODERN,
'Courier' : wx.MODERN,
'courier' : wx.MODERN }
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
DEBUG_MSG("__init__()", 1, self)
if wx.VERSION_STRING < "2.8":
raise RuntimeError("matplotlib no longer supports wxPython < 2.8 for the Wx backend.\nYou may, however, use the WxAgg backend.")
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
#return 1, 1
if ismath: s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0], self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
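    # Note on the Clip() call above: wx measures y from the top of the bitmap
    # while the matplotlib bounds are bottom-based, hence the flip. With
    # hypothetical numbers, self.height = 400 and bounds (x, y, w, h) =
    # (10, 20, 100, 50), the wx clip rectangle is placed at
    # y = 400 - 20 - 50 = 330.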
#@staticmethod
def convert_path(gfx_ctx, tpath):
wxpath = gfx_ctx.CreatePath()
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
convert_path = staticmethod(convert_path)
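    # Illustration of convert_path() above (hypothetical input, not part of the
    # original module): a unit square such as
    #     Path([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
    #          [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
    # is turned into one MoveToPoint() followed by AddLineToPoint() calls and a
    # final CloseSubpath() on the returned wx graphics path.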
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
tpath = transform.transform_path(path)
wxpath = self.convert_path(gfx_ctx, tpath)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        if bbox is not None:
            l,b,w,h = bbox.bounds
        else:
            l=0
            b=0
            w=self.width
            h=self.height
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
bitmap = wx.BitmapFromBufferRGBA(cols,rows,image_array)
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap,int(l),int(b),int(w),int(h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the matplotlib.text.Text instance
None)
"""
if ismath: s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y-h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
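    # Worked example for the rotation bookkeeping in draw_text() above
    # (hypothetical values): for angle = 90 degrees and text height h = 10,
    # rads = pi/2, so xo = 10*sin(pi/2) = 10 and yo = 10*cos(pi/2) ~ 0, i.e.
    # the anchor is shifted left by the text height before drawing rotated text.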
def new_gc(self):
"""
        Return an instance of GraphicsContextWx and cache it as the current gc
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
        assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font =wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
return points*(PIXELS_PER_INCH/72.0*self.dpi/72.0)
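    # Worked example for points_to_pixels() above (illustrative only): with the
    # module default PIXELS_PER_INCH = 75 and a figure dpi of 72, a 12 pt size
    # maps to 12 * (75/72.0 * 72/72.0) = 12.5 pixels; get_wx_font() then rounds
    # this via int(size + 0.5).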
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
    interval, eg, (0.5, 0.0, 1.0). wxPython uses integers in the range 0-255, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = { 'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND }
_joind = { 'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND }
_dashd_wx = { 'solid': wx.SOLID,
'dashed': wx.SHORT_DASH,
'dashdot': wx.DOT_DASH,
'dotted': wx.DOT }
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
        Select a null bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGB=None):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGB)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
        Set the foreground color to the grayscale level *frac*, a float
        between 0 and 1.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w>0 and w<1: w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw==0: lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to be one of
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
            self._style = wx.LONG_DASH  # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a))
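    # Illustrative conversion for get_wxcolour() above: colours are stored on
    # the unit interval by the base class, so an RGB tuple such as
    # (0.5, 0.0, 1.0) becomes wx.Colour(red=127, green=0, blue=255) after the
    # *255 scaling and int() truncation (a sketch, not part of the original code).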
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window probably
implements a wx.Sizer to control the displayed control size - but we give a
hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL : 'control',
wx.WXK_SHIFT : 'shift',
wx.WXK_ALT : 'alt',
wx.WXK_LEFT : 'left',
wx.WXK_UP : 'up',
wx.WXK_RIGHT : 'right',
wx.WXK_DOWN : 'down',
wx.WXK_ESCAPE : 'escape',
wx.WXK_F1 : 'f1',
wx.WXK_F2 : 'f2',
wx.WXK_F3 : 'f3',
wx.WXK_F4 : 'f4',
wx.WXK_F5 : 'f5',
wx.WXK_F6 : 'f6',
wx.WXK_F7 : 'f7',
wx.WXK_F8 : 'f8',
wx.WXK_F9 : 'f9',
wx.WXK_F10 : 'f10',
wx.WXK_F11 : 'f11',
wx.WXK_F12 : 'f12',
wx.WXK_SCROLL : 'scroll_lock',
wx.WXK_PAUSE : 'break',
wx.WXK_BACK : 'backspace',
wx.WXK_RETURN : 'enter',
wx.WXK_INSERT : 'insert',
wx.WXK_DELETE : 'delete',
wx.WXK_HOME : 'home',
wx.WXK_END : 'end',
wx.WXK_PRIOR : 'pageup',
wx.WXK_NEXT : 'pagedown',
wx.WXK_PAGEUP : 'pageup',
wx.WXK_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD0 : '0',
wx.WXK_NUMPAD1 : '1',
wx.WXK_NUMPAD2 : '2',
wx.WXK_NUMPAD3 : '3',
wx.WXK_NUMPAD4 : '4',
wx.WXK_NUMPAD5 : '5',
wx.WXK_NUMPAD6 : '6',
wx.WXK_NUMPAD7 : '7',
wx.WXK_NUMPAD8 : '8',
wx.WXK_NUMPAD9 : '9',
wx.WXK_NUMPAD_ADD : '+',
wx.WXK_NUMPAD_SUBTRACT : '-',
wx.WXK_NUMPAD_MULTIPLY : '*',
wx.WXK_NUMPAD_DIVIDE : '/',
wx.WXK_NUMPAD_DECIMAL : 'dec',
wx.WXK_NUMPAD_ENTER : 'enter',
wx.WXK_NUMPAD_UP : 'up',
wx.WXK_NUMPAD_RIGHT : 'right',
wx.WXK_NUMPAD_DOWN : 'down',
wx.WXK_NUMPAD_LEFT : 'left',
wx.WXK_NUMPAD_PRIOR : 'pageup',
wx.WXK_NUMPAD_NEXT : 'pagedown',
wx.WXK_NUMPAD_PAGEUP : 'pageup',
wx.WXK_NUMPAD_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD_HOME : 'home',
wx.WXK_NUMPAD_END : 'end',
wx.WXK_NUMPAD_INSERT : 'insert',
wx.WXK_NUMPAD_DELETE : 'delete',
}
def __init__(self, parent, id, figure):
"""
        Initialise a FigureCanvasWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l,b,w,h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn('could not find a setinitialsize function for backend_wx; please report your wxpython version=%s to the matplotlib developers list'%backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize', do_nothing)
if not hasattr(self,'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible', lambda *args: True)
# Create the drawing bitmap
self.bitmap =wx.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w,h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
bind(self, wx.EVT_SIZE, self._onSize)
bind(self, wx.EVT_PAINT, self._onPaint)
bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
bind(self, wx.EVT_KEY_DOWN, self._onKeyDown)
bind(self, wx.EVT_KEY_UP, self._onKeyUp)
bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp)
bind(self, wx.EVT_MOTION, self._onMotion)
bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave)
bind(self, wx.EVT_ENTER_WINDOW, self._onEnter)
bind(self, wx.EVT_IDLE, self._onIdle)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.macros = {} # dict from wx id to seq of macros
self.Printer_Init()
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
def Printer_Init(self):
"""initialize printer settings using wx methods"""
self.printerData = wx.PrintData()
self.printerData.SetPaperId(wx.PAPER_LETTER)
self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER)
self.printerPageData= wx.PageSetupDialogData()
self.printerPageData.SetMarginBottomRight((25,25))
self.printerPageData.SetMarginTopLeft((25,25))
self.printerPageData.SetPrintData(self.printerData)
self.printer_width = 5.5
self.printer_margin= 0.5
def Printer_Setup(self, event=None):
"""set up figure for printing. The standard wx Printer
Setup Dialog seems to die easily. Therefore, this setup
simply asks for image width and margin for printing. """
dmsg = """Width of output figure in inches.
The current aspect ratio will be kept."""
dlg = wx.Dialog(self, -1, 'Page Setup for Printing' , (-1,-1))
df = dlg.GetFont()
df.SetWeight(wx.NORMAL)
df.SetPointSize(11)
dlg.SetFont(df)
x_wid = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_width, size=(70,-1))
x_mrg = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_margin,size=(70,-1))
sizerAll = wx.BoxSizer(wx.VERTICAL)
sizerAll.Add(wx.StaticText(dlg,-1,dmsg),
0, wx.ALL | wx.EXPAND, 5)
sizer = wx.FlexGridSizer(0,3)
sizerAll.Add(sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(wx.StaticText(dlg,-1,'Figure Width'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_wid,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'Margin'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_mrg,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
btn = wx.Button(dlg,wx.ID_OK, " OK ")
btn.SetDefault()
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
btn = wx.Button(dlg,wx.ID_CANCEL, " CANCEL ")
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
dlg.SetSizer(sizerAll)
dlg.SetAutoLayout(True)
sizerAll.Fit(dlg)
if dlg.ShowModal() == wx.ID_OK:
try:
self.printer_width = float(x_wid.GetValue())
self.printer_margin = float(x_mrg.GetValue())
except:
pass
if ((self.printer_width + self.printer_margin) > 7.5):
self.printerData.SetOrientation(wx.LANDSCAPE)
else:
self.printerData.SetOrientation(wx.PORTRAIT)
dlg.Destroy()
return
def Printer_Setup2(self, event=None):
"""set up figure for printing. Using the standard wx Printer
Setup Dialog. """
if hasattr(self, 'printerData'):
data = wx.PageSetupDialogData()
data.SetPrintData(self.printerData)
else:
data = wx.PageSetupDialogData()
data.SetMarginTopLeft( (15, 15) )
data.SetMarginBottomRight( (15, 15) )
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
tl = data.GetMarginTopLeft()
br = data.GetMarginBottomRight()
self.printerData = wx.PrintData(data.GetPrintData())
dlg.Destroy()
def Printer_Preview(self, event=None):
""" generate Print Preview with wx Print mechanism"""
po1 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
po2 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
self.preview = wx.PrintPreview(po1,po2,self.printerData)
if not self.preview.Ok(): print "error with preview"
self.preview.SetZoom(50)
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((850,650))
frame.Centre(wx.BOTH)
frame.Show(True)
self.gui_repaint()
def Printer_Print(self, event=None):
""" Print figure using wx Print mechanism"""
pdd = wx.PrintDialogData()
        # SetPrintData for 2.4 compatibility
pdd.SetPrintData(self.printerData)
pdd.SetToPage(1)
printer = wx.Printer(pdd)
printout = PrintoutWx(self, width=int(self.printer_width),
margin=int(self.printer_margin))
print_ok = printer.Print(self, printout, True)
if wx.VERSION_STRING >= '2.5':
if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
else:
if not print_ok:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
printout.Destroy()
self.gui_repaint()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Create a timer for handling draw_idle requests
# If there are events pending when the timer is
# complete, reset the timer and continue. The
# alternative approach, binding to wx.EVT_IDLE,
# doesn't behave as nicely.
if hasattr(self,'_idletimer'):
self._idletimer.Restart(IDLE_DELAY)
else:
self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle)
# FutureCall is a backwards-compatible alias;
# CallLater became available in 2.7.1.1.
def _onDrawIdle(self, *args, **kwargs):
if wx.GetApp().Pending():
self._idletimer.Restart(IDLE_DELAY, *args, **kwargs)
else:
del self._idletimer
# GUI event or explicit draw call may already
# have caused the draw to take place
if not self._isDrawn:
self.draw(*args, **kwargs)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
Call signature::
start_event_loop(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout*1000, oneShot=True)
bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wx.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
if hasattr(self,'_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
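    # Sketch of the wildcard string assembled above: each file-type group
    # contributes an entry of the form 'Name (*.ext1;*.ext2)|*.ext1;*.ext2' and
    # the entries are joined with '|', e.g. (illustrative, not the exact list)
    #     'JPEG (*.jpeg;*.jpg)|*.jpeg;*.jpg|Portable Network Graphics (*.png)|*.png'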
def gui_repaint(self, drawDC=None):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied device context. If drawDC is None, a ClientDC will be used to
redraw the image.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if drawDC is None:
drawDC=wx.ClientDC(self)
drawDC.BeginDrawing()
drawDC.DrawBitmap(self.bitmap, 0, 0)
drawDC.EndDrawing()
#wx.GetApp().Yield()
else:
pass
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains() methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG, *args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF, *args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l,b,width,height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wx.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not self.bitmap.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
elif is_writable_file_like(filename):
if not self.bitmap.ConvertToImage().SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def get_default_filetype(self):
return 'png'
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap =wx.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1: return # Empty figure
dpival = self.figure.dpi
winch = self._width/dpival
hinch = self._height/dpival
self.figure.set_size_inches(winch, hinch)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
def _get_key(self, evt):
keyval = evt.m_keyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval <256:
key = chr(keyval)
else:
key = None
# why is wx upcasing this?
if key is not None: key = key.lower()
return key
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
#print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
#print 'release button', 1
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
#print "delta,rotation,rate",delta,rotation,rate
step = rate*float(rotation)/delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self,'_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
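    # Step calculation above with typical (hypothetical) values: a wheel delta
    # of 120, one notch of rotation (+120) and 3 lines per action give
    # step = 3 * 120/120 = 3.0; reversing the wheel direction flips the sign.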
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent = evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent = evt)
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
Creates a wx.PySimpleApp instance if a wx.App has not been created.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.PySimpleApp()
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
    This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show():
"""
Current implementation assumes that matplotlib is executed in a PyCrust
shell. It appears to be possible to execute wxPython applications from
within a PyCrust without having to ensure that wxPython has been created
in a secondary thread (e.g. SciPy gui_thread).
Unfortunately, gui_thread seems to introduce a number of further
dependencies on SciPy modules, which I do not wish to introduce
into the backend at this point. If there is a need I will look
into this in a later release.
"""
DEBUG_MSG("show()", 3, None)
for figwin in Gcf.get_all_fig_managers():
figwin.frame.Show()
if show._needmain and not matplotlib.is_interactive():
# start the wxPython gui event if there is not already one running
wxapp = wx.GetApp()
if wxapp is not None:
# wxPython 2.4 has no wx.App.IsMainLoopRunning() method
imlr = getattr(wxapp, 'IsMainLoopRunning', lambda: False)
if not imlr():
wxapp.MainLoop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos =wx.Point(20,20)
l,b,w,h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.sizer =wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.figmgr = FigureManagerWx(self.canvas, num, self)
bind(self, wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
#self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
    NB: FigureManagerBase is defined in backend_bases; Gcf lives in _pylab_helpers
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin_wxframe.html#wxframe
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb != None: self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def showfig(*args):
frame.Show()
# attach a show method to the figure
self.canvas.figure.show = showfig
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
#if self.tb is not None: self.tb.Destroy()
import wx
#wx.GetApp().ProcessIdle()
wx.WakeUpIdle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU =wx.NewId()
_NTB_AXISMENU_BUTTON =wx.NewId()
_NTB_X_PAN_LEFT =wx.NewId()
_NTB_X_PAN_RIGHT =wx.NewId()
_NTB_X_ZOOMIN =wx.NewId()
_NTB_X_ZOOMOUT =wx.NewId()
_NTB_Y_PAN_UP =wx.NewId()
_NTB_Y_PAN_DOWN =wx.NewId()
_NTB_Y_ZOOMIN =wx.NewId()
_NTB_Y_ZOOMOUT =wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE =wx.NewId()
_NTB_CLOSE =wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'],'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu =wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId =wx.NewId()
self._invertId =wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected", False)
self._menu.AppendSeparator()
bind(self, wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
bind(self, wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
bind(self, wx.EVT_MENU, self._handleInvertAxesSelected, id=self._invertId)
def Destroy(self):
self._menu.Destroy()
        wx.Button.Destroy(self)
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y+h-4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0: return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId =wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i, "Select axis %d" % i, True)
self._menu.Check(menuId, True)
bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
self._toolbar.set_active(range(len(self._axisId)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e+1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
_NTB2_HOME =wx.NewId()
self._NTB2_BACK =wx.NewId()
self._NTB2_FORWARD =wx.NewId()
self._NTB2_PAN =wx.NewId()
self._NTB2_ZOOM =wx.NewId()
_NTB2_SAVE = wx.NewId()
_NTB2_SUBPLOT =wx.NewId()
self.SetToolBitmapSize(wx.Size(24,24))
self.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'),
'Home', 'Reset original view')
self.AddSimpleTool(self._NTB2_BACK, _load_bitmap('back.png'),
'Back', 'Back navigation view')
self.AddSimpleTool(self._NTB2_FORWARD, _load_bitmap('forward.png'),
'Forward', 'Forward navigation view')
# todo: get new bitmap
self.AddCheckTool(self._NTB2_PAN, _load_bitmap('move.png'),
shortHelp='Pan',
longHelp='Pan with left, zoom with right')
self.AddCheckTool(self._NTB2_ZOOM, _load_bitmap('zoom_to_rect.png'),
shortHelp='Zoom', longHelp='Zoom to rectangle')
self.AddSeparator()
self.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'),
'Configure subplots', 'Configure subplot parameters')
self.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'),
'Save', 'Save plot contents to file')
bind(self, wx.EVT_TOOL, self.home, id=_NTB2_HOME)
bind(self, wx.EVT_TOOL, self.forward, id=self._NTB2_FORWARD)
bind(self, wx.EVT_TOOL, self.back, id=self._NTB2_BACK)
bind(self, wx.EVT_TOOL, self.zoom, id=self._NTB2_ZOOM)
bind(self, wx.EVT_TOOL, self.pan, id=self._NTB2_PAN)
bind(self, wx.EVT_TOOL, self.configure_subplot, id=_NTB2_SUBPLOT)
bind(self, wx.EVT_TOOL, self.save, id=_NTB2_SAVE)
self.Realize()
def zoom(self, *args):
self.ToggleTool(self._NTB2_PAN, False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self._NTB2_ZOOM, False)
NavigationToolbar2.pan(self, *args)
def configure_subplot(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save(self, evt):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = "image." + self.canvas.get_default_filetype()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format!=ext:
#looks like they forgot to set the image type drop
#down, going with the extension.
warnings.warn('extension %s did not match the selected image type %s; going with %s'%(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception, e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def release(self, event):
try: del self.lastrect
except AttributeError: pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc =wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
        # don't do any filling of the dc. It is set just for
        # the sake of completeness.
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
        if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing()
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None: self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self._NTB2_BACK, can_backward)
self.EnableTool(self._NTB2_FORWARD, can_forward)
class NavigationToolbarWx(wx.ToolBar):
def __init__(self, canvas, can_kill=False):
"""
        figure is the Figure instance that the toolbar controls
win, if not None, is the wxWindow the Figure is embedded in
"""
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
DEBUG_MSG("__init__()", 1, self)
self.canvas = canvas
self._lastControl = None
self._mouseOnButton = None
self._parent = canvas.GetParent()
self._NTB_BUTTON_HANDLER = {
_NTB_X_PAN_LEFT : self.panx,
_NTB_X_PAN_RIGHT : self.panx,
_NTB_X_ZOOMIN : self.zoomx,
            _NTB_X_ZOOMOUT : self.zoomx,
_NTB_Y_PAN_UP : self.pany,
_NTB_Y_PAN_DOWN : self.pany,
_NTB_Y_ZOOMIN : self.zoomy,
_NTB_Y_ZOOMOUT : self.zoomy }
self._create_menu()
self._create_controls(can_kill)
self.Realize()
def _create_menu(self):
"""
Creates the 'menu' - implemented as a button which opens a
pop-up menu since wxPython does not allow a menu as a control
"""
DEBUG_MSG("_create_menu()", 1, self)
self._menu = MenuButtonWx(self)
self.AddControl(self._menu)
self.AddSeparator()
def _create_controls(self, can_kill):
"""
Creates the button controls, and links them to event handlers
"""
DEBUG_MSG("_create_controls()", 1, self)
# Need the following line as Windows toolbars default to 15x16
self.SetToolBitmapSize(wx.Size(16,16))
self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'),
'Left', 'Scroll left')
self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'),
'Right', 'Scroll right')
self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase X axis magnification')
self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease X axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap('stock_up.xpm'),
'Up', 'Scroll up')
self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'),
'Down', 'Scroll down')
self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase Y axis magnification')
self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease Y axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'),
'Save', 'Save plot contents as images')
self.AddSeparator()
bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT)
bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT)
bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP)
bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN)
bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE)
bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId())
if can_kill:
bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
def set_active(self, ind):
"""
ind is a list of index numbers for the axes which are to be made active
"""
DEBUG_MSG("set_active()", 1, self)
self._ind = ind
        if ind is not None:
            self._active = [ self._axes[i] for i in self._ind ]
        else:
            self._active = []
        # Now update the button text with the active axes
self._menu.updateButtonText(ind)
def get_last_control(self):
"""Returns the identity of the last toolbar button pressed."""
return self._lastControl
def panx(self, direction):
DEBUG_MSG("panx()", 1, self)
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def pany(self, direction):
DEBUG_MSG("pany()", 1, self)
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomx(self, in_out):
DEBUG_MSG("zoomx()", 1, self)
for a in self._active:
a.xaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomy(self, in_out):
DEBUG_MSG("zoomy()", 1, self)
for a in self._active:
a.yaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def update(self):
"""
Update the toolbar menu - called when (e.g.) a new subplot or axes are added
"""
DEBUG_MSG("update()", 1, self)
self._axes = self.canvas.figure.get_axes()
self._menu.updateAxes(len(self._axes))
def _do_nothing(self, d):
"""A NULL event handler - does nothing whatsoever"""
pass
# Local event handlers - mainly supply parameters to pan/scroll functions
def _onEnterTool(self, evt):
toolId = evt.GetSelection()
try:
self.button_fn = self._NTB_BUTTON_HANDLER[toolId]
except KeyError:
self.button_fn = self._do_nothing
evt.Skip()
def _onLeftScroll(self, evt):
self.panx(-1)
evt.Skip()
def _onRightScroll(self, evt):
self.panx(1)
evt.Skip()
def _onXZoomIn(self, evt):
self.zoomx(1)
evt.Skip()
def _onXZoomOut(self, evt):
self.zoomx(-1)
evt.Skip()
def _onUpScroll(self, evt):
self.pany(1)
evt.Skip()
def _onDownScroll(self, evt):
self.pany(-1)
evt.Skip()
def _onYZoomIn(self, evt):
self.zoomy(1)
evt.Skip()
def _onYZoomOut(self, evt):
self.zoomy(-1)
evt.Skip()
def _onMouseEnterButton(self, button):
self._mouseOnButton = button
def _onMouseLeaveButton(self, button):
if self._mouseOnButton == button:
self._mouseOnButton = None
def _onMouseWheel(self, evt):
if evt.GetWheelRotation() > 0:
direction = 1
else:
direction = -1
self.button_fn(direction)
_onSave = NavigationToolbar2Wx.save
def _onClose(self, evt):
self.GetParent().Destroy()
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
#self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
#def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5,margin=0.5, title='matplotlib'):
wx.Printout.__init__(self,title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports a single-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw,pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw,pgh) = self.GetPageSizePixels() # page size in pixels
(dcw,dch) = dc.GetSize()
(grw,grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview(): page_scale = float(dcw)/pgw
# get margin in pixels = (margin in in) * (pixels/in)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
        # (grw is the current width of the rendered canvas in pixels)
user_scale = (self.width * fig_dpi * page_scale)/float(grw)
dc.SetDeviceOrigin(left_margin,top_margin)
dc.SetUserScale(user_scale,user_scale)
        # this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
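    # Scaling sketch for OnPrintPage() above (hypothetical numbers): for a
    # 600 dpi printer (ppw = 600), a figure at fig_dpi = 100 and a requested
    # width of 5.5 inches, vscale = 600/100 = 6; with a 550 pixel wide canvas
    # and no preview, user_scale = (5.5 * 100 * 1.0)/550 = 1.0.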
#>
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
Toolbar = NavigationToolbarWx
FigureManager = FigureManagerWx
| gpl-3.0 |
pmelchior/skymapper | examples/example1.py | 1 | 3622 | # load projection and helper functions
import numpy as np
import skymapper as skm
def getCatalog(size=10000, survey=None):
# dummy catalog: uniform on sphere
# Marsaglia (1972)
xyz = np.random.normal(size=(size, 3))
r = np.sqrt((xyz**2).sum(axis=1))
dec = np.arccos(xyz[:,2]/r) / skm.DEG2RAD - 90
ra = - np.arctan2(xyz[:,0], xyz[:,1]) / skm.DEG2RAD
if survey is not None:
inside = survey.contains(ra, dec)
ra = ra[inside]
dec = dec[inside]
return ra, dec
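# Minimal usage sketch for getCatalog() above (the sample size and survey are
# illustrative choices, not requirements):
#
#     ra, dec = getCatalog(1000)                            # uniform points on the sphere
#     ra, dec = getCatalog(1000, survey=skm.survey.DES())   # restricted to the DES footprint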
def makeHealpixMap(ra, dec, nside=1024, nest=False):
# convert a ra/dec catalog into healpix map with counts per cell
import healpy as hp
ipix = hp.ang2pix(nside, (90-dec)/180*np.pi, ra/180*np.pi, nest=nest)
return np.bincount(ipix, minlength=hp.nside2npix(nside))
def getHealpixCoords(pixels, nside, nest=False):
# convert healpix cell indices to center ra/dec
import healpy as hp
theta, phi = hp.pix2ang(nside, pixels, nest=nest)
return phi * 180. / np.pi, 90 - theta * 180. / np.pi
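# Sketch of how the two healpix helpers above fit together (requires healpy;
# nside=64 is a hypothetical choice):
#
#     m = makeHealpixMap(ra, dec, nside=64)        # counts per healpix cell
#     pix = np.flatnonzero(m)                      # indices of occupied cells
#     ra_c, dec_c = getHealpixCoords(pix, 64)      # their centre coordinates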
if __name__ == "__main__":
# load RA/Dec from catalog
size = 100000
des = skm.survey.DES()
ra, dec = getCatalog(size, survey=des)
# define the best Albers projection for the footprint
# minimizing the variation in distortion
crit = skm.stdDistortion
proj = skm.Albers.optimize(ra, dec, crit=crit)
# construct map: will hold figure and projection
# the outline of the sphere can be styled with kwargs for matplotlib Polygon
map = skm.Map(proj)
# add graticules, separated by 15 deg
# the lines can be styled with kwargs for matplotlib Line2D
# additional arguments for formatting the graticule labels
sep=15
map.grid(sep=sep)
# # add footprint, retain the polygon for clipping
# footprint = map.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)
#
#### 1. plot density in healpix cells ####
nside = 32
mappable = map.density(ra, dec, nside=nside)
cb = map.colorbar(mappable, cb_label="$n$ [arcmin$^{-2}$]")
# add random scatter plot
nsamples = 10
size = 100*np.random.rand(nsamples)
map.scatter(ra[:nsamples], dec[:nsamples], s=size, edgecolor='k', facecolor='None')
# focus on relevant region
map.focus(ra, dec)
# entitle: access mpl figure
map.title('Density with random scatter')
# copy map without data contents
map2 = map.clone()
#### 2. show map distortion over the survey ####
a,b = proj.distortion(ra, dec)
mappable2 = map2.hexbin(ra, dec, C=1-np.abs(b/a), vmin=0, vmax=0.3, cmap='RdYlBu_r')
cb2 = map2.colorbar(mappable2, cb_label='Distortion')
map2.title('Projection distortion')
#### 3. extrapolate RA over all sky ####
map3 = skm.Map(proj)
# show with 45 deg graticules
sep=45
map3.grid(sep=sep)
# alter number of labels at the south pole
map3.labelMeridiansAtParallel(-90, size=8, meridians=np.arange(0,360,90))
# this is slow when working with lots of samples...
mappable3 = map3.extrapolate(ra[::100], dec[::100], dec[::100], resolution=100)
cb3 = map3.colorbar(mappable3, cb_label='Dec')
# add footprint shade
footprint3 = map3.footprint(des, nside=nside, zorder=20, facecolors='w', alpha=0.3)
map3.title('Extrapolation on the sphere')
#### 4. test Healpix map functions ####
map4 = map.clone()
# simply bin the counts of ra/dec
m = makeHealpixMap(ra, dec, nside=nside)
mappable4 = map4.healpix(m, cmap="YlOrRd")
cb4 = map4.colorbar(mappable4, cb_label="Healpix cell count")
map4.title('Healpix map')
| mit |
tmrowco/electricitymap | parsers/ENTE.py | 1 | 6758 | #!/usr/bin/env python3
# This parser gets all real time interconnection flows from the
# Central American Electrical Interconnection System (SIEPAC).
import arrow
import pandas as pd
url = 'http://www.enteoperador.org/newsite/flash/data.csv'
def read_data():
"""
Reads csv data from the url.
Returns a pandas dataframe.
"""
df = pd.read_csv(url, index_col=False)
return df
def connections(df):
"""
Gets values for each interconnection.
Returns a dictionary.
"""
interconnections = {'GT->MX': df.iloc[0]['MXGU'],
'GT->SV': df.iloc[0]['GUES'],
'GT->HN': df.iloc[0]['GUHO'],
'HN->SV': df.iloc[0]['ESHO'],
'HN->NI': df.iloc[0]['HONI'],
'CR->NI': df.iloc[0]['NICR'],
'CR->PA': df.iloc[0]['CRPA']}
return interconnections
def net(df):
"""
Gets net production values for each country. Uses system totals for Mexico.
Returns a dictionary.
"""
net_production = {}
net_production['GT'] = df.iloc[0]['GENGUA'] - df.iloc[0]['DEMGUA']
net_production['SV'] = df.iloc[0]['GENSAL'] - df.iloc[0]['DEMSAL']
net_production['HN'] = df.iloc[0]['GENHON'] - df.iloc[0]['DEMHON']
net_production['NI'] = df.iloc[0]['GENNIC'] - df.iloc[0]['DEMNIC']
net_production['CR'] = df.iloc[0]['GENCRI'] - df.iloc[0]['DEMCRI']
net_production['PA'] = df.iloc[0]['GENPAN'] - df.iloc[0]['DEMPAN']
net_production['MX'] = df.iloc[0]['TOTALGEN'] - df.iloc[0]['TOTALDEM']
return net_production
def flow_logic(net_production, interconnections):
"""
Calculates flow direction for each interconnection using network flow and
simultaneous equations.
Returns a dictionary.
"""
# Each country is modeled as a node with flows going either in or out of it.
# Importing is given a negative flow while exporting is positive.
PA = {'CR': 0}
CR = {'PA': 0, 'NI': 0}
NI = {'CR': 0, 'HN': 0}
HN = {'GT': 0, 'SV': 0, 'NI': 0}
SV = {'GT': 0, 'HN': 0} # TODO: SV is assigned to but never used
GT = {'MX': 0, 'HN': 0, 'SV': 0}
def plusminus(value):
"""
Takes a number and check its sign.
Returns 1 if positive, -1 if negative and zero if zero.
"""
if value > 0:
newvalue = 1
elif value < 0:
newvalue = -1
else:
newvalue = 0
return newvalue
def flipsign(value):
"""
Changes the sign of any number given apart from zero.
1 -> -1
-1 -> 1
"""
newvalue = (-1) * value
return newvalue
flows = {'HN->NI': 0.0,
'CR->NI': 0.0,
'CR->PA': 0.0,
'GT->HN': 0.0,
'GT->MX': 0.0,
'GT->SV': 0.0,
'HN->SV': 0.0}
# First we determine whether Mexico is importing or exporting using totals for the SIEPAC system.
if net_production['MX'] < 0:
# exporting
GT['MX'] = -1
else:
GT['MX'] = 1
# We then find the direction of the PA flow by exploiting the fact that PA has only one interconnection.
if net_production['PA'] > 0:
# PA can only export to CR
PA['CR'] = 1
CR['PA'] = -1
else:
# PA importing from CR
PA['CR'] = -1
CR['PA'] = 1
# Next we can find CR and NI flows using their net productions and process of elimination.
PAN = interconnections['CR->PA'] * CR['PA']
CR['NI'] = plusminus((net_production['CR'] - PAN) / interconnections['CR->NI'])
NI['CR'] = flipsign(CR['NI'])
NIC = interconnections['CR->NI'] * NI['CR']
NI['HN'] = plusminus((net_production['NI'] - NIC) / interconnections['HN->NI'])
HN['NI'] = flipsign(NI['HN'])
# Now we use 3 simultaneous equations to find the remaining flows. We can use the fact that
# several flows are already known to our advantage.
# a = interconnections['GT->SV']
# b = interconnections['HN->SV']
# c = interconnections['GT->HN']
# MX = interconnections['GT->MX']*GT['MX']
# HON = interconnections['HN->NI']*HN['NI']
#
# eqs = np.array([[a, b, 0], [a, 0, c], [0, b, c]])
# res = np.array([net_production['SV'], net_production['GT']-MX, net_production['HN']-HON])
#
# solution = np.linalg.solve(eqs, res)
#
# #Factor to account for transmission losses.
# GT['SV'] = plusminus(solution[0]+0.5)
#
# SV['GT'] = flipsign(GT['SV'])
# SV['HN'] = plusminus(solution[1])
# HN['SV'] = flipsign(SV['HN'])
# GT['HN'] = plusminus(solution[2])
# HN['GT'] = flipsign(GT['HN'])
# Flows commented out are disabled until the maths behind determining their direction can be proved satisfactorily.
flows['HN->NI'] = HN['NI']
flows['CR->NI'] = CR['NI']
flows['CR->PA'] = CR['PA']
# flows['GT->HN'] = GT['HN']
flows['GT->MX'] = GT['MX']
# flows['GT->SV'] = GT['SV']
# flows['HN->SV'] = SV['HN']
return flows
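# Hedged reading of the commented-out block above (illustration only, not executed;
# numpy is not imported in this parser, so this stays a comment): the three remaining
# directions would come from solving a 3x3 linear system, roughly
# A = np.array([[a, b, 0], [a, 0, c], [0, b, c]])
# rhs = np.array([net_production['SV'], net_production['GT'] - MX, net_production['HN'] - HON])
# u = np.linalg.solve(A, rhs)
# where a, b, c are the GT->SV, HN->SV and GT->HN interconnection readings; the signs
# of the solved components would then be mapped to directions with plusminus() and
# flipsign(), as sketched in the disabled code.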
def net_flow(interconnections, flows):
"""
Combines interconnection values with flow directions.
Returns a dictionary.
"""
netflow = {k: interconnections[k] * flows[k] for k in interconnections}
return netflow
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
"""
Gets an exchange pair from the SIEPAC system.
Return:
A dictionary in the form:
{
'sortedZoneKeys': 'CR->PA',
'datetime': '2017-01-01T00:00:00Z',
'netFlow': 0.0,
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
getdata = read_data()
connect = connections(getdata)
nt = net(getdata)
fl = flow_logic(nt, connect)
netflow = net_flow(connect, fl)
exchange = {}
dt = arrow.now('UTC-6').floor('minute')
zones = '->'.join(sorted([zone_key1, zone_key2]))
if zones in netflow:
exchange['netFlow'] = netflow[zones]
else:
raise NotImplementedError('This exchange is not implemented.')
exchange.update(sortedZoneKeys=zones,
datetime=dt.datetime,
source='enteoperador.org')
return exchange
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_exchange(CR, PA) ->')
print(fetch_exchange('CR', 'PA'))
print('fetch_exchange(CR, NI) ->')
print(fetch_exchange('CR', 'NI'))
print('fetch_exchange(HN, NI) ->')
print(fetch_exchange('HN', 'NI'))
print('fetch_exchange(GT, MX) ->')
print(fetch_exchange('GT', 'MX'))
| gpl-3.0 |
jjdmol/LOFAR | CEP/PyBDSM/src/python/plotresults.py | 1 | 29164 | """Plotting module
This module is used to display fits results.
"""
from image import *
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import matplotlib.patches as mpatches
from matplotlib.widgets import Button
from matplotlib.patches import Ellipse
from matplotlib.lines import Line2D
from matplotlib import collections
from math import log10
import functions as func
from const import fwsig
import os
import numpy as N
def plotresults(img, ch0_image=True, rms_image=True, mean_image=True,
ch0_islands=True, gresid_image=True, sresid_image=False,
gmodel_image=True, smodel_image=False, pyramid_srcs=False,
source_seds=False, ch0_flagged=False, pi_image=False,
psf_major=False, psf_minor=False, psf_pa=False, broadcast=False):
"""Show the results of a fit."""
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global img_gaus_resid, img_shap_resid, pixels_per_beam, pix2sky
global vmin, vmax, vmin_cur, vmax_cur, ch0min, ch0max, img_pi
global low, fig, images, src_list, srcid_cur, sky2pix, markers
global img_psf_maj, img_psf_min, img_psf_pa, do_broadcast, samp_client
global samp_key, samp_gaul_table_url, samp_srl_table_url
if not has_pl:
print "\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting is disabled."
return
if hasattr(img, 'samp_client'):
samp_client = img.samp_client
samp_key = img.samp_key
if hasattr(img, 'samp_srl_table_url'):
samp_srl_table_url = img.samp_srl_table_url
else:
samp_srl_table_url = None
if hasattr(img, 'samp_gaul_table_url'):
samp_gaul_table_url = img.samp_gaul_table_url
else:
samp_gaul_table_url = None
else:
samp_client = None
samp_key = None
samp_srl_table_url = None
samp_gaul_table_url = None
do_broadcast = broadcast
# Define the images. The images are used both by imshow and by the
# on_press() and coord_format event handlers
pix2sky = img.pix2sky
sky2pix = img.sky2pix
gfactor = 2.0 * N.sqrt(2.0 * N.log(2.0))
pixels_per_beam = 2.0 * N.pi * (img.beam2pix(img.beam)[0]
* img.beam2pix(img.beam)[1]) / gfactor**2
# Construct lists of images, titles, etc.
images = []
titles = []
names = []
markers = []
img_gaus_mod = None # default needed for key press event
img_shap_mod = None # default needed for key press event
if ch0_image:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Original (ch0) Image\n(arbitrary logarithmic scale)')
names.append('ch0')
if ch0_islands:
img_ch0 = img.ch0_arr
images.append(img_ch0)
if hasattr(img, 'ngaus'):
if hasattr(img, 'ch0_pi_arr'):
ch0_str = 'Islands (hatched boundaries; red = PI only) and\nGaussians'
else:
ch0_str = 'Islands (hatched boundaries) and\nGaussians'
if hasattr(img, 'atrous_gaussians'):
ch0_str += ' (red = wavelet)'
titles.append(ch0_str)
else:
titles.append('Islands (hatched boundaries)')
names.append('ch0')
if ch0_flagged:
if not hasattr(img, 'ngaus'):
print 'Image was not fit with Gaussians. Skipping display of flagged Gaussians.'
else:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Flagged Gaussians')
names.append('ch0')
if pi_image:
if not hasattr(img, 'ch0_pi_arr'):
print 'Polarization module not run. Skipping PI image.'
else:
img_pi = img.ch0_pi_arr
images.append(img_pi)
titles.append('Polarized Intensity Image')
names.append('ch0_pi')
if rms_image:
img_rms = img.rms_arr
images.append(img_rms)
titles.append('Background rms Image')
names.append('rms')
if gresid_image:
if not hasattr(img, 'ngaus'):
print 'Image was not fit with Gaussians. Skipping residual Gaussian image.'
else:
img_gaus_resid = img.resid_gaus_arr
images.append(img_gaus_resid)
titles.append('Gaussian Residual Image')
names.append('gaus_resid')
if gmodel_image:
if not hasattr(img, 'ngaus'):
print 'Image was not fit with Gaussians. Skipping model Gaussian image.'
else:
img_gaus_mod = img.model_gaus_arr
images.append(img_gaus_mod)
titles.append('Gaussian Model Image')
names.append('gaus_mod')
if mean_image:
img_mean = img.mean_arr
images.append(img_mean)
titles.append('Background mean Image')
names.append('mean')
if sresid_image:
if img.opts.shapelet_do == False:
print 'Image was not decomposed into shapelets. Skipping residual shapelet image.'
else:
img_shap_resid = img.ch0_arr - img.model_shap_arr
images.append(img_shap_resid)
titles.append('Shapelet Residual Image')
names.append('shap_resid')
if smodel_image:
if img.opts.shapelet_do == False:
print 'Image was not decomposed into shapelets. Skipping model shapelet image.'
else:
img_shap_mod = img.model_shap_arr
images.append(img_shap_mod)
titles.append('Shapelet Model Image')
names.append('shap_mod')
if source_seds:
if img.opts.spectralindex_do == False:
print 'Source SEDs were not fit. Skipping source SED plots.'
else:
src_list = img.sources
sed_src = get_src(src_list, 0)
if sed_src is None:
print 'No sources found. Skipping source SED plots.'
else:
images.append('seds')
titles.append('')
names.append('seds')
srcid_cur = 0
if pyramid_srcs:
if img.opts.atrous_do == False:
print 'Image was not decomposed into wavelets. Skipping wavelet images.'
else:
# Get the unique j levels and store them. Only make subplots for
# occupied j levels
print 'Pyramidal source plots not yet supported.'
# j_list = []
# for p in img.pyrsrcs:
# for l in p.jlevels:
# j_list.append(l)
# j_set = set(j_list)
# j_with_gaus = list(j_set)
# index_first_waveplot = len(images)
# for i in range(len(j_with_gaus)):
# images.append('wavelets')
# names.append('pyrsrc'+str(i))
if psf_major or psf_minor or psf_pa:
if img.opts.psf_vary_do == False:
print 'PSF variation not calculated. Skipping PSF variation images.'
else:
if psf_major:
img_psf_maj = img.psf_vary_maj_arr*fwsig
images.append(img_psf_maj)
titles.append('PSF Major Axis FWHM (pixels)')
names.append('psf_maj')
if psf_minor:
img_psf_min = img.psf_vary_min_arr*fwsig
images.append(img_psf_min)
titles.append('PSF Minor Axis FWHM (pixels)')
names.append('psf_min')
if psf_pa:
img_psf_pa = img.psf_vary_pa_arr
images.append(img_psf_pa)
titles.append('PSF Pos. Angle FWHM (degrees)')
names.append('psf_pa')
if images == []:
print 'No images to display.'
return
im_mean = img.clipped_mean
im_rms = img.clipped_rms
if img.resid_gaus is None:
low = 1.1*abs(img.min_value)
else:
low = N.max([1.1*abs(img.min_value),1.1*abs(N.nanmin(img.resid_gaus))])
if low <= 0.0:
low = 1E-6
vmin_est = im_mean - im_rms*5.0 + low
if vmin_est <= 0.0:
vmin = N.log10(low)
else:
vmin = N.log10(vmin_est)
vmax = N.log10(im_mean + im_rms*30.0 + low)
ch0min = vmin
ch0max = N.log10(img.max_value + low)
vmin_cur = vmin
vmax_cur = vmax
origin = 'lower'
colours = ['m', 'b', 'c', 'g', 'y', 'k'] # reserve red ('r') for wavelets
styles = ['-', '-.', '--']
print '=' * 72
print 'NOTE -- With the mouse pointer in plot window:'
print ' Press "i" ........ : Get integrated flux densities and mean rms'
print ' values for the visible portion of the image'
print ' Press "m" ........ : Change min and max scaling values'
print ' Press "n" ........ : Show / hide island IDs'
print ' Press "0" ........ : Reset scaling to default'
if 'seds' in images:
print ' Press "c" ........ : Change source for SED plot'
if ch0_islands and hasattr(img, 'ngaus'):
print ' Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode, '
print ' toggled with the "zoom" button and indicated in '
print ' the lower right corner, must be off)'
if 'seds' in images:
print ' The SED plot will also show the chosen source.'
print '_' * 72
if len(images) > 1:
numx = 2
else:
numx = 1
numy = int(N.ceil(float(len(images))/float(numx)))
fig = pl.figure(figsize=(max(15, 10.0*float(numy)/float(numx)), 10.0))
fig.canvas.set_window_title('PyBDSM Fit Results for '+ img.filename)
gray_palette = cm.gray
gray_palette.set_bad('k')
for i, image in enumerate(images):
if image != 'wavelets' and image != 'seds':
if i == 0:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
else:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ', sharex=ax1' + \
', sharey=ax1)'
exec cmd
if 'PSF' in titles[i]:
im = image
else:
im = N.log10(image + low)
if 'Islands' in titles[i]:
island_offsets_x = []
island_offsets_y = []
border_color = []
ax = pl.gca()
for iisl, isl in enumerate(img.islands):
xb, yb = isl.border
if hasattr(isl, '_pi'):
for c in range(len(xb)):
border_color.append('r')
else:
for c in range(len(xb)):
border_color.append('#afeeee')
island_offsets_x += xb.tolist()
island_offsets_y += yb.tolist()
marker = ax.text(N.max(xb)+2, N.max(yb), str(isl.island_id),
color='#afeeee', clip_on=True)
marker.set_visible(not marker.get_visible())
markers.append(marker)
# draw the gaussians with one colour per source or island
# (if gaul2srl was not run)
if hasattr(img, 'nsrc'):
nsrc = len(isl.sources)
for isrc in range(nsrc):
col = colours[isrc % 6]
style = styles[isrc/6 % 3]
src = isl.sources[isrc]
for g in src.gaussians:
if hasattr(g, 'valid'):
valid = g.valid
else:
valid = True
if g.jlevel == 0 and valid and g.gaus_num >= 0:
gidx = g.gaus_num
e = Ellipse(xy=g.centre_pix, width=g.size_pix[0],
height=g.size_pix[1], angle=g.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_facecolor(col)
e.set_alpha(0.5)
e.gaus_id = gidx
e.src_id = src.source_id
e.jlevel = g.jlevel
e.isl_id = g.island_id
e.tflux = g.total_flux
e.pflux = g.peak_flux
e.centre_sky = g.centre_sky
if len(img.islands) > 0:
island_offsets = zip(N.array(island_offsets_x), N.array(island_offsets_y))
isl_borders = collections.AsteriskPolygonCollection(4, offsets=island_offsets, color=border_color,
transOffset=ax.transData, sizes=(10.0,))
ax.add_collection(isl_borders)
if hasattr(img, 'gaussians'):
for atrg in img.gaussians:
if atrg.jlevel > 0 and atrg.gaus_num >= 0:
col = 'r'
style = '-'
gidx = atrg.gaus_num
e = Ellipse(xy=atrg.centre_pix, width=atrg.size_pix[0], height=atrg.size_pix[1], angle=atrg.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_edgecolor(col)
e.set_facecolor('none')
e.set_alpha(0.8)
e.gaus_id = gidx
e.src_id = atrg.source_id
e.jlevel = atrg.jlevel
e.isl_id = atrg.island_id
e.tflux = atrg.total_flux
e.pflux = atrg.peak_flux
e.centre_sky = atrg.centre_sky
if 'Flagged' in titles[i]:
for iisl, isl in enumerate(img.islands):
ax = pl.gca()
style = '-'
for ig, g in enumerate(isl.fgaul):
col = colours[ig % 6]
ellx, elly = func.drawellipse(g)
gline, = ax.plot(ellx, elly, color = col,
linestyle = style, picker=3)
gline.flag = g.flag
if 'PSF' in titles[i]:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest', cmap=gray_palette)"
else:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest',vmin=vmin, vmax=vmax, cmap=gray_palette)"
exec cmd
cmd = 'ax' + str(i+1) + '.format_coord = format_coord_'+names[i]
exec cmd
pl.title(titles[i])
elif image == 'seds':
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
exec cmd
ax = pl.gca()
plot_sed(sed_src, ax)
elif image == 'wavelets':
if i == index_first_waveplot:
for j in range(len(j_with_gaus)):
cmd = 'ax' + str(j+i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(j+i+1) + ', sharex=ax1, '+\
'sharey=ax1)'
exec cmd
pl.title('Pyramidal Sources for\nWavelet Scale J = ' +
str(j_with_gaus[j]))
for pyr in img.pyrsrcs:
for iisl, isl in enumerate(pyr.islands):
jj = pyr.jlevels[iisl]
jindx = j_with_gaus.index(jj)
col = colours[pyr.pyr_id % 6]
ind = N.where(~isl.mask_active)
cmd = "ax" + str(jindx + index_first_waveplot + 1) + \
".plot(ind[0]+isl.origin[0], "\
"ind[1]+isl.origin[1], '.', color=col)"
exec cmd
fig.canvas.mpl_connect('key_press_event', on_press)
fig.canvas.mpl_connect('pick_event', on_pick)
pl.show()
pl.close('all')
def on_pick(event):
global images, srcid_cur, samp_client, samp_key, do_broadcast, samp_gaul_table_url, samp_srl_table_url
g = event.artist
if hasattr(g, 'gaus_id'):
gaus_id = g.gaus_id
src_id = g.src_id
isl_id = g.isl_id
tflux = g.tflux
pflux = g.pflux
wav_j = g.jlevel
if wav_j == 0:
print 'Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + '): F_tot = ' + str(round(tflux,4)) + \
' Jy, F_peak = ' + str(round(pflux,4)) + ' Jy/beam'
else:
print 'Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + ', wav #' + str(wav_j) + \
'): F_tot = ' + str(round(tflux,3)) + ' Jy, F_peak = ' + \
str(round(pflux,4)) + ' Jy/beam'
# Transmit src_id, gaus_id, and coordinates to SAMP Hub (if we are connected)
if do_broadcast and samp_key is not None:
if samp_gaul_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_gaul_table_url, gaus_id)
if samp_srl_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_srl_table_url, src_id)
func.send_coords(samp_client, samp_key, g.centre_sky)
# Change source SED
# First check that SEDs are being plotted and that the selected Gaussian
# is from the zeroth wavelet image
has_sed = False
if 'seds' in images and wav_j == 0:
has_sed = True
if not has_sed:
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, src_id)
if srcid_cur == src_id:
return
srcid_cur = src_id
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
else:
print 'Flagged Gaussian (flag = ' + str(g.flag) + '; use "' + \
"help 'flagging_opts'" + '" for flag meanings)'
pl.draw()
def on_press(event):
"""Handle keypresses"""
from interface import raw_input_no_history
import numpy
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global pixels_per_beam, vmin, vmax, vmin_cur, vmax_cur, img_pi
global ch0min, ch0max, low, fig, images, src_list, srcid_cur
global markers
if event.key == '0':
print 'Resetting limits to defaults (%.4f -- %.4f Jy/beam)' \
% (pow(10, vmin)-low,
pow(10, vmax)-low)
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(vmin, vmax)
vmin_cur = vmin
vmax_cur = vmax
pl.draw()
if event.key == 'm':
# Modify scaling
# First check that there are images to modify
has_image = False
for im in images:
if isinstance(im, numpy.ndarray):
has_image = True
if not has_image:
return
minscl = 'a'
while isinstance(minscl, str):
try:
if minscl == '':
minscl = pow(10, vmin_cur) - low
break
minscl = float(minscl)
except ValueError:
prompt = "Enter min value (current = %.4f Jy/beam) : " % (pow(10, vmin_cur)-low,)
try:
minscl = raw_input_no_history(prompt)
except RuntimeError:
print 'Sorry, unable to change scaling.'
return
minscl = N.log10(minscl + low)
maxscl = 'a'
while isinstance(maxscl, str):
try:
if maxscl == '':
maxscl = pow(10, vmax_cur) - low
break
maxscl = float(maxscl)
except ValueError:
prompt = "Enter max value (current = %.4f Jy/beam) : " % (pow(10, vmax_cur)-low,)
try:
maxscl = raw_input_no_history(prompt)
except RuntimeError:
print 'Sorry, unable to change scaling.'
return
maxscl = N.log10(maxscl + low)
if maxscl <= minscl:
print 'Max value must be greater than min value!'
return
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(minscl, maxscl)
vmin_cur = minscl
vmax_cur = maxscl
pl.draw()
if event.key == 'c':
# Change source SED
# First check that SEDs are being plotted
has_sed = False
if 'seds' in images:
has_sed = True
if not has_sed:
return
srcid = 'a'
while isinstance(srcid, str):
try:
if srcid == '':
srcid = srcid_cur
break
srcid = int(srcid)
except ValueError:
prompt = "Enter source ID (current = %i) : " % (srcid_cur,)
try:
srcid = raw_input_no_history(prompt)
except RuntimeError:
print 'Sorry, unable to change source.'
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, srcid)
if sed_src is None:
print 'Source not found!'
return
srcid_cur = srcid
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
pl.draw()
if event.key == 'i':
# Print info about visible region
has_image = False
axes_list = fig.get_axes()
# Get limits of visible region
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
has_image = True
break
if not has_image:
return
if xmin < 0:
xmin = 0
if xmax > img_ch0.shape[0]:
xmax = img_ch0.shape[0]
if ymin < 0:
ymin = 0
if ymax > img_ch0.shape[1]:
ymax = img_ch0.shape[1]
flux = N.nansum(img_ch0[xmin:xmax, ymin:ymax])/pixels_per_beam
mask = N.isnan(img_ch0[xmin:xmax, ymin:ymax])
num_pix_unmasked = float(N.size(N.where(mask == False), 1))
mean_rms = N.nansum(img_rms[xmin:xmax, ymin:ymax])/num_pix_unmasked
mean_map_flux = N.nansum(img_mean[xmin:xmax, ymin:ymax])/pixels_per_beam
if img_gaus_mod is None:
gaus_mod_flux = 0.0
else:
gaus_mod_flux = N.nansum(img_gaus_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print 'Visible region (%i:%i, %i:%i) :' % (xmin, xmax, ymin, ymax)
print ' ch0 flux density from sum of pixels ... : %f Jy'\
% (flux,)
print ' Background mean map flux density ...... : %f Jy'\
% (mean_map_flux,)
print ' Gaussian model flux density ........... : %f Jy'\
% (gaus_mod_flux,)
if img_shap_mod is not None:
shap_mod_flux = N.nansum(img_shap_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print ' Shapelet model flux density ........... : %f Jy'\
% (shap_mod_flux,)
print ' Mean rms (from rms map) ............... : %f Jy/beam'\
% (mean_rms,)
if event.key == 'n':
# Show/Hide island numbers
if markers:
for marker in markers:
marker.set_visible(not marker.get_visible())
pl.draw()
# The following functions add ra, dec and flux density to the
# coordinates in the lower-right-hand corner of the figure window.
# Since each axis needs its own function (to return its particular
# flux), we need a separate function for each subplot.
def format_coord_ch0(x, y):
"""Custom coordinate format for ch0 image"""
global img_ch0
im = img_ch0
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_ch0_pi(x, y):
"""Custom coordinate format for ch0 image"""
global img_pi
im = img_pi
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_rms(x, y):
"""Custom coordinate format for rms image"""
global img_rms
im = img_rms
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_mean(x, y):
"""Custom coordinate format for mean image"""
global img_mean
im = img_mean
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_mod(x, y):
"""Custom coordinate format for Gaussian model image"""
global img_gaus_mod
im = img_gaus_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_mod(x, y):
"""Custom coordinate format for shapelet model image"""
global img_shap_mod
im = img_shap_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_resid(x, y):
"""Custom coordinate format for Gaussian residual image"""
global img_gaus_resid
im = img_gaus_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_resid(x, y):
"""Custom coordinate format for shapelet residual image"""
global img_shap_resid
im = img_shap_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_psf_maj(x, y):
"""Custom coordinate format for PSF major image"""
global img_psf_maj
im = img_psf_maj
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_min(x, y):
"""Custom coordinate format for PSF minor image"""
global img_psf_min
im = img_psf_min
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_pa(x, y):
"""Custom coordinate format for PSF pos. ang. image"""
global img_psf_pa
im = img_psf_pa
coord_str = make_coord_str(x, y, im, unit='degrees')
return coord_str
def xy_to_radec_str(x, y):
"""Converts x, y in image coords to a sexigesimal string"""
from output import ra2hhmmss, dec2ddmmss
global pix2sky
ra, dec = pix2sky([x, y])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.1f" % (ra[2])).zfill(3)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+':'+str(dec[1]).zfill(2)+':'+str("%.1f" % (dec[2])).zfill(3)
return sra, sdec
def make_coord_str(x, y, im, unit='Jy/beam'):
"""Makes the x, y, ra, dec, flux string"""
rastr, decstr = xy_to_radec_str(x, y)
col = int(x + 0.5)
row = int(y + 0.5)
numcols, numrows = im.shape
if col >= 0 and col < numcols\
and row >= 0 and row < numrows:
z = im[col, row]
return 'x=%1.1f, y=%1.1f, RA=%s, Dec=%s, F=%+1.4f %s' % (x, y, rastr, decstr, z, unit)
else:
return 'x=%1.1f, y=%1.1f' % (x, y)
def plot_sed(src, ax):
"""Plots the SED for source 'src' to axis 'ax'"""
global sky2pix
global fig
ax.cla()
norm = src.spec_norm
spin = src.spec_indx
espin = src.e_spec_indx
y = N.array(src.specin_flux)
ey = N.array(src.specin_fluxE)
x = N.array(src.specin_freq)
ax.errorbar(N.log10(x/1e6), N.log10(y), yerr=ey/y, fmt='bo')
ax.plot(N.log10(x/1e6), N.log10(norm)+N.log10(x/src.specin_freq0)*spin,
'-g', label="alpha = %.2f" % (spin,))
pos = sky2pix(src.posn_sky_centroid)
xpos = int(pos[0])
ypos = int(pos[1])
pl.title('SED of source #'+str(src.source_id)+'\n'
+'(x = '+str(xpos)+', y = '+str(ypos)+')')
pl.xlabel('log Frequency (MHz)')
pl.ylabel('log Flux Density (Jy)')
pl.legend()
def get_src(src_list, srcid):
"""Returns the source for srcid or None if not found"""
for src in src_list:
if src.source_id == srcid:
return src
return None
| gpl-3.0 |
drammock/mne-python | tutorials/machine-learning/30_strf.py | 10 | 14437 | # -*- coding: utf-8 -*-
"""
=====================================================================
Spectro-temporal receptive field (STRF) estimation on continuous data
=====================================================================
This demonstrates how an encoding model can be fit with multiple continuous
inputs. In this case, we simulate the model behind a spectro-temporal receptive
field (or STRF). First, we create a linear filter that maps patterns in
spectro-temporal space onto an output, representing neural activity. We fit
a receptive field model that attempts to recover the original linear filter
that was used to create this data.
"""
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
###############################################################################
# Load audio data
# ---------------
#
# We'll read in the audio data from :footcite:`CrosseEtAl2016` in order to
# simulate a response.
#
# In addition, we'll downsample the data along the time dimension in order to
# speed up computation. Note that, depending on the input values, this may
# not be desired; for example, downsampling loses information if your input
# stimulus varies more quickly than 1/2 the sampling rate to which we are
# downsampling.
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
###############################################################################
# Create a receptive field
# ------------------------
#
# We'll simulate a linear receptive field for a theoretical neural signal. This
# defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Simulate a neural response
# --------------------------
#
# Using this receptive field, we'll create an artificial neural response to
# a stimulus.
#
# To do this, we'll create a time-delayed version of the receptive field, and
# then calculate the dot product between this and the stimulus. Note that this
# is effectively doing a convolution between the stimulus and the receptive
# field. See `here <https://en.wikipedia.org/wiki/Convolution>`_ for more
# information.
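# Hedged aside (added for illustration, not part of the original tutorial): for a
# single frequency channel, the delayed dot product computed below is, up to the
# time alignment implied by tmin/tmax and edge handling, a 1-D convolution of that
# channel with its weight kernel (e.g. np.convolve(channel, kernel)); summing such
# terms over all channels gives the full simulated response.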
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
# These arrays will take/put particular indices in the data
take = [slice(None)] * X.ndim
put = [slice(None)] * X.ndim
if ix_delay > 0:
take[-1] = slice(None, -ix_delay)
put[-1] = slice(ix_delay, None)
elif ix_delay < 0:
take[-1] = slice(-ix_delay, None)
put[-1] = slice(None, ix_delay)
X_del[ii][tuple(put)] = X[tuple(take)]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
# Simulate this epoch and add random noise
noise_amp = .002
y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
###############################################################################
# Fit a model to recover this receptive field
# -------------------------------------------
#
# Finally, we'll use the :class:`mne.decoding.ReceptiveField` class to recover
# the linear receptive field of this signal. Note that properties of the
# receptive field (e.g. smoothness) will depend on the autocorrelation in the
# inputs and outputs.
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
(X_train, X_test, y_train, y_test)]
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores[ii] = rf.score(X_test, y_test)
models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Visualize the effects of regularization
# ---------------------------------------
#
# Above we fit a :class:`mne.decoding.ReceptiveField` model for one of many
# values for the ridge regularization parameter. Here we will plot the model
# score as well as the model coefficients for each value, in order to
# visualize how coefficients change with different levels of regularization.
# These issues as well as the STRF pipeline are described in detail
# in :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,HoldgrafEtAl2016`.
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
plt.xticks([], [])
plt.yticks([], [])
plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
###############################################################################
# Using different regularization types
# ------------------------------------
# In addition to the standard ridge regularization, the
# :class:`mne.decoding.TimeDelayingRidge` class also exposes
# a `Laplacian <https://en.wikipedia.org/wiki/Laplacian_matrix>`_ regularization
# term:
#
# .. math::
# \left[\begin{matrix}
# 1 & -1 & & & & \\
# -1 & 2 & -1 & & & \\
# & -1 & 2 & -1 & & \\
# & & \ddots & \ddots & \ddots & \\
# & & & -1 & 2 & -1 \\
# & & & & -1 & 1\end{matrix}\right]
#
# This imposes a smoothness constraint of nearby time samples and/or features.
# Quoting :footcite:`CrosseEtAl2016` :
#
# Tikhonov [identity] regularization (Equation 5) reduces overfitting by
# smoothing the TRF estimate in a way that is insensitive to
# the amplitude of the signal of interest. However, the Laplacian
# approach (Equation 6) reduces off-sample error whilst preserving
# signal amplitude (Lalor et al., 2006). As a result, this approach
# usually leads to an improved estimate of the system’s response (as
# indexed by MSE) compared to Tikhonov regularization.
#
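# A minimal sketch (added for illustration; ``n_delays_demo`` and ``lap_demo`` are
# hypothetical names not used elsewhere in this tutorial) of the Laplacian matrix
# shown above, built explicitly with numpy:
n_delays_demo = 5
lap_demo = 2 * np.eye(n_delays_demo) - np.eye(n_delays_demo, k=1) - np.eye(n_delays_demo, k=-1)
lap_demo[0, 0] = lap_demo[-1, -1] = 1 # boundary rows have a single neighbour
print(lap_demo)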
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
alpha=alpha)
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores_lap[ii] = rf.score(X_test, y_test)
models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
###############################################################################
# Compare model performance
# -------------------------
# Below we visualize the model performance of each regularization method
# (ridge vs. Laplacian) for different levels of alpha. As you can see, the
# Laplacian method performs better in general, because it imposes a smoothness
# constraint along the time and feature dimensions of the coefficients.
# This matches the "true" receptive field structure and results in a better
# model fit.
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
scores_lap[ix_best_alpha_lap]),
(ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Laplacian')
ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
###############################################################################
# Plot the original STRF, and the one that we recovered with modeling.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# References
# ==========
# .. footbibliography::
| bsd-3-clause |
scottlittle/nolearn | nolearn/lasagne/visualize.py | 3 | 7850 | from itertools import product
from lasagne.layers import get_output
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='nearest')
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='nearest')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='nearest')
def occlusion_heatmap(net, x, target, square_length=7):
"""An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data; should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Returns
-------
heat_array : np.array (with same size as image)
A 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
"""
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = net.layers_[-1].num_units
img = x[0].copy()
shape = x.shape
heat_array = np.zeros(shape[2:])
pad = square_length // 2 + 1
x_occluded = np.zeros((shape[2], shape[3], shape[2], shape[3]),
dtype=img.dtype)
# generate occluded images
for i, j in product(*map(range, shape[2:])):
x_padded = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_padded[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[i, j, :, :] = x_padded[:, pad:-pad, pad:-pad]
# make batch predictions for each occluded image
probs = np.zeros((shape[2], shape[3], num_classes))
for i in range(shape[3]):
y_proba = net.predict_proba(x_occluded[:, i:i + 1, :, :])
probs[:, i:i + 1, :] = y_proba.reshape(shape[2], 1, num_classes)
# from predicted probabilities, pick only those of target class
for i, j in product(*map(range, shape[2:])):
heat_array[i, j] = probs[i, j, target]
return heat_array
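# Hedged usage sketch (added; ``net``, ``X_img`` and ``y_true`` are hypothetical names,
# not part of this module): for a fitted nolearn NeuralNet and image data of shape
# (n, c, x, y),
# heat = occlusion_heatmap(net, X_img[0:1], target=y_true[0], square_length=7)
# plt.imshow(heat, cmap='Reds')
# would show which regions the prediction for that sample depends on.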
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
"""Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data; should be of shape (b, c, x, y). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
"""
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = occlusion_heatmap(
net, X[n:n + 1, :, :, :], target[n], square_length
)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
| mit |
f3r/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
pnedunuri/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer from the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
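# Possible extension (a sketch, not part of the original example): rank the
# transform methods by the Frobenius norm of their full-image reconstruction
# error, reusing the reconstructions computed above.
for method_title, reconstruction in sorted(reconstructions.items()):
    frobenius_error = np.sqrt(np.sum((reconstruction - lena) ** 2))
    print('%s: Frobenius norm of the difference = %.2f'
          % (method_title.replace('\n', ' '), frobenius_error))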
| bsd-3-clause |
ScreamingUdder/mantid | scripts/HFIRPowderReduction/HfirPDReductionGUI.py | 1 | 91448 | # pylint: disable=invalid-name, relative-import, too-many-lines,too-many-instance-attributes,too-many-arguments,C901
################################################################################
# Main class for HFIR powder reduction GUI
# Keywords for future development: FUTURE, NEXT, REFACTOR, RELEASE 2.0
################################################################################
from __future__ import (absolute_import, division, print_function)
from six.moves import range
import numpy
import os
try:
import urllib.request as urllib
except ImportError:
import urllib
from .ui_MainWindow import Ui_MainWindow # import line for the UI python class
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
import mantid
import mantidqtpython as mqt
from . import HfirPDReductionControl
# ----- default configuration ---------------
DEFAULT_SERVER = 'http://neutron.ornl.gov/user_data'
DEFAULT_INSTRUMENT = 'hb2a'
DEFAULT_WAVELENGTH = 2.4100
# -------------------------------------------
class EmptyError(Exception):
""" Exception for finding empty input for integer or float
"""
def __init__(self, value):
""" Init
"""
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class MultiScanTabState(object):
""" Description of the state of the multi-scan-tab is in
"""
NO_OPERATION = 0
RELOAD_DATA = 1
REDUCE_DATA = 2
def __init__(self):
""" Initialization
:return:
"""
self._expNo = -1
self._scanList = []
self._xMin = None
self._xMax = None
self._binSize = 0
self._unit = ''
self._plotRaw = False
self._useDetEfficiencyCorrection = False
self._excludeDetectors = []
def compare_state(self, tab_state):
""" Compare this tab state and another tab state
:param tab_state:
:return:
"""
if isinstance(tab_state, MultiScanTabState) is False:
raise NotImplementedError('compare_state must have MultiScanTabStatus as input.')
        if self._expNo != tab_state.getExpNumber() or self._scanList != tab_state.getScanList():
return self.RELOAD_DATA
for attname in self.__dict__.keys():
if self.__getattribute__(attname) != tab_state.__getattribute__(attname):
return self.REDUCE_DATA
return self.NO_OPERATION
def getExpNumber(self):
""" Get experiment number
:return:
"""
return self._expNo
def getScanList(self):
""" Get the list of scans
:return:
"""
return self._scanList[:]
    # pylint: disable=too-many-arguments
def setup(self, exp_no, scan_list, min_x, max_x, bin_size, unit, raw, correct_det_eff, exclude_dets):
"""
Set up the object
:param exp_no:
:param scan_list:
:param min_x:
:param max_x:
:param bin_size:
:param unit:
:param raw:
:param correct_det_eff:
:param exclude_dets:
:return:
"""
self._expNo = int(exp_no)
if isinstance(scan_list, list) is False:
raise NotImplementedError('Scan_List must be list!')
self._scanList = scan_list
self._xMin = min_x
self._xMax = max_x
self._binSize = float(bin_size)
self._unit = str(unit)
self._plotRaw = raw
self._useDetEfficiencyCorrection = correct_det_eff
self._excludeDetectors = exclude_dets
return
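# Usage sketch (illustrative values, not executed here; previous_state stands
# for a hypothetical earlier snapshot): the GUI snapshots the multi-scan tab
# into a MultiScanTabState, compares it with the previous snapshot and picks
# the cheapest action. A change of experiment number or scan list forces a
# reload; any other change only requires re-reduction.
#
#     new_state = MultiScanTabState()
#     new_state.setup(exp_no=231, scan_list=[1, 2, 3], min_x=5.0, max_x=150.0,
#                     bin_size=0.1, unit='2theta', raw=False,
#                     correct_det_eff=True, exclude_dets=[])
#     action = previous_state.compare_state(new_state)
#     if action == MultiScanTabState.RELOAD_DATA:
#         pass  # reload the SPICE files, then reduce
#     elif action == MultiScanTabState.REDUCE_DATA:
#         pass  # re-bin the already loaded workspaces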
# pylint: disable=too-many-public-methods,too-many-branches,too-many-locals,too-many-statements
class MainWindow(QtGui.QMainWindow):
""" Class of Main Window (top)
"""
# Copy to ui.setupUI
# # Version 3.0 + Import for ui_MainWindow.py
# from MplFigureCanvas import Qt4MplCanvas
# # Replace 'self.graphicsView = QtGui.QtGraphicsView' with the following
# self.graphicsView = Qt4MplCanvas(self.centralwidget)
# self.mainplot = self.graphicsView.getPlot()
def __init__(self, parent=None):
""" Initialization and set up
"""
# Base class
QtGui.QMainWindow.__init__(self, parent)
# UI Window (from Qt Designer)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Define gui-event handling
# menu
self.connect(self.ui.actionQuit, QtCore.SIGNAL('triggered()'),
self.doExist)
self.connect(self.ui.actionFind_Help, QtCore.SIGNAL('triggered()'),
self.doHelp)
# main
self.connect(self.ui.comboBox_wavelength, QtCore.SIGNAL('currentIndexChanged(int)'),
self.doUpdateWavelength)
self.connect(self.ui.pushButton_browseExcludedDetFile, QtCore.SIGNAL('clicked()'),
self.doBrowseExcludedDetetorFile)
self.connect(self.ui.checkBox_useDetExcludeFile, QtCore.SIGNAL('stateChanged(int)'),
self.do_enable_excluded_dets)
# tab 'Raw Detectors'
self.connect(self.ui.pushButton_plotRaw, QtCore.SIGNAL('clicked()'),
self.doPlotRawPtMain)
self.connect(self.ui.pushButton_ptUp, QtCore.SIGNAL('clicked()'),
self.do_plot_raw_pt_prev)
self.connect(self.ui.pushButton_ptDown, QtCore.SIGNAL('clicked()'),
self.doPlotRawPtNext)
self.connect(self.ui.pushButton_clearRawDets, QtCore.SIGNAL('clicked()'),
self.doClearRawDetCanvas)
# tab 'Individual Detectors'
self.connect(self.ui.pushButton_plotIndvDet, QtCore.SIGNAL('clicked()'),
self.doPlotIndvDetMain)
self.connect(self.ui.pushButton_plotPrevDet, QtCore.SIGNAL('clicked()'),
self.doPlotIndvDetPrev)
self.connect(self.ui.pushButton_plotNextDet, QtCore.SIGNAL('clicked()'),
self.doPlotIndvDetNext)
self.connect(self.ui.pushButton_clearCanvasIndDet, QtCore.SIGNAL('clicked()'),
self.doClearIndDetCanvas)
self.connect(self.ui.pushButton_plotLog, QtCore.SIGNAL('clicked()'),
self.do_plot_sample_log)
# tab 'Normalized'
self.connect(self.ui.pushButton_loadData, QtCore.SIGNAL('clicked()'),
self.doLoadData)
self.connect(self.ui.pushButton_prevScan, QtCore.SIGNAL('clicked()'),
self.doLoadReduceScanPrev)
self.connect(self.ui.pushButton_nextScan, QtCore.SIGNAL('clicked()'),
self.doLoadReduceScanNext)
self.connect(self.ui.pushButton_unit2theta, QtCore.SIGNAL('clicked()'),
self.doReduce2Theta)
self.connect(self.ui.pushButton_unitD, QtCore.SIGNAL('clicked()'),
self.doReduceDSpacing)
self.connect(self.ui.pushButton_unitQ, QtCore.SIGNAL('clicked()'),
self.doReduceQ)
self.connect(self.ui.pushButton_saveData, QtCore.SIGNAL('clicked()'),
self.doSaveData)
self.connect(self.ui.pushButton_clearTab2Canvas, QtCore.SIGNAL('clicked()'),
self.doClearCanvas)
# tab 'Multiple Scans'
self.connect(self.ui.pushButton_loadMultData, QtCore.SIGNAL('clicked()'),
self.doLoadSetData)
self.connect(self.ui.pushButton_mscanBin, QtCore.SIGNAL('clicked()'),
self.doReduceSetData)
self.connect(self.ui.pushButton_mergeScans, QtCore.SIGNAL('clicked()'),
self.doMergeScans)
self.connect(self.ui.pushButton_viewMScan1D, QtCore.SIGNAL('clicked()'),
self.doMergeScanView1D)
self.connect(self.ui.pushButton_view2D, QtCore.SIGNAL('clicked()'),
self.doMergeScanView2D)
self.connect(self.ui.pushButton_viewMerge, QtCore.SIGNAL('clicked()'),
self.doMergeScanViewMerged)
self.connect(self.ui.pushButton_clearMultCanvas, QtCore.SIGNAL('clicked()'),
self.doClearMultiRunCanvas)
self.connect(self.ui.pushButton_saveAllIndScans, QtCore.SIGNAL('clicked()'),
self.doSaveMultipleScans)
self.connect(self.ui.pushButton_saveMerge, QtCore.SIGNAL('clicked()'),
self.doSaveMergedScan)
self.connect(self.ui.pushButton_plotRawMultiScans, QtCore.SIGNAL('clicked()'),
self.do_convert_plot_multi_scans)
# tab 'Vanadium'
self.connect(self.ui.pushButton_stripVanPeaks, QtCore.SIGNAL('clicked()'),
self.doStripVandiumPeaks)
self.connect(self.ui.pushButton_saveVanRun, QtCore.SIGNAL('clicked()'),
self.doSaveVanRun)
self.connect(self.ui.pushButton_rebin2Theta, QtCore.SIGNAL('clicked()'),
self.doReduceVanadium2Theta)
self.connect(self.ui.pushButton_smoothVanData, QtCore.SIGNAL('clicked()'),
self.doSmoothVanadiumData)
self.connect(self.ui.pushButton_applySmooth, QtCore.SIGNAL('clicked()'),
self.doSmoothVanadiumApply)
self.connect(self.ui.pushButton_undoSmooth, QtCore.SIGNAL('clicked()'),
self.doSmoothVanadiumUndo)
# tab 'Advanced Setup'
self.connect(self.ui.pushButton_browseCache, QtCore.SIGNAL('clicked()'),
self.doBrowseCache)
self.connect(self.ui.radioButton_useServer, QtCore.SIGNAL('clicked()'),
self.doChangeSrcLocation)
self.connect(self.ui.radioButton_useLocal, QtCore.SIGNAL('clicked()'),
self.doChangeSrcLocation)
self.connect(self.ui.pushButton_browseLocalSrc, QtCore.SIGNAL('clicked()'),
self.doBrowseLocalDataSrc)
self.connect(self.ui.pushButton_chkServer, QtCore.SIGNAL('clicked()'),
self.doCheckSrcServer)
# Define signal-event handling
# define event handlers for matplotlib canvas
self.ui.graphicsView_mergeRun.canvas.mpl_connect('button_press_event',
self.on_mouseDownEvent)
self.ui.graphicsView_mergeRun.canvas.mpl_connect('motion_notify_event',
self.on_mouseMotion)
# Widget type definition
validator0 = QtGui.QIntValidator(self.ui.lineEdit_expNo)
validator0.setBottom(1)
self.ui.lineEdit_expNo.setValidator(validator0)
validator1 = QtGui.QIntValidator(self.ui.lineEdit_expNo)
validator1.setBottom(1)
self.ui.lineEdit_scanNo.setValidator(validator1)
validator2 = QtGui.QDoubleValidator(self.ui.lineEdit_wavelength)
validator2.setBottom(0.)
self.ui.lineEdit_wavelength.setValidator(validator2)
validator3 = QtGui.QDoubleValidator(self.ui.lineEdit_xmin)
validator3.setBottom(0.)
self.ui.lineEdit_xmin.setValidator(validator3)
validator4 = QtGui.QDoubleValidator(self.ui.lineEdit_xmax)
validator4.setBottom(0.)
self.ui.lineEdit_xmax.setValidator(validator4)
validator5 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize)
validator5.setBottom(0.)
self.ui.lineEdit_binsize.setValidator(validator5)
validator6 = QtGui.QDoubleValidator(self.ui.lineEdit_ptNo)
validator6.setBottom(0)
self.ui.lineEdit_ptNo.setValidator(validator6)
validator7 = QtGui.QDoubleValidator(self.ui.lineEdit_detID)
validator7.setBottom(0)
self.ui.lineEdit_detID.setValidator(validator7)
validator8 = QtGui.QDoubleValidator(self.ui.lineEdit_min2Theta)
validator8.setBottom(0.)
self.ui.lineEdit_min2Theta.setValidator(validator8)
validator9 = QtGui.QDoubleValidator(self.ui.lineEdit_max2Theta)
validator9.setBottom(0.)
self.ui.lineEdit_max2Theta.setValidator(validator9)
validator10 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize2Theta)
validator10.setBottom(0.)
self.ui.lineEdit_binsize2Theta.setValidator(validator10)
validator11 = QtGui.QIntValidator(self.ui.lineEdit_scanStart)
validator11.setBottom(1)
self.ui.lineEdit_scanStart.setValidator(validator11)
validator12 = QtGui.QIntValidator(self.ui.lineEdit_scanEnd)
validator12.setBottom(1)
self.ui.lineEdit_scanEnd.setValidator(validator12)
validator13 = QtGui.QDoubleValidator(self.ui.lineEdit_normalizeMonitor)
validator13.setBottom(0.)
self.ui.lineEdit_normalizeMonitor.setValidator(validator13)
validator14 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMinX)
validator14.setBottom(0.)
self.ui.lineEdit_mergeMinX.setValidator(validator14)
validator15 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMaxX)
validator15.setBottom(0.)
self.ui.lineEdit_mergeMaxX.setValidator(validator15)
validator16 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeBinSize)
validator16.setBottom(0.)
self.ui.lineEdit_mergeBinSize.setValidator(validator16)
# Get initial setup
# RELEASE 2.0 - This part will be implemented soon as default configuration is made
# Mantid configuration
self._instrument = str(self.ui.comboBox_instrument.currentText())
# UI widgets setup
self.ui.comboBox_outputFormat.addItems(['Fullprof']) # Supports Fullprof only now, 'GSAS', 'Fullprof+GSAS'])
# RELEASE 2.0 : Need to disable some widgets... consider to refactor the code
self.ui.radioButton_useServer.setChecked(True)
self.ui.radioButton_useLocal.setChecked(False)
self.ui.checkBox_useDetExcludeFile.setChecked(True)
self.ui.comboBox_wavelength.setCurrentIndex(0)
self.ui.lineEdit_wavelength.setText('2.41')
self.ui.pushButton_unit2theta.setText(r'$2\theta$')
# vanadium spectrum smooth parameters
self.ui.lineEdit_smoothParams.setText('20,2')
# Set up data source
self._serverAddress = DEFAULT_SERVER
self._srcFromServer = True
self._localSrcDataDir = None
self._srcAtLocal = False
self._currUnit = '2theta'
# Workspaces
self._myControl = HfirPDReductionControl.HFIRPDRedControl()
# Interactive graphics
self._viewMerge_X = None
self._viewMerge_Y = None
# Control of plots: key = canvas, value = list of 2-integer-tuple (expno, scanno)
self._tabLineDict = {}
self._tabBinParamDict = {}
for key in [2]:
self._tabLineDict[key] = []
for key in [2, 3, 4]:
self._tabBinParamDict[key] = [None, None, None]
self._lastMergeLabel = ""
self._lastMergeIndex = -1
self._expNo = None
self._scanNo = None
self._detID = None
self._indvXLabel = None
self._rawDetExpNo = None
self._rawDetScanNo = None
self._rawDetPlotMode = None
self._rawDetPtNo = None
self._indvDetCanvasMode = 'samplelog'
# Multiple scan tab
self._multiScanExp = None
self._multiScanList = []
# help
self.assistantProcess = QtCore.QProcess(self)
# pylint: disable=protected-access
self.collectionFile = os.path.join(mantid._bindir, '../docs/qthelp/MantidProject.qhc')
version = ".".join(mantid.__version__.split(".")[:2])
self.qtUrl = 'qthelp://org.sphinx.mantidproject.' + version + '/doc/interfaces/HFIR Powder Reduction.html'
self.externalUrl = 'http://docs.mantidproject.org/nightly/interfaces/HFIR Powder Reduction.html'
# Initial setup for tab
self.ui.tabWidget.setCurrentIndex(0)
cache_dir = str(self.ui.lineEdit_cache.text()).strip()
if len(cache_dir) == 0 or os.path.exists(cache_dir) is False:
invalid_cache = cache_dir
cache_dir = os.path.expanduser('~')
self.ui.lineEdit_cache.setText(cache_dir)
if len(invalid_cache) == 0:
warning_msg = 'Cache directory is not set. '
else:
warning_msg = 'Cache directory {0} does not exist. '.format(invalid_cache)
            warning_msg += 'Using {0} for caching downloaded file instead.'.format(cache_dir)
print ('[WARNING] {0}'.format(warning_msg))
# Get on hold of raw data file
useserver = self.ui.radioButton_useServer.isChecked()
uselocal = self.ui.radioButton_useLocal.isChecked()
if useserver == uselocal:
self._logWarning("It is logically wrong to set up (1) neither server or local dir to "
"access data or (2) both server and local dir to retrieve data. "
"As default, it is set up to download data from server.")
useserver = True
uselocal = False
self.ui.radioButton_useServer.setChecked(True)
self.ui.radioButton_useLocal.setChecked(False)
# register startup
mantid.UsageService.registerFeatureUsage("Interface", "HfirPowderReduction", False)
return
# -- Event Handling ----------------------------------------------------
def doBrowseCache(self):
""" Pop out a dialog to let user specify the directory to
cache downloaded data
"""
# home directory
homedir = str(self.ui.lineEdit_cache.text()).strip()
if len(homedir) > 0 and os.path.exists(homedir):
home = homedir
else:
home = os.getcwd()
# pop out a dialog
dirs = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory', home))
# set to line edit
if dirs != home:
self.ui.lineEdit_cache.setText(dirs)
return
def doBrowseExcludedDetetorFile(self):
""" Browse excluded detector's file
Return :: None
"""
# Get file name
filefilter = "Text (*.txt);;Data (*.dat);;All files (*)"
curDir = os.getcwd()
excldetfnames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', curDir, filefilter)
try:
excldetfname = excldetfnames[0]
self.ui.lineEdit_excludedDetFileName.setText(excldetfname)
except IndexError:
# return if there is no file selected
return
# Parse det exclusion file
print("Detector exclusion file name is %s." % (excldetfname))
excludedetlist, errmsg = self._myControl.parseExcludedDetFile('HB2A', excldetfname)
if len(errmsg) > 0:
self._logError(errmsg)
textbuf = ""
for detid in excludedetlist:
textbuf += "%d," % (detid)
if len(textbuf) > 0:
textbuf = textbuf[:-1]
self.ui.lineEdit_detExcluded.setText(textbuf)
def doBrowseLocalDataSrc(self):
""" Browse local data storage
"""
msg = "Browse local data storage location. Implement ASAP"
QtGui.QMessageBox.information(self, "Click!", msg)
def doChangeSrcLocation(self):
""" Source file location is changed
"""
useserver = self.ui.radioButton_useServer.isChecked()
uselocal = self.ui.radioButton_useLocal.isChecked()
print("Use Server: ", useserver)
print("Use Local : ", uselocal)
if (useserver and uselocal) or not (useserver or uselocal):
raise NotImplementedError("Impossible for radio buttons")
self._srcAtLocal = uselocal
self._srcFromServer = useserver
if uselocal is True:
self.ui.lineEdit_dataIP.setDisabled(True)
self.ui.pushButton_chkServer.setDisabled(True)
self.ui.lineEdit_localSrc.setDisabled(False)
self.ui.pushButton_browseLocalSrc.setDisabled(False)
else:
self.ui.lineEdit_dataIP.setDisabled(False)
self.ui.pushButton_chkServer.setDisabled(False)
self.ui.lineEdit_localSrc.setDisabled(True)
self.ui.pushButton_browseLocalSrc.setDisabled(True)
def doCheckSrcServer(self):
"""" Check source data server's availability
"""
msg = "Check source data server! Implement ASAP"
QtGui.QMessageBox.information(self, "Click!", msg)
def doClearCanvas(self):
""" Clear canvas
"""
itab = self.ui.tabWidget.currentIndex()
if itab == 2:
self.ui.graphicsView_reducedData.clearAllLines()
self._tabLineDict[itab] = []
def doClearIndDetCanvas(self):
""" Clear the canvas in tab 'Individual Detector' and current plotted lines
in managing dictionary
"""
# Clear all lines on canvas
self.ui.graphicsView_indvDet.clearAllLines()
# Remove their references in dictionary
if self.ui.graphicsView_indvDet in self._tabLineDict:
self._tabLineDict[self.ui.graphicsView_indvDet] = []
# Reset colur schedule
self.ui.graphicsView_indvDet.resetLineColorStyle()
def doClearMultiRunCanvas(self):
""" Clear the canvas in tab 'Multiple Run'
This canvas is applied to both 1D and 2D image.
Clear-all-lines might be not enough to clear 2D image
"""
self.ui.graphicsView_mergeRun.clearCanvas()
def doClearRawDetCanvas(self):
""" Clear the canvas in tab 'Raw Detector':
only need to clear lines
"""
self.ui.graphicsView_Raw.clearAllLines()
self._tabLineDict[self.ui.graphicsView_Raw] = []
def doClearVanadiumCanvas(self):
""" Clear the canvas in tab 'Vanadium'
"""
self.ui.graphicsView_vanPeaks.clearAllLines()
def doExist(self):
""" Exist the application
"""
clearcache = self.ui.checkBox_delCache.isChecked()
if clearcache:
urllib.delAllFile(self._cache)
self.close()
def doHelp(self):
""" Show help
Copied from DGSPlanner
"""
try:
import pymantidplot
pymantidplot.proxies.showCustomInterfaceHelp('HFIR Powder Reduction')
except ImportError:
self.assistantProcess.close()
self.assistantProcess.waitForFinished()
helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
helpapp += 'assistant'
args = ['-enableRemoteControl', '-collectionFile', self.collectionFile, '-showUrl', self.qtUrl]
if os.path.isfile(helpapp) and os.path.isfile(self.collectionFile):
self.assistantProcess.close()
self.assistantProcess.waitForFinished()
self.assistantProcess.start(helpapp, args)
else:
mqt.MantidQt.API.MantidDesktopServices.openUrl(QtCore.QUrl(self.externalUrl))
def _load_spice_data_to_raw_table(self, exp_no, scan_no, data_file_name):
# flake8: noqa
try:
success = self._myControl.loadSpicePDData(exp_no, scan_no, data_file_name)
return success, "" if success else "Load data failed."
except NotImplementedError as ne:
return False, str(ne)
def _get_corr_file_names_and_wavelength(self, exp_no, scan_no, data_file_name):
# Obtain the correction file names and wavelength from SPICE file
wavelength_error = False
err_msg = ""
local_dir = os.path.dirname(data_file_name)
try:
status, return_body = self._myControl.retrieveCorrectionData(instrument='HB2A',
exp=exp_no, scan=scan_no,
localdatadir=local_dir)
except NotImplementedError as e:
err_msg = str(e)
if err_msg.count('m1') > 0:
# error is about wavelength
status = False
wavelength_error = True
else:
# other error
raise e
if status:
auto_wavelength = return_body[0]
van_corr_filename = return_body[1]
excl_det_filename = return_body[2]
if van_corr_filename is not None:
self.ui.lineEdit_vcorrFileName.setText(van_corr_filename)
if excl_det_filename is not None:
self.ui.lineEdit_excludedDetFileName.setText(excl_det_filename)
else:
auto_wavelength = None
van_corr_filename = None
excl_det_filename = None
return auto_wavelength, van_corr_filename, excl_det_filename, wavelength_error, err_msg
def _set_wavelength(self, auto_wavelength, wavelength_error, exp_no, scan_no, err_msg):
if auto_wavelength is None:
# unable to get wavelength from SPICE data
self.ui.comboBox_wavelength.setCurrentIndex(4)
if wavelength_error:
self.ui.lineEdit_wavelength.setText(err_msg)
else:
self.ui.lineEdit_wavelength.setText(self.ui.comboBox_wavelength.currentText())
self._myControl.setWavelength(exp_no, scan_no, wavelength=None)
else:
# get wavelength from SPICE data. set value to GUI
self.ui.lineEdit_wavelength.setText(str(auto_wavelength))
allowed_wavelengths = [2.41, 1.54, 1.12]
num_items = self.ui.comboBox_wavelength.count()
good = False
for ic in range(num_items - 1):
if abs(auto_wavelength - allowed_wavelengths[ic]) < 0.01:
good = True
self.ui.comboBox_wavelength.setCurrentIndex(ic)
if not good:
self.ui.comboBox_wavelength.setCurrentIndex(num_items - 1)
self._myControl.setWavelength(exp_no, scan_no, wavelength=auto_wavelength)
def _get_and_parse_det_efficiency_file(self, van_corr_filename):
if self.ui.checkBox_useDetEffCorr.isChecked():
# Apply detector efficiency correction
if van_corr_filename is None:
# browse vanadium correction file
file_filter = "Text (*.txt);;Data (*.dat);;All files (*)"
current_dir = os.getcwd()
van_corr_filenames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', current_dir, file_filter)
if len(van_corr_filenames) > 0:
van_corr_filename = van_corr_filenames[0]
self.ui.lineEdit_vcorrFileName.setText(str(van_corr_filename))
else:
self._logError("User does not specify any vanadium correction file.")
self.ui.checkBox_useDetEffCorr.setChecked(False)
# Parse if it is not None
if van_corr_filename is not None:
detector_efficiency_ws, err_msg = self._myControl.parseDetEffCorrFile('HB2A', van_corr_filename)
if detector_efficiency_ws is None:
print("Parsing detectors efficiency file error: {0}.".format(err_msg))
return None
else:
return detector_efficiency_ws
else:
return None
else:
            # Not chosen to apply detector efficiency correction
return None
def _parse_spice_data_to_MDEventWS(self, detector_efficiency_table, exp_no, scan_no):
try:
print("Det Efficiency Table WS: ", str(detector_efficiency_table))
exec_status = self._myControl.parseSpiceData(exp_no, scan_no, detector_efficiency_table)
return exec_status, "" if exec_status else "Parse data failed."
except NotImplementedError as e:
return False, str(e)
def _parse_detector_exclusion_file(self, exclude_detector_filename):
if exclude_detector_filename is not None:
exclude_detector_list, err_msg = self._myControl.parseExcludedDetFile('HB2A', exclude_detector_filename)
text_buf = ""
for det_id in exclude_detector_list:
text_buf += "{0},".format(det_id)
if len(text_buf) > 0:
text_buf = text_buf[:-1]
self.ui.lineEdit_detExcluded.setText(text_buf)
def doLoadData(self, exp=None, scan=None):
""" Load and reduce data
It does not support for tab 'Advanced Setup'
For tab 'Raw Detector' and 'Individual Detector', this method will load data to MDEventWorkspaces
        For tab 'Normalized' and 'Vanadium', this method will load data to MDEventWorkspaces but NOT reduce to a single spectrum
"""
# Kick away unsupported tabs
i_tab = self.ui.tabWidget.currentIndex()
tab_text = str(self.ui.tabWidget.tabText(i_tab))
print("[DB] Current active tab is No. {0} as {1}.".format(i_tab, tab_text))
# Rule out unsupported tab
if i_tab == 5:
# 'advanced'
msg = "Tab {0} does not support 'Load Data'. Request is ambiguous.".format(tab_text)
QtGui.QMessageBox.information(self, "Click!", msg)
return
# Get exp number and scan number
if isinstance(exp, int) and isinstance(scan, int):
# use input
exp_no = exp
scan_no = scan
else:
# read from GUI
try:
exp_no, scan_no = self._uiGetExpScanNumber()
self._logDebug("Attending to load Exp {0} Scan {1}.".format(exp_no, scan_no))
except NotImplementedError as ne:
self._logError("Error to get Exp and Scan due to {0}.".format(str(ne)))
return
# Form data file name and download data
status, data_filename = self._uiDownloadDataFile(exp=exp_no, scan=scan_no)
if not status:
self._logError("Unable to download or locate local data file for Exp {0} Scan {1}.".format(exp_no, scan_no))
# (Load data for tab 0, 1, 2 and 4)
if i_tab not in [0, 1, 2, 3, 4]:
# Unsupported Tabs: programming error!
err_msg = "{0}-th tab should not get this far.\n".format(i_tab)
err_msg += 'GUI has been changed, but the change has not been considered! iTab = {0}'.format(i_tab)
raise NotImplementedError(err_msg)
# Load SPICE data to raw table (step 1)
load_success, msg = self._load_spice_data_to_raw_table(exp_no, scan_no, data_filename)
if not load_success:
self._logError(msg)
return
# Obtain the correction file names and wavelength from SPICE file
(auto_wavelength, van_corr_filename, exclude_detector_filename, wavelength_error, err_msg) \
            = self._get_corr_file_names_and_wavelength(exp_no, scan_no, data_filename)
# Set wavelength to GUI except 'multiple scans'
self._set_wavelength(auto_wavelength, wavelength_error, exp_no, scan_no, err_msg)
# Optionally obtain and parse det effecient file
detector_efficiency_table_ws = self._get_and_parse_det_efficiency_file(van_corr_filename)
# Parse SPICE data to MDEventWorkspaces
success, msg = self._parse_spice_data_to_MDEventWS(detector_efficiency_table_ws, exp_no, scan_no)
if not success:
self._logError(msg)
return
# Optionally parse detector exclusion file and set to line text
self._parse_detector_exclusion_file(exclude_detector_filename)
# Set up some widgets for raw detector data. Won't be applied to tab 3
if i_tab != 3:
float_sample_log_name_list = self._myControl.getSampleLogNames(exp_no, scan_no)
self.ui.comboBox_indvDetXLabel.clear()
self.ui.comboBox_indvDetXLabel.addItem("2theta/Scattering Angle")
self.ui.comboBox_indvDetXLabel.addItems(float_sample_log_name_list)
self.ui.comboBox_indvDetYLabel.clear()
self.ui.comboBox_indvDetYLabel.addItems(float_sample_log_name_list)
return True
def doLoadSetData(self):
""" Load a set of data
This is the first step of doing multiple scans processing
"""
# Get inputs for exp number and scans
try:
rtup = self._uiGetExpScanTabMultiScans()
expno = rtup[0]
scanlist = rtup[1]
except NotImplementedError as nie:
self._logError("Unable to load data set in multiple scans due to %s." % (str(nie)))
# Load and reduce data
loadstatus = True
for scan in sorted(scanlist):
tempstatus = self.doLoadData(expno, scan)
if not tempstatus:
self.ui.label_mergeMessage.setText('Error to load Exp %d Scan %d.' % (expno, scan))
loadstatus = False
else:
message = 'Loaded Exp %d Scan %d.' % (expno, scan)
self.ui.label_mergeMessage.setText(message)
# Load status
if loadstatus:
self.ui.label_mergeMessage.setText('All data files are loaded')
else:
self.ui.label_mergeMessage.setText('Not all data files are loaded')
# Wave length
haswavelength = True
for scan in scanlist:
if self._myControl.getWavelength(expno, scan) is None:
self._logNotice("Exp %d Scan %d has no wavelength set up." % (expno, scan))
haswavelength = False
break
# Set unit box
if haswavelength:
self.ui.comboBox_mscanUnit.clear()
self.ui.comboBox_mscanUnit.addItems(['2theta', 'dSpacing', 'Momentum Transfer (Q)'])
else:
self.ui.comboBox_mscanUnit.clear()
self.ui.comboBox_mscanUnit.addItems(['2theta'])
return
def doLoadReduceScanPrev(self):
""" Load and reduce previous scan for tab 'Normalized'
"""
# Reduce scan number by 1
try:
scanno = int(self.ui.lineEdit_scanNo.text())
except ValueError:
self._logError("Either Exp No or Scan No is not set up right as integer.")
return
else:
scanno = scanno - 1
if scanno < 1:
self._logWarning("Scan number is 1 already. Cannot have previous scan")
return
self.ui.lineEdit_scanNo.setText(str(scanno))
# Load data
self.ui.lineEdit_scanNo.setText(str(scanno))
self.doLoadData()
# Reduce data
self._uiReducePlotNoramlized(self._currUnit)
def doLoadReduceScanNext(self):
""" Load and reduce next scan for tab 'Normalized'
"""
# Advance scan number by 1
try:
scanno = int(self.ui.lineEdit_scanNo.text())
except ValueError:
self._logError("Either Exp No or Scan No is not set up right as integer.")
return False
else:
scanno = scanno + 1
if scanno < 1:
self._logWarning("Scan number is 1 already. Cannot have previous scan")
return False
# Load data
self.ui.lineEdit_scanNo.setText(str(scanno))
execstatus = self.doLoadData()
print("[DB] Load data : ", execstatus)
# Reduce data
self._uiReducePlotNoramlized(self._currUnit)
def doMergeScans(self):
""" Merge several scans for tab 'merge'
"""
# Get exp number and list of scans
try:
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
except NotImplementedError as ne:
self._logError(str(ne))
return False
# Check whether the wavelengths are same to merge
try:
wl_list = []
for scanno in scanlist:
print("Exp %d Scan %d. Wavelength = %s." % (
expno, scanno, str(self._myControl.getWavelength(expno, scanno))))
wl_list.append(float(self._myControl.getWavelength(expno, scanno)))
wl_list = sorted(wl_list)
min_wl = wl_list[0]
max_wl = wl_list[-1]
if max_wl - min_wl > 1.0:
self._logWarning("All scans do not have same wavelengths!")
except TypeError:
self._logError('Not all scans have wavelength set up. Unable to merge scans.')
return
# Check!
try:
unit = str(self.ui.comboBox_mscanUnit.currentText())
xmin, binsize, xmax = self._uiGetBinningParams(itab=3)
# wavelength = min_wl
mindex = self._myControl.mergeReduceSpiceData(expno, scanlist, unit, xmin, xmax, binsize)
except Exception as e:
raise e
label = "Exp %d, Scan %s." % (expno, str(scanlist))
self._plotMergedReducedData(mindex, label)
self._lastMergeIndex = mindex
self._lastMergeLabel = label
return
def doMergeScanView1D(self):
""" Change the multiple runs to 1D
"""
# Highlight the button's color
self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: yellow; color: red;}')
self.ui.pushButton_view2D.setEnabled(True)
self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: white; color: gray;}')
self.ui.pushButton_viewMScan1D.setEnabled(False)
# Process input experiment number and scan list
try:
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
except NotImplementedError as e:
self._logError(str(e))
return False
# Clear image
canvas = self.ui.graphicsView_mergeRun
canvas.clearAllLines()
canvas.clearCanvas()
# Plot data
unit = str(self.ui.comboBox_mscanUnit.currentText())
xlabel = self._getXLabelFromUnit(unit)
for scanno in scanlist:
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False)
def doMergeScanView2D(self):
""" Change the merged run's view to 2D plot
"""
# Highlight button color and change the color of another one
self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: white; color: gray;}')
self.ui.pushButton_view2D.setEnabled(False)
self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: yellow; color: red;}')
self.ui.pushButton_viewMScan1D.setEnabled(True)
# Get list of data to plot
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
# Convert the workspaces to 2D vector
vecylist = []
yticklabels = []
xmin = None
xmax = None
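        # Rows of the 2D image are stacked in scan order; this assumes all
        # scans were reduced onto the same x-grid (identical binning
        # parameters), so the x-range taken from the first scan is sufficient.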
for scanno in scanlist:
# put y values to list for constructing 2D array
vecx, vecy = self._myControl.getVectorToPlot(expno, scanno)
vecylist.append(vecy)
yticklabels.append('Exp %d Scan %d' % (expno, scanno))
# set up range of x
if xmin is None:
xmin = vecx[0]
xmax = vecx[-1]
dim2array = numpy.array(vecylist)
# Plot
holdprev = False
self.ui.graphicsView_mergeRun.clearAllLines()
self.ui.graphicsView_mergeRun.addPlot2D(dim2array, xmin=xmin, xmax=xmax, ymin=0,
ymax=len(vecylist), holdprev=holdprev, yticklabels=yticklabels)
def doMergeScanViewMerged(self):
""" Change the merged run's view to 1D plot
"""
# Highlight the button's color
self.ui.pushButton_view2D.setStyleSheet('QPushButton {color: red;}')
self.ui.pushButton_view2D.setEnabled(True)
self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {color: red;}')
self.ui.pushButton_viewMScan1D.setEnabled(True)
# Clear image
self.ui.graphicsView_mergeRun.clearCanvas()
# Plot
self._plotMergedReducedData(mkey=self._lastMergeIndex, label=self._lastMergeLabel)
def doPlotIndvDetMain(self):
""" Plot individual detector
"""
# Get exp and scan numbers and check whether the data has been loaded
try:
expno = self._getInteger(self.ui.lineEdit_expNo)
scanno = self._getInteger(self.ui.lineEdit_scanNo)
except EmptyError as e:
self._logError(str(e))
return
# Get detector ID and x-label option
try:
status, detidlist = self._getIntArray(self.ui.lineEdit_detID.text())
if status is False:
errmsg = detidlist
print("Unable to parse detector IDs due to %s." % (errmsg))
return
else:
print("[DB] Detectors to plot: %s" % (detidlist))
except EmptyError:
self._logError("Detector ID must be specified for plotting individual detector.")
return
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
xlabel = str(self.ui.comboBox_indvDetXLabel.currentText()).strip()
if xlabel != "" and xlabel != "Pt." and xlabel != "2theta/Scattering Angle":
# Plot with sample logs other than Pt.
self._logNotice("New Feature: X-label %s is supported for plotting individual detector's counts. "
" Set to detector angle." % xlabel)
xlabel = xlabel
else:
# Plot with Pt. or detector angles
if xlabel != "Pt.":
xlabel = ""
self._logNotice("X-label for individual detectror is '%s'." % (xlabel))
# plot
for detid in sorted(detidlist):
try:
self._plot_individual_detector_counts(expno, scanno, detid, xlabel, resetboundary=not overplot)
self._expNo = expno
self._scanNo = scanno
self._detID = detid
self._indvXLabel = xlabel
except NotImplementedError as e:
self._logError(str(e))
def doPlotIndvDetNext(self):
""" Plot next raw detector signals for tab 'Individual Detector'
"""
# Plot
try:
currdetid = self._detID + 1
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid,
self._indvXLabel)
except KeyError as e:
self._logError(str(e))
else:
self._detID = currdetid
# Update widget
self.ui.lineEdit_detID.setText(str(self._detID))
def doPlotIndvDetPrev(self):
""" Plot previous individual detector's signal for tab 'Individual Detector'
"""
# Plot
try:
currdetid = self._detID - 1
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid,
self._indvXLabel)
except KeyError as e:
self._logError(str(e))
else:
self._detID = currdetid
# Update widget
self.ui.lineEdit_detID.setText(str(self._detID))
def do_convert_plot_multi_scans(self):
""" Convert individual plots from normalized to raw or vice verse
"""
# Identify the mode
if str(self.ui.pushButton_plotRawMultiScans.text()) == 'Plot Raw':
new_mode = 'Plot Raw'
else:
new_mode = 'Plot Normalized'
# Get information
try:
min_x = self._getFloat(self.ui.lineEdit_mergeMinX)
except EmptyError:
min_x = None
try:
max_x = self._getFloat(self.ui.lineEdit_mergeMaxX)
except EmptyError:
max_x = None
bin_size = self._getFloat(self.ui.lineEdit_mergeBinSize)
# Process input experiment number and scan list
try:
r = self._uiGetExpScanTabMultiScans()
exp_no = r[0]
scan_list = r[1]
except NotImplementedError as e:
self._logError(str(e))
return False
# Re-process the data
if new_mode == 'Plot Raw':
if self._multiScanList is None or self._multiScanExp is None:
raise NotImplementedError('Experiment and scan list are not set up for plot raw.')
self._myControl.scale_to_raw_monitor_counts(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size)
else:
self._myControl.reset_to_normalized(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size)
# Clear image
canvas = self.ui.graphicsView_mergeRun
canvas.clearAllLines()
canvas.clearCanvas()
canvas.resetLineColorStyle()
# Plot data
unit = str(self.ui.comboBox_mscanUnit.currentText())
xlabel = self._getXLabelFromUnit(unit)
for scan_no in scan_list:
label = "Exp %s Scan %s" % (str(exp_no), str(scan_no))
self._plotReducedData(exp_no, scan_no, canvas, xlabel, label=label, clearcanvas=False)
# Change the button name
if new_mode == 'Plot Raw':
self.ui.pushButton_plotRawMultiScans.setText('Plot Normalized')
else:
self.ui.pushButton_plotRawMultiScans.setText('Plot Raw')
def doPlotRawPtMain(self):
""" Plot current raw detector signal for a specific Pt.
"""
# Get experiment number and scan number for data file
try:
expno = self._getInteger(self.ui.lineEdit_expNo)
scanno = self._getInteger(self.ui.lineEdit_scanNo)
except EmptyError as e:
self._logError(str(e))
return
# plot options
doOverPlot = bool(self.ui.checkBox_overpltRawDet.isChecked())
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
try:
ptNo = self._getInteger(self.ui.lineEdit_ptNo)
except EmptyError:
ptNo = None
# plot
print("[DB] Plot Raw Detector: PlotMode = %s." % (plotmode))
execstatus = self._plotRawDetSignal(expno, scanno, plotmode, ptNo, doOverPlot)
# set global values if good
if execstatus is True:
self._rawDetPtNo = ptNo
self._rawDetExpNo = expno
self._rawDetScanNo = scanno
self._rawDetPlotMode = plotmode
else:
print("[Error] Execution fails with signal %s. " % (str(execstatus)))
def doPlotRawPtNext(self):
""" Plot next raw detector signals
"""
# Check
if self._rawDetPtNo is not None:
ptno = self._rawDetPtNo + 1
else:
self._logError("Unable to plot previous raw detector \
because Pt. or Detector ID has not been set up yet.")
return
# Get plot mode and plot
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
overplot = bool(self.ui.checkBox_overpltRawDet.isChecked())
execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode,
ptno, overplot)
# update if it is good to plot
if execstatus:
self._rawDetPtNo = ptno
self.ui.lineEdit_ptNo.setText(str(ptno))
def do_enable_excluded_dets(self):
""" Enable or disable the line editor for excluded detectors
:return:
"""
if self.ui.checkBox_useDetExcludeFile.isChecked():
self.ui.lineEdit_detExcluded.setEnabled(True)
else:
self.ui.lineEdit_detExcluded.setDisabled(True)
def do_plot_raw_pt_prev(self):
""" Plot previous raw detector
"""
# Validate input
if self._rawDetPtNo is not None:
ptno = self._rawDetPtNo - 1
else:
self._logError("Unable to plot previous raw detector \
because Pt. or Detector ID has not been set up yet.")
return
# get plot mode and do plt
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
overplot = bool(self.ui.checkBox_overpltRawDet.isChecked())
execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode,
ptno, overplot)
# update if it is good to plot
if execstatus:
self._rawDetPtNo = ptno
self.ui.lineEdit_ptNo.setText(str(ptno))
def do_plot_sample_log(self):
""" Plot sample log vs. Pt. in tab 'Individual Detector'
"""
expNo = int(self.ui.lineEdit_expNo.text())
scanno = int(self.ui.lineEdit_scanNo.text())
logname = str(self.ui.comboBox_indvDetYLabel.currentText())
self._plotSampleLog(expNo, scanno, logname)
def doReduce2Theta(self):
""" Rebin the data and plot in 2theta for tab 'Normalized'
"""
unit = '2theta'
self._uiReducePlotNoramlized(unit)
def doReduceDSpacing(self):
""" Rebin the data and plot in d-spacing for tab 'Normalized'
"""
# new unit and information
unit = "dSpacing"
self._uiReducePlotNoramlized(unit)
def doReduceQ(self):
""" Rebin the data and plot in momentum transfer Q for tab 'Normalized'
"""
unit = 'Momentum Transfer (Q)'
self._uiReducePlotNoramlized(unit)
def doReduceSetData(self):
""" Reduce multiple data
"""
# Get exp number and list of scans
try:
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
except NotImplementedError as e:
self._logError(str(e))
return False
else:
self._multiScanExp = expno
self._multiScanList = scanlist
# Reduce and plot data
unit = str(self.ui.comboBox_mscanUnit.currentText())
xlabel = self._getXLabelFromUnit(unit)
canvas = self.ui.graphicsView_mergeRun
# canvas.clearAllLines() NO NEED
canvas.clearCanvas()
canvas.resetLineColorStyle()
for scan in scanlist:
r = self._uiReduceData(3, unit, expno, scan)
good = r[0]
expno = r[1]
scanno = r[2]
if good is True:
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False)
else:
self._logError('Failed to reduce Exp %s Scan %s' % (str(expno), str(scanno)))
def doReduceVanadium2Theta(self):
""" Rebin MDEventWorkspaces in 2-theta. for pushButton_rebinD
in vanadium peak strip tab
Suggested workflow
1. Rebin data
2. Calculate vanadium peaks in 2theta
3.
"""
# Reduce data
unit = '2theta'
itab = 4
r = self._uiReduceData(itab, unit)
good = r[0]
expno = r[1]
scanno = r[2]
# Plot reduced data and vanadium peaks
if good is True:
canvas = self.ui.graphicsView_vanPeaks
xlabel = self._getXLabelFromUnit(unit)
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=True)
# plot vanadium peaks
vanpeakpos = self._myControl.getVanadiumPeaksPos(expno, scanno)
self.ui.lineEdit_stripVPeaks.setText(str(vanpeakpos))
self._plotPeakIndicators(self.ui.graphicsView_vanPeaks, vanpeakpos)
return good
def doSaveData(self):
""" Save data
"""
# get exp number and scan number
try:
# exp and scan
expno, scanno = self._uiGetExpScanNumber()
# file type
filetype = str(self.ui.comboBox_outputFormat.currentText())
# file name
savedatadir = str(self.ui.lineEdit_outputFileName.text()).strip()
if savedatadir is not None and os.path.exists(savedatadir) is True:
homedir = savedatadir
else:
homedir = os.getcwd()
# launch a dialog to get data
filefilter = "All files (*);;Fullprof (*.dat);;GSAS (*.gsa)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File', homedir, filefilter))
except NotImplementedError as e:
self._logError(str(e))
else:
self._myControl.savePDFile(expno, scanno, filetype, sfilename)
def doSaveMergedScan(self):
""" Save merged scan
"""
homedir = os.getcwd()
filefilter = "Fullprof (*.dat)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter))
self._myControl.saveMergedScan(sfilename, mergeindex=self._lastMergeIndex)
def doSaveMultipleScans(self):
""" Save multiple scans
"""
# Get experiment number and scans
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanslist = r[1]
# Get base file name
homedir = os.getcwd()
savedir = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory To Save Fullprof', homedir))
for scanno in scanslist:
sfilename = os.path.join(savedir, "HB2A_Exp%d_Scan%d_FP.dat" % (expno, scanno))
self._myControl.savePDFile(expno, scanno, 'fullprof', sfilename)
def doSaveVanRun(self):
""" Save the vanadium run with peaks removed
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
homedir = os.getcwd()
filefilter = "Fullprof (*.dat)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter))
self._myControl.saveProcessedVanadium(expno, scanno, sfilename)
def doSmoothVanadiumData(self):
""" Smooth vanadium spectrum
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
smoothparams_str = str(self.ui.lineEdit_smoothParams.text())
# Smooth data
status = self._myControl.smoothVanadiumSpectrum(expno, scanno, smoothparams_str)
if not status:
self._logError("Failed to smooth vanadium data")
# Plot
unit = '2theta'
xlabel = self._getXLabelFromUnit(unit)
label = "Vanadium Exp %d Scan %d FFT-Smooth by %s" % (expno, scanno, smoothparams_str)
self._plotVanadiumRun(expno, scanno, xlabel, label, False, True)
def doSmoothVanadiumApply(self):
""" Apply smoothing effect to vanadium data
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
self._myControl.applySmoothVanadium(expno, scanno, True)
def doSmoothVanadiumUndo(self):
""" Undo smoothing vanadium
"""
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
self._myControl.applySmoothVanadium(expno, scanno, False)
def doStripVandiumPeaks(self):
""" Strip vanadium peaks
"""
# Get exp number an scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Error to get Exp and Scan due to %s." % (str(e)))
return False
# Default unit
unit = '2theta'
# Get and build binning parameter
xmin, binsize, xmax = self._uiGetBinningParams(itab=4)
if xmin is None:
binparams = '%f' % (binsize)
else:
binparams = '%f,%f,%f' % (xmin, binsize, xmax)
# Strip vanadium peak
good = self._myControl.stripVanadiumPeaks(expno, scanno, binparams, vanpeakposlist=None)
# Plot
if good:
xlabel = self._getXLabelFromUnit(unit)
label = "Exp %d Scan %d Bin = %.5f Vanadium Stripped" % (expno, scanno, binsize)
self._plotVanadiumRun(expno, scanno, xlabel, label, False)
def doUpdateWavelength(self):
""" Update the wavelength to line edit
"""
index = self.ui.comboBox_wavelength.currentIndex()
print("Update wavelength to ", index)
if index == 0:
wavelength = 2.41
elif index == 1:
wavelength = 1.54
elif index == 2:
wavelength = 1.12
else:
wavelength = None
self.ui.lineEdit_wavelength.setText(str(wavelength))
def on_mouseDownEvent(self, event):
""" Respond to pick up a value with mouse down event
Definition of button_press_event is:
button_press_event(x, y, button, dblclick=False, guiEvent=None)
Thus event has x, y and button.
event.button has 3 values:
1: left
2: middle
3: right
"""
# FUTURE: Need to make this work
x = event.xdata
y = event.ydata
button = event.button
if x is not None and y is not None:
# mouse is clicked within graph
if button == 1:
msg = "Mouse 1: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button)
print(msg)
elif button == 2:
msg = "Mouse 2: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button)
QtGui.QMessageBox.information(self, "Click!", msg)
elif button == 3:
# right click of mouse will pop up a context-menu
# menu should be self.ui.menu?
menu = QtGui.QMenu(self)
addAction = QtGui.QAction('Add', self)
addAction.triggered.connect(self.addSomething)
menu.addAction(addAction)
rmAction = QtGui.QAction('Remove', self)
rmAction.triggered.connect(self.rmSomething)
menu.addAction(rmAction)
# add other required actions
menu.popup(QtGui.QCursor.pos())
def on_mouseMotion(self, event):
""" Event handler for mouse being detected to move
"""
# prev_x = self._viewMerge_X
# prev_y = self._viewMerge_Y
curx = event.xdata
cury = event.ydata
if curx is None or cury is None:
return
self._viewMerge_X = event.xdata
self._viewMerge_Y = event.ydata
def addSomething(self):
"""
"""
# FUTURE - Need to implement how to deal with this
print("Add scan back to merge")
def rmSomething(self):
"""
"""
# FUTURE - Need to implement how to deal with this
print("Remove a scan from merged data.")
# --------------------------------------------------------------------------
# Private methods to plot data
# --------------------------------------------------------------------------
def _plotIndividualDetCountsVsSampleLog(self, expno, scanno, detid, samplename, raw=True):
""" Plot one specific detector's counts vs. one specified sample log's value
along with all Pts.
For example: detector 11's counts vs. sample_b's value
:param expno:
:param scanno:
:param detid:
:param samplename:
:param raw: boolean whether the output is normalized by monitor counts
:return:
"""
# Validate input
try:
expno = int(expno)
scanno = int(scanno)
detid = int(detid)
samplename = str(samplename)
except ValueError:
raise NotImplementedError("ExpNo, ScanNo or DetID is not integer.")
# Get the array for detector counts vs. sample log value by mapping Pt.
vecx, vecy = self._myControl.getIndividualDetCountsVsSample(expno, scanno,
detid, samplename, raw)
# Clear canvas
self.ui.graphicsView_indvDet.clearCanvas()
# Plot
marker, color = self.ui.graphicsView_indvDet.getNextLineMarkerColorCombo()
self.ui.graphicsView_indvDet.add_plot1d(vec_x=vecx,
vec_y=vecy,
marker=marker,
color=color,
x_label=samplename,
y_label='Counts',
label='DetID = %d' % (detid))
# FUTURE: In future, need to find out how to use self._graphIndDevMode
def _plot_individual_detector_counts(self, expno, scanno, detid, xaxis, resetboundary=False):
""" Plot a specific detector's counts along all experiment points (pt)
:param expno:
:param scanno:
:param detid:
:param xaxis:
:param resetboundary:
:return:
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
detid = int(detid)
plot_error_bar = self.ui.checkBox_indDetErrorBar.isChecked()
plot_normal = self.ui.checkBox_indDetNormByMon.isChecked()
# Reject if data is not loaded
if self._myControl.hasDataLoaded(expno, scanno) is False:
self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno))
return False
# Canvas and line information
canvas = self.ui.graphicsView_indvDet
if canvas not in self._tabLineDict:
self._tabLineDict[canvas] = []
# get data
self._logNotice("Input x-axis is '%s' for plotting individual detector's counts." % (xaxis))
if len(xaxis) == 0:
xaxis = None
vecx, vecy = self._myControl.getIndividualDetCounts(expno, scanno, detid, xaxis, plot_normal)
if not isinstance(vecx, numpy.ndarray):
raise NotImplementedError('vecx, vecy must be numpy arrays.')
if plot_error_bar:
y_err = numpy.sqrt(vecy)
else:
y_err = None
# Plot to canvas
marker, color = canvas.getNextLineMarkerColorCombo()
if xaxis == "" or xaxis == "2theta/Scattering Angle":
xlabel = r'$2\theta$'
else:
xlabel = xaxis
# FUTURE - If it works with any way of plotting, then refactor Pt. with any other sample names
label = "Detector ID: %d" % (detid)
if self._tabLineDict[canvas].count((expno, scanno, detid)) == 0:
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel,
y_label='Counts', label=label, y_err=y_err)
self._tabLineDict[canvas].append((expno, scanno, detid))
if resetboundary:
# Set xmin and xmax about the data for first time
xmin = min(vecx)
xmax = max(vecx)
ymin = min(vecy)
ymax = max(vecy)
else:
# auto setup for image boundary
xmin = min(min(vecx), canvas.getXLimit()[0])
xmax = max(max(vecx), canvas.getXLimit()[1])
ymin = min(min(vecy), canvas.getYLimit()[0])
ymax = max(max(vecy), canvas.getYLimit()[1])
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
# Set canvas mode
# FUTURE: Consider how to use self._graphIndDevMode in future
# self._graphIndDevMode = (xlabel, 'Counts')
return True
def _plotPeakIndicators(self, canvas, peakposlist):
""" Plot indicators for peaks
"""
print("[DB] Peak indicators are at ", peakposlist)
rangey = canvas.getYLimit()
rangex = canvas.getXLimit()
for pos in peakposlist:
if pos >= rangex[0] and pos <= rangex[1]:
vecx = numpy.array([pos, pos])
vecy = numpy.array([rangey[0], rangey[1]])
canvas.add_plot1d(vecx, vecy, color='black', line_style='--')
def _plotRawDetSignal(self, expno, scanno, plotmode, ptno, dooverplot):
""" Plot the counts of all detectors of a certain Pt. in an experiment
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
# Set up canvas and dictionary
canvas = self.ui.graphicsView_Raw
if canvas not in self._tabLineDict:
self._tabLineDict[canvas] = []
# Check whether data exists
if not self._myControl.hasDataLoaded(expno, scanno):
self._logError("File has not been loaded for Exp %d Scan %d. Load data first!" % (expno, scanno))
return
# Get vecx and vecy
if plotmode == "All Pts.":
# Plot all Pts.
vecxylist = self._myControl.getRawDetectorCounts(expno, scanno)
# Clear previous
self.ui.graphicsView_Raw.clearAllLines()
self.ui.graphicsView_Raw.setLineMarkerColorIndex(0)
self._tabLineDict[canvas] = []
elif plotmode == "Single Pts.":
# Plot plot
ptno = int(ptno)
if not dooverplot:
self.ui.graphicsView_Raw.clearAllLines()
self.ui.graphicsView_Raw.setLineMarkerColorIndex(0)
self._tabLineDict[canvas] = []
# Plot one pts.
vecxylist = self._myControl.getRawDetectorCounts(expno, scanno, [ptno])
else:
# Raise exception
raise NotImplementedError("Plot mode %s is not supported." % (plotmode))
# Set up unit/x-label
unit = r"$2\theta$"
# plot
xmin = None
xmax = None
ymin = None
ymax = None
for ptno, vecx, vecy in vecxylist:
# FUTURE: Label is left blank as there can be too many labels
label = 'Pt %d' % (ptno)
# skip if this plot has existed
if self._tabLineDict[canvas].count((expno, scanno, ptno)) == 1:
continue
marker, color = canvas.getNextLineMarkerColorCombo()
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=unit,
y_label='intensity', label=label)
# set up line tuple
self._tabLineDict[canvas].append((expno, scanno, ptno))
# auto setup for image boundary
xmin = min(min(vecx), canvas.getXLimit()[0])
xmax = max(max(vecx), canvas.getXLimit()[1])
ymin = min(min(vecy), canvas.getYLimit()[0])
ymax = max(max(vecy), canvas.getYLimit()[1])
# Reset canvas x-y limit
if xmin is not None:
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
return True
def _plotMergedReducedData(self, mkey, label):
""" Plot the reduced data from merged ...
"""
# get the data
try:
vecx, vecy = self._myControl.getMergedVector(mkey)
except KeyError as e:
self._logError("Unable to retrieve merged reduced data due to %s." % (str(e)))
return
canvas = self.ui.graphicsView_mergeRun
# Clear canvas
canvas.clearAllLines()
canvas.clearCanvas()
# Plot
marker, color = canvas.getNextLineMarkerColorCombo()
xlabel = self._getXLabelFromUnit(self.ui.comboBox_mscanUnit.currentText())
canvas.add_plot1d(vecx, vecy, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label)
xmax = max(vecx)
xmin = min(vecx)
dx = xmax - xmin
ymax = max(vecy)
ymin = min(vecy)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _plotReducedData(self, exp, scan, canvas, xlabel, label=None, clearcanvas=True,
spectrum=0, plot_error=False):
""" Plot reduced data for exp and scan
"""
if spectrum != 0:
raise NotImplementedError("Unable to support spectrum = %d case." % (spectrum))
# whether the data is load
if not self._myControl.hasReducedWS(exp, scan):
self._logWarning("No data to plot!")
return
# get to know whether it is required to clear the image
if clearcanvas:
canvas.clearAllLines()
canvas.setLineMarkerColorIndex(0)
# plot
vec_x, vec_y = self._myControl.getVectorToPlot(exp, scan)
if not isinstance(vec_x, numpy.ndarray):
vec_x = numpy.array(vec_x)
vec_y = numpy.array(vec_y)
# FUTURE - Should check y_err set up correctly in Mantid or not
if plot_error:
raise RuntimeError('Implement how to return y_err ASAP.')
else:
y_err = None
# get the marker color for the line
marker, color = canvas.getNextLineMarkerColorCombo()
# plot
if label is None:
label = "Exp %d Scan %d" % (exp, scan)
canvas.add_plot1d(vec_x, vec_y, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label,
y_err=y_err)
if clearcanvas:
xmax = max(vec_x)
xmin = min(vec_x)
dx = xmax - xmin
ymax = max(vec_y)
ymin = min(vec_y)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _plotSampleLog(self, expno, scanno, samplelogname):
""" Plot the value of a sample log among all Pt.
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
samplelogname = str(samplelogname)
# Reject if data is not loaded
if not self._myControl.hasDataLoaded(expno, scanno):
self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno))
return False
# Canvas and line information
self._indvDetCanvasMode = 'samplelog'
# pop out the xlabel list
# REFACTOR - Only need to set up once if previous plot has the same setup
if self.ui.comboBox_indvDetXLabel.count() == 0:
floatsamplelognamelist = self._myControl.getSampleLogNames(expno, scanno)
self.ui.comboBox_indvDetXLabel.clear()
self.ui.comboBox_indvDetXLabel.addItems(floatsamplelognamelist)
raise RuntimeError("This X-label combo box should be set up during loading data before.")
xlabel = str(self.ui.comboBox_indvDetXLabel.currentText())
# get data
vecx, vecy = self._myControl.getSampleLogValue(expno, scanno, samplelogname, xlabel)
# Plot to canvas
canvas = self.ui.graphicsView_indvDet
# FUTURE - Clear canvas (think of a case that no need to clear canvas)
canvas.clearCanvas()
# canvas.clearAllLines()
marker, color = canvas.getNextLineMarkerColorCombo()
if xlabel is None:
xlabel = r'Pt'
label = samplelogname
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel,
y_label='Counts', label=label)
# auto setup for image boundary
xmin = min(vecx)
xmax = max(vecx)
ymin = min(vecy)
ymax = max(vecy)
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
return True
def _plotVanadiumRun(self, exp, scan, xlabel, label, clearcanvas=False, TempData=False):
""" Plot processed vanadium data
Arguments:
- TempData :: flag whether the vanadium run is a temporary data set
"""
# Check whether the data has been loaded
exp = int(exp)
scan = int(scan)
if not self._myControl.hasReducedWS(exp, scan):
self._logWarning("No data to plot!")
return
# Get data to plot
try:
vecx, vecy = self._myControl.getVectorProcessVanToPlot(exp, scan, TempData)
if not TempData:
vecx, vecyOrig = self._myControl.getVectorToPlot(exp, scan)
diffY = vecyOrig - vecy
except NotImplementedError as e:
errmsg = '[Error] Unable to retrieve processed vanadium spectrum for exp %d scan %d. ' \
'Reason: %s' % (exp, scan, str(e))
QtGui.QMessageBox.information(self, "Return!", errmsg)
return
# Determine whether the canvas needs to be cleared
canvas = self.ui.graphicsView_vanPeaks
if TempData:
clearcanvas = False
if clearcanvas:
canvas.clearAllLines()
canvas.setLineMarkerColorIndex(0)
# get the marker color for the line
if TempData:
marker = None
color = 'blue'
else:
marker, color = canvas.getNextLineMarkerColorCombo()
# plot
canvas.add_plot1d(vecx, vecy, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label)
if not TempData:
canvas.add_plot1d(vecx, diffY, marker='+', color='green',
x_label=xlabel, y_label='intensity', label='Diff')
# reset canvas limits
if clearcanvas:
xmax = max(vecx)
xmin = min(vecx)
dx = xmax - xmin
ymax = max(vecy)
ymin = min(diffY)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _uiDownloadDataFile(self, exp, scan):
""" Download data file according to its exp and scan
Either download the data from a server or copy the data file from local
disk
"""
# Get on hold of raw data file
useserver = self.ui.radioButton_useServer.isChecked()
uselocal = self.ui.radioButton_useLocal.isChecked()
if useserver == uselocal:
self._logError("It is logically wrong to set up server/local dir for data.")
self.ui.radioButton_useServer.setChecked(True)
self.ui.radioButton_useLocal.setChecked(False)
rvalue = False
if self._srcFromServer:
# Use server: build the URL to download data
if not self._serverAddress.endswith('/'):
self._serverAddress += '/'
fullurl = "%s%s/exp%d/Datafiles/%s_exp%04d_scan%04d.dat" % (self._serverAddress,
self._instrument.lower(), exp,
self._instrument.upper(), exp, scan)
print("URL: ", fullurl)
cachedir = str(self.ui.lineEdit_cache.text()).strip()
if not os.path.exists(cachedir):
invalidcache = cachedir
cachedir = os.getcwd()
self.ui.lineEdit_cache.setText(cachedir)
self._logWarning("Cache directory %s is not valid. "
"Using current workspace directory %s as cache." % (invalidcache, cachedir))
filename = '%s_exp%04d_scan%04d.dat' % (self._instrument.upper(), exp, scan)
srcFileName = os.path.join(cachedir, filename)
status, errmsg = HfirPDReductionControl.downloadFile(fullurl, srcFileName)
if not status:
self._logError(errmsg)
srcFileName = None
else:
rvalue = True
elif self._srcAtLocal:
# Data from local
srcFileName = os.path.join(self._localSrcDataDir, "%s/Exp%d_Scan%04d.dat" % (self._instrument, exp, scan))
if os.path.exists(srcFileName):
rvalue = True
else:
raise NotImplementedError("Logic error. Neither downloaded from server.\
Nor from local drive")
return (rvalue, srcFileName)
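# --- Illustrative note (not part of the original method) ---
# The server URL assembled above follows the pattern
#   <server>/<instrument lower>/exp<exp>/Datafiles/<INSTRUMENT>_exp%04d_scan%04d.dat
# For a hypothetical server http://example.com/data/, instrument HB2A,
# exp 231 and scan 15, the download URL would therefore be
#   http://example.com/data/hb2a/exp231/Datafiles/HB2A_exp0231_scan0015.dat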
def _uiGetBinningParams(self, itab):
""" Get binning parameters
Return:
- xmin, binsize, xmax
"""
# Get value
if itab == 2:
xmin = str(self.ui.lineEdit_xmin.text())
xmax = str(self.ui.lineEdit_xmax.text())
binsize = str(self.ui.lineEdit_binsize.text())
elif itab == 3:
xmin = str(self.ui.lineEdit_mergeMinX.text())
xmax = str(self.ui.lineEdit_mergeMaxX.text())
binsize = str(self.ui.lineEdit_mergeBinSize.text())
elif itab == 4:
xmin = str(self.ui.lineEdit_min2Theta.text())
xmax = str(self.ui.lineEdit_max2Theta.text())
binsize = str(self.ui.lineEdit_binsize2Theta.text())
else:
raise NotImplementedError("Binning parameters are not used for %d-th tab." % (itab))
# Parse values
try:
xmin = float(xmin)
xmax = float(xmax)
except ValueError:
xmin = None
xmax = None
else:
if xmin >= xmax:
raise NotImplementedError("set minimum X = %.5f is larger than \
maximum X = %.5f" % (xmin, xmax))
try:
binsize = float(binsize)
except ValueError:
raise NotImplementedError("Error: bins size '%s' is not a float number." % (binsize))
# Fix for merging as xmin and xmax must be same for all scans
if itab == 3 and xmin is None:
xmin = 5.
xmax = 150.
return (xmin, binsize, xmax)
def _uiGetExcludedDetectors(self):
""" Get excluded detectors from input line edit
Return :: list of detector IDs to exclude from reduction
"""
excludedetidlist = []
if self.ui.checkBox_useDetExcludeFile.isChecked():
detids_str = str(self.ui.lineEdit_detExcluded.text()).strip()
status, excludedetidlist = self._getIntArray(detids_str)
if status is False:
self._logError("Extra scans are not a list of integers: %s." % (
str(self.ui.lineEdit_extraScans.text())))
excludedetidlist = []
return excludedetidlist
def _uiGetExpScanNumber(self):
""" Get experiment number and scan number from widgets for merged
"""
expnostr = self.ui.lineEdit_expNo.text()
scannostr = self.ui.lineEdit_scanNo.text()
try:
expno = int(expnostr)
scanno = int(scannostr)
except ValueError:
raise NotImplementedError("Either Exp No '%s' or Scan No '%s \
is not set up right as integer." % (expnostr, scannostr))
return (expno, scanno)
def _uiGetExpScanTabMultiScans(self):
""" Get exp number and scans from tab 3
"""
try:
expno = int(self.ui.lineEdit_expNo.text())
startscan = int(self.ui.lineEdit_scanStart.text())
endscan = int(self.ui.lineEdit_scanEnd.text())
except ValueError as e:
raise RuntimeError("For merging scans, Exp No, Starting scan number and \
end scan number must be given: %s" % (str(e)))
# scans = [startscan, endscan] + [others] - [excluded]
status, extrascanlist = self._getIntArray(str(self.ui.lineEdit_extraScans.text()))
if not status:
raise RuntimeError(extrascanlist)
status, excludedlist = self._getIntArray(str(self.ui.lineEdit_exclScans.text()))
self._logDebug("Excluded list: %s" % (str(excludedlist)))
if not status:
self._logError(excludedlist)
return
scanslist = list(range(startscan, endscan + 1))
scanslist.extend(extrascanlist)
scanslist = list(set(scanslist))
for scan in excludedlist:
scanslist.remove(scan)
return (expno, sorted(scanslist))
def _uiIsBinParamsChange(self, itab, binparams):
""" Check whether current bin parameters are same
as given value
"""
xmin, binsize, xmax = self._uiGetBinningParams(itab)
newbinparams = [xmin, binsize, xmax]
# check binning
same = True
for i in range(3):
par_0 = binparams[i]
par_1 = newbinparams[i]
try:
if abs(float(par_0) - float(par_1)) > 1.0E-6:
same = False
except TypeError:
if par_0 is not None or par_1 is not None:
same = False
if not same:
break
change = not same
if change:
print("[D...............B]", end=' ')
print("%s vs %s " % (str(xmin), str(self._tabBinParamDict[itab][0])), end=' ')
print("%s vs %s " % (str(xmax), str(self._tabBinParamDict[itab][2])), end=' ')
print("%s vs %s " % (str(binsize), str(self._tabBinParamDict[itab][1])))
else:
print("[DB] Rebin = False")
return change
def _uiReduceData(self, itab, unit, expno=None, scanno=None):
""" Rebin and plot by reading GUI widgets' value
Arguments:
- itab : index of the tab. Only 2, 3 and 4 are allowed
- unit : string for target unit
"""
# Experiment number and Scan number
if isinstance(expno, int) and isinstance(scanno, int):
# Call from tab-3 multiple scan
pass
else:
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError(str(e))
return
# Get binning parameter
xmin, binsize, xmax = self._uiGetBinningParams(itab)
# Get wavelength
try:
if itab == 3:
wavelength = float(self._myControl.getWavelength(expno, scanno))
else:
wavelength = float(str(self.ui.lineEdit_wavelength.text()))
except TypeError:
if unit != '2theta':
raise NotImplementedError('Wavelength must be specified for unit %s.' % (unit))
wavelength = None
# Get scale factor
try:
scalefactor = self._getFloat(self.ui.lineEdit_normalizeMonitor)
except EmptyError:
scalefactor = None
except ValueError as valueerror:
raise ValueError("Unable to get normalization factor due to %s." % (str(valueerror)))
# Rebin
try:
# rebinned = self._myControl.rebin(expno, scanno, unit, wavelength, xmin, binsize, xmax)
excludeddetlist = self._uiGetExcludedDetectors()
self._myControl.reduceSpicePDData(expno, scanno,
unit, xmin, xmax, binsize, wavelength, excludeddetlist, scalefactor)
# Record binning
self._tabBinParamDict[itab] = [xmin, binsize, xmax]
except NotImplementedError as e:
self._logError(str(e))
return (False, expno, scanno)
return (True, expno, scanno)
def _uiReducePlotNoramlized(self, unit):
""" Support Reduce2Theta, ReduceDspacing and ReduceQ
"""
itab = 2
canvas = self.ui.graphicsView_reducedData
expno, scanno = self._uiGetExpScanNumber()
change = self._uiIsBinParamsChange(itab, self._tabBinParamDict[itab])
# skip if this exp/scan has already been plotted with the same unit and binning
if unit == self._currUnit and self._tabLineDict[itab].count((expno, scanno)) > 0 and not change:
# there is no need to plot again as line exists
return
# reduce
r = self._uiReduceData(2, unit)
good = r[0]
expno = r[1]
scanno = r[2]
# failed to reduce
if not good:
self._logError("Failed to reduce Exp %d Scan %d" % (expno, scanno))
return
# clear canvas???
if unit != self._currUnit:
clearcanvas = True
elif not self.ui.checkBox_clearPrevious.isChecked():
# NOTE: naming of the widget is VERY confusing. Should be changed to keepPrevious
clearcanvas = True
else:
clearcanvas = False
# reset record dictionary if unit is different from present
if clearcanvas:
self._tabLineDict[itab] = []
self._currUnit = unit
self._tabLineDict[itab].append((expno, scanno))
xlabel = self._getXLabelFromUnit(unit)
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=clearcanvas)
def _logDebug(self, dbinfo):
""" Log debug information
"""
print(dbinfo)
def _logError(self, errinfo):
""" Log error information
"""
QtGui.QMessageBox.information(self, "Click!", errinfo)
def _logNotice(self, loginfo):
""" Log error information
"""
msg = '[Notice] %s' % loginfo
print(msg)
# QtGui.QMessageBox.information(self, "Click!", msg)
def _logWarning(self, warning_info):
""" Log error information
"""
msg = "[Warning]: %s" % (warning_info)
QtGui.QMessageBox.information(self, "OK!", msg)
def _getFloat(self, lineedit):
""" Get integer from line edit
Exception: ValueError if empty or no input
"""
valuestr = str(lineedit.text()).strip()
if len(valuestr) == 0:
raise EmptyError("Input is empty. It cannot be converted to float.")
try:
value = float(valuestr)
except ValueError as e:
raise e
return value
def _getInteger(self, lineedit):
""" Get integer from line edit
"""
valuestr = str(lineedit.text()).strip()
if len(valuestr) == 0:
raise EmptyError("Input is empty. It cannot be converted to integer.")
try:
value = int(valuestr)
except ValueError as e:
raise e
return value
def _getIntArray(self, intliststring):
""" Validate whether the string can be divided into integer strings.
Allowed: a, b, c-d, e, f
Return :: 2-tuple (status, list/error message)
"""
intliststring = str(intliststring)
if intliststring == "":
return (True, [])
# Split by ","
termlevel0s = intliststring.split(",")
intlist = []
# For each term
errmsg = ""
returnstatus = True
for level0term in termlevel0s:
level0term = level0term.strip()
# split upon dash -
numdashes = level0term.count("-")
if numdashes == 0:
# one integer
valuestr = level0term
try:
intvalue = int(valuestr)
if str(intvalue) != valuestr:
returnstatus = False
errmsg = "Contains non-integer string %s." % (valuestr)
except ValueError:
returnstatus = False
errmsg = "String %s is not an integer." % (valuestr)
else:
intlist.append(intvalue)
elif numdashes == 1:
# Integer range
twoterms = level0term.split("-")
templist = []
for i in range(2):
valuestr = twoterms[i]
try:
intvalue = int(valuestr)
if str(intvalue) != valuestr:
returnstatus = False
errmsg = "Contains non-integer string %s." % (valuestr)
except ValueError:
returnstatus = False
errmsg = "String %s is not an integer." % (valuestr)
else:
templist.append(intvalue)
# break loop
if not returnstatus:
break
intlist.extend(range(templist[0], templist[1] + 1))
else:
# Undefined situation
returnstatus = False
errmsg = "Term %s contains more than 1 dash." % (level0term)
# break loop if something is wrong
if not returnstatus:
break
# Return with false
if not returnstatus:
return (False, errmsg)
return (True, intlist)
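# --- Illustrative note (not part of the original method) ---
# Expected behaviour of _getIntArray for a few hypothetical inputs:
#   "1, 3, 5-8" -> (True, [1, 3, 5, 6, 7, 8])
#   ""          -> (True, [])
#   "2-4-6"     -> (False, "Term 2-4-6 contains more than 1 dash.")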
def _getXLabelFromUnit(self, unit):
""" Get X-label from unit
"""
if unit == '2theta':
xlabel = r'$2\theta$ (Degrees)'
elif unit == 'dSpacing':
xlabel = r"d $(\AA)$"
elif unit == 'Momentum Transfer (Q)':
xlabel = r"Q $(\AA^{-1})$"
else:
xlabel = 'Wacky Unknown'
return xlabel
| gpl-3.0 |
rnowling/pop-gen-models | single-pop/calculate_phist.py | 2 | 3690 | import sys
import numpy as np
import matplotlib.pyplot as plt
def ssdwpfunc(individuals, frequencies):
"""
Return the sum of squared deviations within populations given the population frequencies.
individuals[pop] = counts
frequencies[pop, haplotype] = freq
"""
ssdwp = 0.0
n_pop = frequencies.shape[0]
n_haplotypes = frequencies.shape[1]
for pop_idx in xrange(n_pop):
gene_copies = individuals[pop_idx] * 2 # diploid
pop_freq = frequencies[pop_idx, :]
pop_ssd = (np.outer(pop_freq, pop_freq).sum() - np.inner(pop_freq, pop_freq)) / 2.0
ssdwp += gene_copies * pop_ssd
return ssdwp
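# --- Illustrative sketch (not part of the original analysis) ---
# Toy call with two populations and three haplotypes; the numbers are invented
# purely to show the expected shapes: individuals[pop] and frequencies[pop, haplotype].
def _example_ssdwp():
    individuals = np.array([10.0, 12.0])            # sampled individuals per population
    frequencies = np.array([[0.5, 0.3, 0.2],
                            [0.1, 0.6, 0.3]])       # haplotype frequencies, rows sum to 1
    return ssdwpfunc(individuals, frequencies)      # within-population sum of squared deviations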
def ssdtotalfunc(individuals, frequencies):
"""
Calculate the total sum of squared deviations for a locus.
individuals[pop] = counts
frequencies[pop, haplotype] = freq
"""
# total number of genes across all populations for a given locus
locus_gene_copies = 2.0 * individuals.sum() # diploid
total_freq = np.sum(frequencies * individuals[:, np.newaxis], axis=0) / individuals.sum()
ssd = locus_gene_copies * (np.outer(total_freq, total_freq).sum() - np.inner(total_freq, total_freq)) / 2.0
return ssd
def onecalcphi(individuals, frequencies):
"""
individuals[pop] = individuals
frequencies[pop][haplotype] = freq
"""
n_gene_copies = individuals.sum() * 2 # diploid
n_pop = individuals.shape[0]
# calculate the sums squared deviation within populations
ssdwp = ssdwpfunc(individuals, frequencies)
# sums squared deviations total at the locus
ssdtotal = ssdtotalfunc(individuals, frequencies)
# degrees of freedom for between populations
dfb = n_pop - 1
# degrees of freedom for total locus
dfw = n_gene_copies - n_pop
if dfw == 0:
return 0.0
# mean squared deviation within populations
msdwp = ssdwp / dfw
# mean squared deviation among populations
msdap = (ssdtotal - ssdwp)/dfb
# Calculate the variation among populations
varAP = (msdap - msdwp)/(float(n_gene_copies)/n_pop)
if (varAP + msdwp) == 0.0:
return 0.0
# PHIst is the proportion of the variation partitioned among populations
phi_st = varAP/(msdwp + varAP)
assert not(np.isnan(phi_st))
return phi_st
def calc_locus_phi_st(individuals, frequencies):
n_loci = individuals.shape[0]
phi_st = np.zeros(n_loci)
for locus_i in xrange(n_loci):
phi_st[locus_i] = onecalcphi(individuals[locus_i, :], frequencies[locus_i, :, :])
return phi_st
def read_counts(flname):
fl = open(flname)
vector = []
populations = []
for ln in fl:
if "Marker" in ln:
if len(populations) != 0:
vector.append(populations)
populations = []
continue
cols = ln.split()
pop_locus_counts = map(float, cols[2:])
populations.append(pop_locus_counts)
vector.append(populations)
fl.close()
return np.array(vector)
def normalize_haplotypes(counts):
# sum total haplotype counts for each
# population-locus combination
total = np.sum(counts, axis=2)
frequencies = counts / total[:, :, None]
return frequencies
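# --- Illustrative sketch (not part of the original pipeline) ---
# normalize_haplotypes expects counts[locus, population, haplotype]; the toy
# array below (1 locus, 2 populations, 2 haplotypes) is invented to show that
# each population-locus row is rescaled to sum to one.
def _example_normalize():
    counts = np.array([[[6.0, 4.0],
                        [1.0, 3.0]]])
    return normalize_haplotypes(counts)   # -> [[[0.6, 0.4], [0.25, 0.75]]]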
def write_phi(flname, phi_values):
fl = open(flname, "w")
for i, phi in enumerate(phi_values):
fl.write("%s,%s\n" % (i, phi))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
out_fl = sys.argv[2]
counts = read_counts(occur_fl)
frequencies = normalize_haplotypes(counts)
individuals = counts.sum(axis=2)
phi_sts = calc_locus_phi_st(individuals, frequencies)
write_phi(out_fl, phi_sts)
| apache-2.0 |
kedz/cuttsum | trec2015/sbin/l2s/apsal-dev.py | 1 | 9067 | import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from cuttsum.classifiers import NuggetRegressor
import cuttsum.judgements
import pandas as pd
import numpy as np
from datetime import datetime
from cuttsum.misc import event2semsim
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
import os
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
matches_df = cuttsum.judgements.get_merged_dataframe()
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20):
max_nuggets = 3
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .9]))
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
from cuttsum.pipeline import DedupedArticlesResource
ded = DedupedArticlesResource()
stats_df = ded.get_stats_df(event, corpus, extractor, thresh)
stats_df["stream ids"] = stats_df["stream ids"].apply(lambda x: set(eval(x)))
sid2match = {}
for _, row in stats_df.iterrows():
for sid in row["stream ids"]:
sid2match[sid] = row["match"]
all_ts = []
all_docs = []
new_docs = []
for (sid, ts), doc in df.groupby(["stream id", "timestamp"]):
# print sub_doc
if len(all_ts) > 0:
assert ts >= all_ts[-1]
all_ts.append(ts)
if sid2match[sid] is True:
new_docs.append(doc)
all_docs.append(doc)
df = pd.concat(new_docs)
print len(all_docs), len(new_docs)
return df
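# --- Illustrative sketch (not part of the original script) ---
# main() below clusters sentence vectors with affinity propagation on a
# negative cosine-distance matrix; each point's preference is its salience
# probability plus the median off-diagonal similarity, capped at -.05 and
# shifted by a global offset.  This helper only restates that construction
# on caller-supplied (made-up) inputs.
def _example_ap_preference(X, probs, pref_offset=1.0):
    K = -(1 - cosine_similarity(X))                    # negative cosine distance as similarity
    K_ma = np.ma.masked_array(K, np.eye(K.shape[0]))   # mask self-similarities
    median = np.ma.median(K_ma)
    pref = np.minimum(probs + median, -.05)
    ap = AffinityPropagation(preference=pref - pref_offset,
                             affinity="precomputed", max_iter=1000)
    ap.fit(K)
    return ap.labels_, ap.cluster_centers_indices_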
def main(output_dir, sim_threshold, bucket_size, pref_offset):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dev_qids = set([19, 23, 27, 34, 35])
summary_data = []
K_data = []
for event in cuttsum.events.get_events():
if event.query_num not in dev_qids: continue
print event
semsim = event2semsim(event)
istream = get_input_stream(event, False, extractor="goose",
thresh=.8, delay=None, topk=20)
prev_time = 0
cache = None
clusters = []
max_h = len(event.list_event_hours()) - 1
for h, hour in enumerate(event.list_event_hours()):
if h % bucket_size != 0 and h != max_h:
continue
current_time = epoch(hour)
input_sents = istream[
(istream["timestamp"] < current_time) & \
(istream["timestamp"] >= prev_time)]
len_select = input_sents["lemmas stopped"].apply(len) > 10
input_sents = input_sents[len_select]
if len(input_sents) <= 1: continue
stems = input_sents["stems"].apply(lambda x: ' '.join(x)).tolist()
X = semsim.transform(stems)
probs = input_sents["probs"]
p = probs.values
K = -(1 - cosine_similarity(X))
K_ma = np.ma.masked_array(K, np.eye(K.shape[0]))
Kmin = np.ma.min(K_ma)
Kmax = np.ma.max(K_ma)
median = np.ma.median(K_ma)[0]
pref = np.minimum(p + median, -.05)
print "SYS TIME:", hour, "# SENTS:", K.shape[0],
print "min/median/max pref: {}/{}/{}".format(
pref.min(), np.median(pref), pref.max())
#K_data.append({"min": Kmin, "max": Kmax, "median": median})
K_data.append({"min": (pref).min(), "max": (pref).max(),
"median": np.median((pref))})
#print K
# continue
#
ap = AffinityPropagation(
preference=pref-pref_offset, affinity="precomputed",
verbose=True, max_iter=1000)
ap.fit(K)
# ##print input_sents["pretty text"]
#
labels = ap.labels_
if ap.cluster_centers_indices_ is not None:
for c in ap.cluster_centers_indices_:
if cache is None:
cache = X[c]
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
else:
Ksum = cosine_similarity(cache, X[c])
#print "MAX SIM", Ksum.max()
#print input_sents.reset_index(drop=True).iloc[c]["sent text"]
if Ksum.max() < sim_threshold:
cache = np.vstack([cache, X[c]])
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
#
# for l, i in enumerate(af.cluster_centers_indices_):
# support = np.sum(labels == l)
# center = input_sents.iloc[i][["update id", "sent text", "pretty text", "stems", "nuggets"]]
# center = center.to_dict()
# center["support"] = support
# center["timestamp"] = current_time
# clusters.append(center)
#
prev_time = current_time
# df = pd.DataFrame(clusters, columns=["update id", "timestamp", "support", "sent text", "pretty text", "stems", "nuggets"])
#
# import os
# dirname = "clusters"
# if not os.path.exists(dirname):
# os.makedirs(dirname)
#
# with open(os.path.join(dirname, "{}.tsv".format(event.query_id)), "w") as f:
# df.to_csv(f, sep="\t", index=False)
#
df = pd.DataFrame(K_data, columns=["min", "max", "median"])
print df
print df.mean()
print df.std()
print df.max()
df = pd.concat(summary_data)
df["conf"] = .5
df["team id"] = "APSAL"
df["run id"] = "sim{}_bs{}_off{}".format(
sim_threshold, bucket_size, pref_offset)
print df
of = os.path.join(output_dir, "apsal" + "sim{}_bs{}_off{}.tsv".format(
sim_threshold, bucket_size, pref_offset))
cols = ["query id", "team id", "run id", "stream id", "sent id",
"system timestamp", "conf"]
df[cols].to_csv(of, sep="\t", header=False, index=False)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(u"--output-dir", type=str,
required=True, help="directory to write results.")
parser.add_argument(
u"--sim-cutoff", type=float, required=True)
parser.add_argument(
u"--bucket-size", type=float, required=True)
parser.add_argument(
u"--pref-offset", type=float, required=True)
args = parser.parse_args()
main(args.output_dir, args.sim_cutoff,
args.bucket_size, args.pref_offset)
| apache-2.0 |
riddlezyc/geolab | src/structure/metricC.py | 1 | 12965 | import pickle
import matplotlib.pyplot as plt
from datetime import datetime
criteriaA = 4.0
criteriaB = 4.0
criteriaC = 10.0
dirName = r'F:\simulations\asphaltenes\production\longtime\athInHeptane\nvt\analysis\fullmatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInHeptane-illite\nvt\analysis\fullMatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInToluene\nvt\analysis\fullMatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInToluene-illite\nvt\analysis/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\a0InHeptane\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\a0InToluene\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\coalInHeptane\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\coalInToluene\nvt\rerun/'
fileNameA = 'minMinMatrixFrames.pickle'
fileNameB = 'mincoreMatrixFrames.pickle'
fileNameC = 'maxcoreMatrixFrames.pickle'
print 'opening pickle file for metric A...'
time0 = datetime.now()
with open(dirName + fileNameA, 'rb') as foo:
data = pickle.load(foo)
print 'timing:', datetime.now() - time0
def ave_accum(values):
"""Return the running (cumulative) average of a sequence."""
avelist = [values[0]]
for i in range(1, len(values)):
avelist.append((avelist[i - 1] * i + values[i]) / float(i + 1))
return avelist
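# --- Illustrative note (not part of the original script) ---
# ave_accum returns the running mean, e.g. ave_accum([2.0, 4.0, 6.0]) -> [2.0, 3.0, 4.0].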
# metric A
cluster, clusterNo, clusterAve, gmax = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if frame[i][j] <= criteriaA:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal the number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
cluster.append(avecount)
clusterNo.append(len(xconnect))
clusterAve.append(float(ng) / len(count))
gmax.append(max(count))
cumulave = ave_accum(cluster)
print 'timing:', datetime.now() - time0
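# --- Illustrative note (not part of the original script) ---
# The "cluster" value computed above (and for metrics B and C below) is
# g2 = sum(n_i**2) / sum(n_i) over cluster sizes n_i, i.e. the mass-weighted
# mean cluster size.  For example, sizes [3, 1, 1] give g2 = (9+1+1)/5 = 2.2,
# while the plain mean ("Ave") is 5/3 and the largest cluster ("gmax") is 3.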
print 'opening pickle file for metric B...'
time1 = datetime.now()
with open(dirName + fileNameB, 'rb') as foo:
data = pickle.load(foo)
print 'timing:', datetime.now() - time1
# metric B
clusterB, clusterBNo, clusterBAve, gmaxB = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if frame[i][j] <= criteriaB:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal the number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
clusterB.append(avecount)
clusterBNo.append(len(xconnect))
clusterBAve.append(float(ng) / len(count))
gmaxB.append(max(count))
print 'timing:', datetime.now() - time1
print 'opening pickle file for metric C...'
time2 = datetime.now()
with open(dirName + fileNameC, 'rb') as foo:
data = pickle.load(foo)
print 'timing:', datetime.now() - time2
clusterC, clusterCNo, clusterCAve, gmaxC = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
xswap = max(frame[i][j], frame[j][i])
if xswap <= criteriaC:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal the number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
clusterC.append(avecount)
clusterCNo.append(len(xconnect))
clusterCAve.append(float(ng) / len(count))
gmaxC.append(max(count))
print 'timing:', datetime.now() - time0
print 'writing data to file...'
with open(dirName + 'cluster-%s-%s.dat' % (criteriaC, criteriaA), 'w') as foo:
print >> foo, '#frame metricA ave_metricA metricB metricC No.A No.B No.C AveA AveB AveC gmaxA gmaxB gmaxC'
for iframe in range(len(data)):
print >> foo, '%5d%10.4f%10.4f%10.4f%10.4f%5d%5d%5d%10.4f%10.4f%10.4f%5d%5d%5d' % (
iframe, cluster[iframe], cumulave[iframe], clusterB[iframe], clusterC[iframe], clusterNo[iframe],
clusterBNo[iframe], clusterCNo[iframe], clusterAve[iframe], clusterBAve[iframe], clusterCAve[iframe],
gmax[iframe], gmaxB[iframe], gmaxC[iframe])
plt.figure(0, figsize=(8, 4))
figName = dirName + 'metric-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.plot(clusterB, label='metricB')
plt.plot(clusterC, label='metricC')
plt.plot(clusterAve, label='aveA')
plt.plot(clusterBAve, label='aveB')
plt.plot(clusterCAve, label='aveC')
plt.plot(gmax, label='gmaxA')
plt.plot(gmaxB, label='gmaxB')
plt.plot(gmaxC, label='gmaxC')
plt.legend(loc='best', ncol=3, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(0)
plt.figure(0, figsize=(8, 4))
figName = dirName + 'metric-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterC, label='metricC')
plt.semilogy(clusterAve, label='aveA')
plt.semilogy(clusterBAve, label='aveB')
plt.semilogy(clusterCAve, label='aveC')
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(gmaxC, label='gmaxC')
plt.legend(loc='best', ncol=3, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(0)
# plot C
plt.figure(1, figsize=(8, 4))
figName = dirName + 'metricC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmaxC, label='gmaxC')
plt.plot(clusterC, label='metricC')
plt.plot(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(1)
plt.figure(1, figsize=(8, 4))
figName = dirName + 'metricC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmaxC, label='gmaxC')
plt.semilogy(clusterC, label='metricC')
plt.semilogy(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(1)
plt.figure(2, figsize=(8, 4))
figName = dirName + 'metricB-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmaxB, label='gmaxB')
plt.plot(clusterB, label='metricB')
plt.plot(clusterBAve, label='aveB')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(2)
plt.figure(2, figsize=(8, 4))
figName = dirName + 'metricB-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterBAve, label='aveB')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(2)
plt.figure(3, figsize=(8, 4))
figName = dirName + 'metricA-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmax, label='gmaxA')
plt.plot(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.plot(clusterAve, label='aveA')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(3)
plt.figure(3, figsize=(8, 4))
figName = dirName + 'metricA-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.semilogy(clusterAve, label='aveA')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(3)
plt.figure(4, figsize=(8, 4))
figName = dirName + 'metricABC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmax, label='gmaxA')
plt.plot(gmaxB, label='gmaxB')
plt.plot(gmaxC, label='gmaxC')
plt.plot(cluster, label='metricA')
plt.plot(clusterB, label='metricB')
plt.plot(clusterC, label='metricC')
plt.legend(loc='best', ncol=2, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(4)
plt.figure(4, figsize=(8, 4))
figName = dirName + 'metricABC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(gmaxC, label='gmaxC')
plt.semilogy(cluster, label='metricA')
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterC, label='metricC')
plt.legend(loc='best', ncol=2, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(4)
plt.figure(5, figsize=(8, 4))
figName = dirName + 'aveABC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(clusterAve, label='aveA')
plt.plot(clusterBAve, label='aveB')
plt.plot(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(5)
plt.figure(5, figsize=(8, 4))
figName = dirName + 'aveABC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(clusterAve, label='aveA')
plt.semilogy(clusterBAve, label='aveB')
plt.semilogy(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(5)
print 'timing:', datetime.now() - time0
| gpl-3.0 |
mlyundin/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that partial_fit raises an error after n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
eggplantbren/ExperimentalNS | TwoScalars/DNest/postprocess.py | 1 | 7100 | # Copyright (c) 2009, 2010, 2011, 2012 Brendon J. Brewer.
#
# This file is part of DNest3.
#
# DNest3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DNest3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DNest3. If not, see <http://www.gnu.org/licenses/>.
import copy
import numpy as np
import matplotlib.pyplot as plt
def logsumexp(values):
biggest = np.max(values)
x = values - biggest
result = np.log(np.sum(np.exp(x))) + biggest
return result
def logdiffexp(x1, x2):
biggest = x1
xx1 = x1 - biggest
xx2 = x2 - biggest
result = np.log(np.exp(xx1) - np.exp(xx2)) + biggest
return result
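# --- Illustrative note (not part of the original script) ---
# Both helpers work on the log scale to avoid overflow, e.g.
# logsumexp(np.array([1000., 1000.])) == 1000. + np.log(2.), whereas
# np.log(np.sum(np.exp([1000., 1000.]))) would overflow to inf.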
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
cut=0., save=True, zoom_in=True):
if len(loaded) == 0:
levels = np.atleast_2d(np.loadtxt("levels.txt"))
sample_info = np.atleast_2d(np.loadtxt("sample_info.txt"))
sample = np.atleast_2d(np.loadtxt("sample.txt"))
#if(sample.shape[0] == 1):
# sample = sample.T
else:
levels, sample_info, sample = loaded[0], loaded[1], loaded[2]
sample = sample[int(cut*sample.shape[0]):, :]
sample_info = sample_info[int(cut*sample_info.shape[0]):, :]
if sample.shape[0] != sample_info.shape[0]:
print('# Size mismatch. Truncating...')
lowest = np.min([sample.shape[0], sample_info.shape[0]])
sample = sample[0:lowest, :]
sample_info = sample_info[0:lowest, :]
if plot:
if numResampleLogX > 1:
plt.ion()
plt.figure(1)
plt.plot(sample_info[:,0])
plt.xlabel("Iteration")
plt.ylabel("Level")
if numResampleLogX > 1:
plt.draw()
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(np.diff(levels[:,0]))
plt.ylabel("Compression")
plt.xlabel("Level")
xlim = plt.gca().get_xlim()
plt.axhline(-1., color='r')
plt.ylim(ymax=0.05)
if numResampleLogX > 1:
plt.draw()
plt.subplot(2,1,2)
good = np.nonzero(levels[:,4] > 0)[0]
plt.plot(levels[good,3]/levels[good,4])
plt.xlim(xlim)
plt.ylim([0., 1.])
plt.xlabel("Level")
plt.ylabel("MH Acceptance")
if numResampleLogX > 1:
plt.draw()
# Convert to lists of tuples
logl_levels = [(levels[i,1], levels[i, 2]) for i in xrange(0, levels.shape[0])] # logl, tiebreaker
logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in xrange(0, sample.shape[0])] # logl, tiebreaker, id
logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logz_estimates = np.zeros((numResampleLogX, 1))
H_estimates = np.zeros((numResampleLogX, 1))
# Find sandwiching level for each sample
sandwich = sample_info[:,0].copy().astype('int')
sandwich *= 0
for i in xrange(0, sample.shape[0]):
while sandwich[i] < levels.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
sandwich[i] += 1
for z in xrange(0, numResampleLogX):
# For each level
for i in range(0, levels.shape[0]):
# Find the samples sandwiched by this level
which = np.nonzero(sandwich == i)[0]
logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
for j in xrange(0, len(which)):
logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
logl_samples_thisLevel = sorted(logl_samples_thisLevel)
N = len(logl_samples_thisLevel)
# Generate intermediate logx values
logx_max = levels[i, 0]
if i == levels.shape[0]-1:
logx_min = -1E300
else:
logx_min = levels[i+1, 0]
Umin = np.exp(logx_min - logx_max)
if N == 0 or numResampleLogX > 1:
U = Umin + (1. - Umin)*np.random.rand(len(which))
else:
U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
for j in xrange(0, which.size):
logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
if j != which.size - 1:
left = logx_samples_thisLevel[j+1]
elif i == levels.shape[0]-1:
left = -1E300
else:
left = levels[i+1][0]
if j != 0:
right = logx_samples_thisLevel[j-1]
else:
right = levels[i][0]
logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
logl = sample_info[:,1]/temperature
logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
logP_samples[:,z] = logp_samples[:,z] + logl
logz_estimates[z] = logsumexp(logP_samples[:,z])
logP_samples[:,z] -= logz_estimates[z]
P_samples[:,z] = np.exp(logP_samples[:,z])
H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)
if plot:
plt.figure(3)
if z == 0:
plt.subplot(2,1,1)
plt.plot(logx_samples[:,z], sample_info[:,1], 'b.', label='Samples')
plt.hold(True)
plt.plot(levels[1:,0], levels[1:,1], 'r.', label='Levels')
plt.legend(numpoints=1, loc='lower left')
plt.ylabel('log(L)')
plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
# Use all plotted logl values to set ylim
combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
combined_logl = np.sort(combined_logl)
lower = combined_logl[int(0.1*combined_logl.size)]
upper = combined_logl[-1]
diff = upper - lower
lower -= 0.05*diff
upper += 0.05*diff
if zoom_in:
plt.ylim([lower, upper])
if numResampleLogX > 1:
plt.draw()
xlim = plt.gca().get_xlim()
if plot:
plt.subplot(2,1,2)
plt.hold(False)
plt.plot(logx_samples[:,z], P_samples[:,z], 'b.')
plt.ylabel('Posterior Weights')
plt.xlabel('log(X)')
plt.xlim(xlim)
if numResampleLogX > 1:
plt.draw()
P_samples = np.mean(P_samples, 1)
P_samples = P_samples/np.sum(P_samples)
logz_estimate = np.mean(logz_estimates)
logz_error = np.std(logz_estimates)
H_estimate = np.mean(H_estimates)
H_error = np.std(H_estimates)
ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
print("log(Z) = " + str(logz_estimate) + " +- " + str(logz_error))
print("Information = " + str(H_estimate) + " +- " + str(H_error) + " nats.")
print("Effective sample size = " + str(ESS))
# Resample to uniform weight
N = int(ESS)
posterior_sample = np.zeros((N, sample.shape[1]))
w = P_samples
w = w/np.max(w)
if save:
np.savetxt('weights.txt', w) # Save weights
for i in xrange(0, N):
while True:
which = np.random.randint(sample.shape[0])
if np.random.rand() <= w[which]:
break
posterior_sample[i,:] = sample[which,:]
if save:
np.savetxt("posterior_sample.txt", posterior_sample)
if plot:
if numResampleLogX > 1:
plt.ioff()
plt.show()
return [logz_estimate, H_estimate, logx_samples, logp_samples.flatten()]
| gpl-3.0 |
nvoron23/statsmodels | statsmodels/sandbox/tsa/diffusion2.py | 38 | 13366 | """ Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once, need fuzz-testing to replicate)
File "...\diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "...\diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
CumS is empty array, Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
'''
Example
-------
mu=.00 # deterministic drift
sig=.20 # Gaussian component
l=3.45 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(X.T)
plt.title('Merton jump-diffusion')
'''
def __init__(self):
pass
def simulate(self, m,s,lambd,a,D,ts,nrepl):
T = ts[-1] # time horizon (last time point)
# simulate number of jumps
n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t = T*np.random.rand(n_jumps[j])#,1) #uniform
t = np.sort(t,0)
# simulate jump size
S = a + D*np.random.randn(n_jumps[j],1)
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = np.sum(t<=ts[n])-1
#print n, Events, CumS.shape, jumps_ts.shape
jumps_ts[n]=0
if Events > 0:
jumps_ts[n] = CumS[Events] #TODO: out of bounds see top
#jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
if k>1:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class JumpDiffusionKou(object):
def __init__(self):
pass
def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl):
T=ts[-1]
# simulate number of jumps
N = np.random.poisson(lambd*T,size =(nrepl,1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t=T*np.random.rand(N[j])
t=np.sort(t)
# simulate jump size
ww = np.random.binomial(1, p, size=(N[j]))
S = ww * np.random.exponential(e1, size=(N[j])) - \
(1-ww) * np.random.exponential(e2, N[j])
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = sum(t<=ts[n])-1
jumps_ts[n]=0
if Events:
jumps_ts[n]=CumS[Events]
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
if k>1:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class VG(object):
'''variance gamma process
'''
def __init__(self):
pass
def simulate(self, m,s,kappa,ts,nrepl):
T=len(ts)
dXs = np.zeros((nrepl,T))
for t in range(T):
dt=ts[1]-0
if t>1:
dt = ts[t]-ts[t-1]
#print dt/kappa
#TODO: check parameterization of gamrnd, checked looks same as np
d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
#print s*np.sqrt(d_tau)
# this raises exception:
#dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
# np.random.normal requires scale >0
dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x
class IG(object):
'''inverse-Gaussian ??? used by NIG
'''
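    # The sampler below appears to be the Michael-Schucany-Haas transformation
    # method for IG(mu=m, lambda=l): transform a chi-square draw via the root of
    # a quadratic, then accept X with probability m/(m+X), else return m**2/X.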
def __init__(self):
pass
def simulate(self, l,m,nrepl):
N = np.random.randn(nrepl,1)
Y = N**2
X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
U = np.random.rand(nrepl,1)
ind = U>m/(X+m)
X[ind] = m*m/X[ind]
return X.ravel()
class NIG(object):
'''normal-inverse-Gaussian
'''
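    # Construction used below: at each step an inverse-Gaussian increment DS
    # (mean Dt, shape Dt**2/k, drawn via the IG class) time-changes a Brownian
    # motion with drift th and volatility s, i.e. DX = th*DS + s*sqrt(DS)*N(0,1).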
def __init__(self):
pass
def simulate(self, th,k,s,ts,nrepl):
T = len(ts)
DXs = np.zeros((nrepl,T))
for t in range(T):
            Dt=ts[0]  # first increment runs from 0 to ts[0]; later steps use successive differences
            if t>0:
                Dt=ts[t]-ts[t-1]
l = 1/k*(Dt**2)
m = Dt
DS = IG().simulate(l,m,nrepl)
N = np.random.randn(nrepl)
DX = s*N*np.sqrt(DS) + th*DS
#print DS.shape, DX.shape, DXs.shape
DXs[:,t] = DX
x = np.cumsum(DXs,1)
return x
class Heston(object):
'''Heston Stochastic Volatility
'''
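    # Euler scheme for dv = kappa*(eta - v)*dt + lambd*sqrt(v)*dW_2 and
    # dX = m*dt + sqrt(v)*dW_1 with corr(dW_1, dW_2) = r.  The variance is not
    # floored at zero here (unlike the CIR clock below); the example section
    # instead relies on the condition 2*kappa*eta > lambd**2 noted there.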
def __init__(self):
pass
def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.):
T = ts[-1]
nobs = len(ts)
dt = np.zeros(nobs) #/tratio
dt[0] = ts[0]-0
dt[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u
vt = eta*np.ones(nrepl)
v=[]
dXs = np.zeros((nrepl,nobs))
vts = np.zeros((nrepl,nobs))
for t in range(nobs):
dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t]
dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t]
vt = vt + dv
vts[:,t] = vt
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x, vts
class CIRSubordinatedBrownian(object):
'''CIR subordinated Brownian Motion
'''
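    # Sketch of the scheme below: a CIR process y_t (Euler step, floored at
    # 1e-10) drives the business time increments dtau = y_t*dt (floored at
    # 1e-6); the subordinated increments are then Normal(m*dtau, sigma**2*dtau).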
def __init__(self):
pass
def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl):
T = ts[-1]
nobs = len(ts)
dtarr = np.zeros(nobs) #/tratio
dtarr[0] = ts[0]-0
dtarr[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs)
yt = 1.
dXs = np.zeros((nrepl,nobs))
dtaus = np.zeros((nrepl,nobs))
y = np.zeros((nrepl,nobs))
for t in range(nobs):
dt = dtarr[t]
dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t]
yt = np.maximum(yt+dy,1e-10) # keep away from zero ?
dtau = np.maximum(yt*dt, 1e-6)
dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
y[:,t] = yt
dtaus[:,t] = dtau
dXs[:,t] = dX
tau = np.cumsum(dtaus,1)
x = np.cumsum(dXs,1)
return x, tau, y
def schout2contank(a,b,d):
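    # Map Schoutens' NIG parameters (alpha, beta, delta) to the Cont-Tankov
    # parameterization (theta, kappa, sigma) expected by NIG.simulate; see the
    # "(Schoutens notation)" block in the example section below.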
th = d*b/np.sqrt(a**2-b**2)
k = 1/(d*np.sqrt(a**2-b**2))
s = np.sqrt(d/np.sqrt(a**2-b**2))
return th,k,s
if __name__ == '__main__':
#Merton Jump Diffusion
#^^^^^^^^^^^^^^^^^^^^^
# grid of time values at which the process is evaluated
#("0" will be added, too)
    nobs = 252  #1000 #252; keep nobs an integer so np.linspace accepts it as num
ts = np.linspace(1./nobs, 1., nobs)
nrepl=5 # number of simulations
mu=.010 # deterministic drift
sigma = .020 # Gaussian component
lambd = 3.45 *10 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
jd = JumpDiffusionMerton()
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
sigma = 0.2
lambd = 3.45
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
#Kou jump diffusion
#^^^^^^^^^^^^^^^^^^
mu=.0 # deterministic drift
lambd=4.25 # Poisson process arrival rate
p=.5 # prob. of up-jump
e1=.2 # parameter of up-jump
e2=.3 # parameter of down-jump
sig=.2 # Gaussian component
x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('double exponential (Kou jump diffusion)')
#variance-gamma
#^^^^^^^^^^^^^^
mu = .1 # deterministic drift in subordinated Brownian motion
kappa = 1. #10. #1 # inverse for gamma shape parameter
sig = 0.5 #.2 # s.dev in subordinated Brownian motion
x = VG().simulate(mu,sig,kappa,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('variance gamma')
#normal-inverse-Gaussian
#^^^^^^^^^^^^^^^^^^^^^^^
# (Schoutens notation)
al = 2.1
be = 0
de = 1
# convert parameters to Cont-Tankov notation
th,k,s = schout2contank(al,be,de)
x = NIG().simulate(th,k,s,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo x-axis
plt.title('normal-inverse-Gaussian')
#Heston Stochastic Volatility
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
m=.0
kappa = .6 # 2*Kappa*Eta>Lambda^2
eta = .3**2
lambd =.25
r = -.7
T = 20.
    nobs = int(252*T)  #1000 #252; np.linspace needs an integer num
tsh = np.linspace(T/nobs, T, nobs)
x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)
plt.figure()
plt.plot(x.T)
plt.title('Heston Stochastic Volatility')
plt.figure()
plt.plot(np.sqrt(vts).T)
plt.title('Heston Stochastic Volatility - CIR Vol.')
plt.figure()
plt.subplot(2,1,1)
plt.plot(x[0])
plt.title('Heston Stochastic Volatility process')
plt.subplot(2,1,2)
plt.plot(np.sqrt(vts[0]))
plt.title('CIR Volatility')
#CIR subordinated Brownian
#^^^^^^^^^^^^^^^^^^^^^^^^^
m=.1
sigma=.4
kappa=.6 # 2*Kappa*T_dot>Lambda^2
T_dot=1
lambd=1
#T=252*10
#dt=1/252
#nrepl=2
T = 10.
    nobs = int(252*T)  #1000 #252
tsh = np.linspace(T/nobs, T, nobs)
x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)
plt.figure()
plt.plot(tsh, x.T)
plt.title('CIRSubordinatedBrownian process')
plt.figure()
plt.plot(tsh, y.T)
plt.title('CIRSubordinatedBrownian - CIR')
plt.figure()
plt.plot(tsh, tau.T)
plt.title('CIRSubordinatedBrownian - stochastic time ')
plt.figure()
plt.subplot(2,1,1)
plt.plot(tsh, x[0])
plt.title('CIRSubordinatedBrownian process')
plt.subplot(2,1,2)
plt.plot(tsh, y[0], label='CIR')
plt.plot(tsh, tau[0], label='stoch. time')
plt.legend(loc='upper left')
plt.title('CIRSubordinatedBrownian')
#plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/cluster/tests/test_spectral.py | 72 | 7950 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
olologin/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 24 | 11602 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
| bsd-3-clause |
aronnem/IMProToo | examples/batch_makeQuicklooks.py | 1 | 6396 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2011,2012 Maximilian Maahn, IGMK ([email protected])
make quicklooks from IMProToo NetCDF files.
use: python batch_makeQuicklooks.py pathIn pathOut site
requires:
numpy, matplotlib, netcdf4-python or python-netcdf
'''
import sys
import numpy as np
import glob
import calendar
import datetime
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc,ticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import random
import string
from copy import deepcopy
import IMProToo
from IMProToo.tools import *
try:
import netCDF4 as nc
pyNc = True
except ImportError:
import Scientific.IO.NetCDF as nc
pyNc = False
tmpDir="/tmp/"
skipExisting = True
def unix2timestamp(unix):
return datetime.datetime.utcfromtimestamp(unix).strftime("%Y%m%d")
def timestamp2unix(timestamp):
return calendar.timegm(datetime.datetime(year = int(timestamp[0:4]), month = int(timestamp[4:6]), day = int(timestamp[6:8]), hour = 0, minute = 0, second = 0).timetuple())
def quicklook(site,ncFile,imgFile,imgTitle):
"""
Makes Quicklooks of MRR data
@parameter site (str): code for the site where the data was recorded (usually 3 letter)
@parameter ncFile (str): netcdf file name incl. path, usually "path/mrr_site_yyyymmdd.nc"
@parameter imgFile (str): image file name, incl. path, extensions determines file format (e.g. png, eps, pdf ...)
@parameter imgTitle (str): plot title
"""
print "##### " + imgTitle + "######"
tmpFile = False
if ncFile.split(".")[-1]=="gz":
tmpFile = True
gzFile = deepcopy(ncFile)
ncFile = tmpDir+"/maxLibs_netcdf_"+''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))+".tmp.nc"
print 'uncompressing', gzFile, "->",ncFile
os.system("zcat "+gzFile+">"+ncFile)
else:
print 'opening', ncFile
if pyNc: ncData = nc.Dataset(ncFile,'r')
else: ncData = nc.NetCDFFile(ncFile,'r')
timestampsNew = ncData.variables["time"][:]
HNew = ncData.variables["height"][:]
ZeNew = ncData.variables["Ze"][:]
noiseAveNew = ncData.variables["etaNoiseAve"][:]
noiseStdNew = ncData.variables["etaNoiseStd"][:]
spectralWidthNew = ncData.variables["spectralWidth"][:]
WNew = ncData.variables["W"][:]
qualityNew = ncData.variables["quality"][:]
ncData.close()
if (tmpFile):
os.system("rm -f "+ncFile)
date = unix2timestamp(timestampsNew[0])
starttime = timestamp2unix(date)
endtime = starttime+60*60*24
HNew[np.isnan(HNew)] = -9999
ylim = [np.min(HNew[HNew!=-9999]),np.max(HNew)]
xlim = [starttime,endtime]
timestampsNew = oneD2twoD(timestampsNew,ZeNew.shape[1],1)
fig=plt.figure(figsize=(10, 13))
sp1 = fig.add_subplot(511)
sp1.set_title(imgTitle)
levels = np.arange(-15,40,0.1)
plotCF = sp1.contourf(timestampsNew,HNew, ZeNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
cbZe=plt.colorbar(plotCF)
cbZe.set_label('MRR Ze [dBz]')
sp1.set_ylim(ylim)
sp1.set_xlim(xlim)
sp1.axhline(HNew[-1,2])
sp1.axhline(HNew[-1,29])
sp2 = fig.add_subplot(512)
levels = np.arange(-10,18,0.1)
plotCF = sp2.contourf(timestampsNew,HNew, WNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
cbZe=plt.colorbar(plotCF)
cbZe.set_label('MRR W [m/s]')
sp2.set_ylim(ylim)
sp2.set_xlim(xlim)
sp2.axhline(HNew[-1,2])
sp2.axhline(HNew[-1,29])
sp3 = fig.add_subplot(513)
levels = np.arange(0,1.5,0.1)
plotCF = sp3.contourf(timestampsNew,HNew, spectralWidthNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
cbZe=plt.colorbar(plotCF)
cbZe.set_label('spectralWidth [m/s]')
sp3.set_ylim(ylim)
sp3.set_xlim(xlim)
sp3.axhline(HNew[-1,2])
sp3.axhline(HNew[-1,29])
sp4 = fig.add_subplot(514)
levels = np.arange(1e-10,1e-8,2e-10)
plotCF = sp4.contourf(timestampsNew,HNew, noiseAveNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
cbZe=plt.colorbar(plotCF)
cbZe.set_label('mean spectral noise [1/m]')
sp4.set_ylim(ylim)
sp4.set_xlim(xlim)
sp4.axhline(HNew[-1,2])
sp4.axhline(HNew[-1,29])
#import pdb;pdb.set_trace()
sp5 = fig.add_subplot(515)
levels = np.arange(20)
for i in levels:
levels[i] = 2**i
plotCF = sp5.contourf(timestampsNew,HNew, qualityNew, levels,cmap=plt.get_cmap("spectral"), norm = matplotlib.colors.LogNorm())#
cbZe=plt.colorbar(plotCF)
cbZe.set_label('quality array')
sp5.set_ylim(ylim)
sp5.set_xlim(xlim)
sp5.axhline(HNew[-1,2])
sp5.axhline(HNew[-1,29])
#sp1.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
sp1.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
sp1.set_xticklabels([])
#sp2.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
sp2.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
sp2.set_xticklabels([])
#sp3.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
sp3.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
sp3.set_xticklabels([])
#sp4.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
sp4.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
sp4.set_xticklabels([])
#pdb.set_trace()
#sp5.set_xlim(np.min(timestampsNew)-60,np.max(timestampsNew))
sp5.set_xticks(np.arange(sp5.get_xlim()[0],sp5.get_xlim()[1]+7200,7200))
niceDates = list()
for timestamp in np.arange(sp5.get_xlim()[0],sp5.get_xlim()[1]+7200,7200):
niceDates.append(str(datetime.datetime.utcfromtimestamp(timestamp).strftime("%H:%M")))
sp5.set_xticklabels(niceDates)
plt.subplots_adjust(hspace=0.02,left=0.085,right=0.78)
plt.savefig(imgFile)
print(imgFile)
plt.close()
return
if len(sys.argv) < 4:
print 'use: batch_makeQuicklooks.py pathIn pathOut site'
sys.exit()
pathIn = sys.argv[1]
pathOut = sys.argv[2]
site = sys.argv[3]
try: os.mkdir(pathOut)
except OSError: pass
for ncFile in np.sort(glob.glob(pathIn+"/*")):
#import pdb;pdb.set_trace()
date = ncFile.split("_")[-1].split(".")[0]
print date, ncFile
imgFile = pathOut + "/mrr_improtoo_"+IMProToo.__version__+'_'+site+"_"+date+".png"
imgTitle = site + " " + date + " IMProToo " + IMProToo.__version__
if skipExisting and os.path.isfile(imgFile):
print "Quicklook aready exists, skipping: ", date, ncFile, imgFile
continue
quicklook(site,ncFile,imgFile,imgTitle)
| gpl-3.0 |
dsm054/pandas | pandas/tests/plotting/test_boxplot_method.py | 3 | 16170 | # coding: utf-8
import pytest
import itertools
import string
from pandas import Series, DataFrame, MultiIndex
from pandas.compat import range, lzip
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy import random
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot, return_type='dict')
_check_plot_works(df.boxplot, column=[
'one', 'two'], return_type='dict')
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=['one', 'two'],
by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='indic')
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting._core.boxplot, data=df['one'],
return_type='dict')
_check_plot_works(df.boxplot, notch=1, return_type='dict')
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='indic', notch=1)
@pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
df['Y'] = Series(['A'] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='X')
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot('Col1', by='X', ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
ax_axes = ax.axes
assert ax_axes is axes['A']
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(column=['Col1', 'Col2'],
by='X', ax=ax, return_type='axes')
assert axes['Col1'].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type='dict')
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.boxplot(return_type='NOTATYPE')
result = df.boxplot()
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='dict')
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='axes')
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df['age'] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(['height', 'weight', 'age'], by='category')
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
_check_ax_limits(df['age'], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5),
columns=['A', 'B', 'C', 'D', 'E'])
result = df.boxplot(return_type='axes', figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(df.boxplot("a", fontsize=16),
xlabelsize=16, ylabelsize=16)
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by='gender')
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_boxplot_legacy2(self):
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
grouped = df.groupby(level=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_boxplot_legacy3(self):
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
grouped = df.unstack(level=1).groupby(level=0, axis=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(['male', 'female'], size=n)
df = DataFrame({'height': height, 'weight': weight, 'gender': gender})
gb = df.groupby('gender')
res = gb.plot()
assert len(self.plt.get_fignums()) == 2
assert len(res) == 2
tm.close()
res = gb.boxplot(return_type='axes')
assert len(self.plt.get_fignums()) == 1
assert len(res) == 2
tm.close()
# now works with GH 5610 as gender is excluded
res = df.groupby('gender').hist()
tm.close()
@pytest.mark.slow
def test_grouped_box_return_type(self):
df = self.hist_df
# old style: return_type=None
result = df.boxplot(by='gender')
assert isinstance(result, np.ndarray)
self._check_box_return_type(
result, None,
expected_keys=['height', 'weight', 'category'])
# now for groupby
result = df.groupby('gender').boxplot(return_type='dict')
self._check_box_return_type(
result, 'dict', expected_keys=['Male', 'Female'])
columns2 = 'X B C D A G Y N Q O'.split()
df2 = DataFrame(random.randn(50, 10), columns=columns2)
categories2 = 'A B C D E F G H I J'.split()
df2['category'] = categories2 * 5
for t in ['dict', 'axes', 'both']:
returned = df.groupby('classroom').boxplot(return_type=t)
self._check_box_return_type(
returned, t, expected_keys=['A', 'B', 'C'])
returned = df.boxplot(by='classroom', return_type=t)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'])
returned = df2.groupby('category').boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=categories2)
returned = df2.boxplot(by='category', return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
@pytest.mark.slow
def test_grouped_box_layout(self):
df = self.hist_df
pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(1, 1))
pytest.raises(ValueError, df.boxplot,
column=['height', 'weight', 'category'],
layout=(2, 1), return_type='dict')
pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(-1, -1))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('gender').boxplot,
column='height', return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
# GH 6769
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('classroom').boxplot,
column='height', return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
# GH 5897
axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',
return_type='axes')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
for ax in [axes['height']]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes['weight'], axes['category']]:
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
layout=(3, 2), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
layout=(3, -1), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(4, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(-1, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], layout=(1, 4),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
box = df.groupby('classroom').boxplot( # noqa
column=['height', 'weight', 'category'], layout=(1, -1),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
@pytest.mark.slow
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
# check warning to ignore sharex / sharey
# this check should be done in the first function which
# passes multiple axes to plot, hist or boxplot
# location should be changed if other test is added
# which has earlier alphabetical order
with tm.assert_produces_warning(UserWarning):
fig, axes = self.plt.subplots(2, 2)
df.groupby('category').boxplot(
column='height', return_type='axes', ax=axes)
self._check_axes_shape(self.plt.gcf().axes,
axes_num=4, layout=(2, 2))
fig, axes = self.plt.subplots(2, 3)
with tm.assert_produces_warning(UserWarning):
returned = df.boxplot(column=['height', 'weight', 'category'],
by='gender', return_type='axes', ax=axes[0])
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
# draw on second row
with tm.assert_produces_warning(UserWarning):
returned = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'],
return_type='axes', ax=axes[1])
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
with tm.assert_produces_warning(UserWarning):
axes = df.groupby('classroom').boxplot(ax=axes)
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
self._check_ticks_props(df.boxplot("a", by="b", fontsize=16),
xlabelsize=16, ylabelsize=16)
| bsd-3-clause |
thomasbarillot/DAQ | HHGMonitor/ADQ14_FWDAQ_streaming_example.py | 1 | 10160 | #!/usr/bin/env python3
#
# Copyright 2015 Signal Processing Devices Sweden AB. All rights reserved.
#
# Description: ADQ14 FWDAQ streaming example
# Documentation:
#
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
import sys
import time
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))+'/..')
from modules.example_helpers import *
# Record settings
number_of_records = 1000
samples_per_record = 512
# Plot data if set to True
plot_data = True
# Print metadata in headers
print_headers = True
# DMA transfer buffer settings
transfer_buffer_size = 65536
num_transfer_buffers = 8
# DMA flush timeout in seconds
flush_timeout = 0.5
# Load ADQAPI
ADQAPI = adqapi_load()
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
# Enable error logging from ADQAPI
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
# Exit if no devices were found
if n_of_ADQ < 1:
print('No ADQ connected.')
ADQAPI.DeleteADQControlUnit(adq_cu)
adqapi_unload(ADQAPI)
sys.exit(1)
# Select ADQ
if n_of_ADQ > 1:
adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
adq_num = 1
print_adq_device_revisions(ADQAPI, adq_cu, adq_num)
# Set clock source
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF)
# Maximum number of channels for ADQ14 FWPD is four
max_number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup test pattern
# 0 enables the analog input from the ADCs
# > 0 enables a specific test pattern
# Note: Default is to enable a test pattern (4) and disconnect the
# analog inputs inside the FPGA.
ADQAPI.ADQ_SetTestPatternMode(adq_cu, adq_num, 4)
# Set trig mode
SW_TRIG = 1
EXT_TRIG_1 = 2
EXT_TRIG_2 = 7
EXT_TRIG_3 = 8
LVL_TRIG = 3
INT_TRIG = 4
LVL_FALLING = 0
LVL_RISING = 1
trig_type = EXT_TRIG_1
success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trig_type)
if (success == 0):
print('ADQ_SetTriggerMode failed.')
success = ADQAPI.ADQ_SetLvlTrigLevel(adq_cu, adq_num, 0)
if (success == 0):
print('ADQ_SetLvlTrigLevel failed.')
success = ADQAPI.ADQ_SetTrigLevelResetValue(adq_cu, adq_num, 1000)
if (success == 0):
print('ADQ_SetTrigLevelResetValue failed.')
success = ADQAPI.ADQ_SetLvlTrigChannel(adq_cu, adq_num, 1)
if (success == 0):
print('ADQ_SetLvlTrigChannel failed.')
success = ADQAPI.ADQ_SetLvlTrigEdge(adq_cu, adq_num, LVL_RISING)
if (success == 0):
print('ADQ_SetLvlTrigEdge failed.')
# Setup acquisition
channels_mask = 0xf
ADQAPI.ADQ_TriggeredStreamingSetup(adq_cu, adq_num, number_of_records, samples_per_record, 0, 0, channels_mask)
ADQAPI.ADQ_SetStreamStatus(adq_cu, adq_num, 1);
# Get number of channels from device
number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup size of transfer buffers
print('Setting up streaming...')
ADQAPI.ADQ_SetTransferBuffers(adq_cu, adq_num, num_transfer_buffers, transfer_buffer_size)
# Start streaming
print('Collecting data, please wait...')
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
ADQAPI.ADQ_StartStreaming(adq_cu, adq_num)
# Allocate target buffers for intermediate data storage
target_buffers = (ct.POINTER(ct.c_int16*transfer_buffer_size)*number_of_channels)()
for bufp in target_buffers:
bufp.contents = (ct.c_int16*transfer_buffer_size)()
# Create some buffers for the full records
data_16bit = [np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16)]
# Allocate target buffers for headers
headerbuf_list = [(HEADER*number_of_records)() for ch in range(number_of_channels)]
# Create an C array of pointers to header buffers
headerbufp_list = ((ct.POINTER(HEADER*number_of_records))*number_of_channels)()
# Initiate pointers with allocated header buffers
for ch,headerbufp in enumerate(headerbufp_list):
headerbufp.contents = headerbuf_list[ch]
# Create a second level pointer to each buffer pointer,
# these will only be used to change the bufferp_list pointer values
headerbufvp_list = [ct.cast(ct.pointer(headerbufp_list[ch]), ct.POINTER(ct.c_void_p)) for ch in range(number_of_channels)]
# Allocate length output variable
samples_added = (4*ct.c_uint)()
for ind in range(len(samples_added)):
samples_added[ind] = 0
headers_added = (4*ct.c_uint)()
for ind in range(len(headers_added)):
headers_added[ind] = 0
header_status = (4*ct.c_uint)()
for ind in range(len(header_status)):
header_status[ind] = 0
# Generate triggers if software trig is used
if (trig_type == 1):
for trig in range(number_of_records):
ADQAPI.ADQ_SWTrig(adq_cu, adq_num)
print('Waiting for data...')
# Collect data until all requested records have been recieved
records_completed = [0, 0, 0, 0]
headers_completed = [0, 0, 0, 0]
records_completed_cnt = 0
ltime = time.time()
buffers_filled = ct.c_uint(0)
# Read out data until records_completed for ch A is number_of_records
while (number_of_records > records_completed[0]):
buffers_filled.value = 0
collect_result = 1
poll_time_diff_prev = time.time()
# Wait for next data buffer
while ((buffers_filled.value == 0) and (collect_result)):
collect_result = ADQAPI.ADQ_GetTransferBufferStatus(adq_cu, adq_num,
ct.byref(buffers_filled))
poll_time_diff = time.time()
if ((poll_time_diff - poll_time_diff_prev) > flush_timeout):
# Force flush
print('No data for {}s, flushing the DMA buffer.'.format(flush_timeout))
status = ADQAPI.ADQ_FlushDMA(adq_cu, adq_num);
print('ADQAPI.ADQ_FlushDMA returned {}'.format(adq_status(status)))
poll_time_diff_prev = time.time()
# Fetch data and headers into target buffers
status = ADQAPI.ADQ_GetDataStreaming(adq_cu, adq_num,
target_buffers,
headerbufp_list,
channels_mask,
ct.byref(samples_added),
ct.byref(headers_added),
ct.byref(header_status))
if status == 0:
print('GetDataStreaming failed!')
sys.exit()
for ch in range(number_of_channels):
if (headers_added[ch] > 0):
# The last call to GetDataStreaming has generated header data
if (header_status[ch]):
headers_done = headers_added[ch]
else:
# One incomplete header
headers_done = headers_added[ch]-1
# Update counter counting completed records
headers_completed[ch] += headers_done
# Update the number of completed records if at least one header has completed
if (headers_done > 0):
records_completed[ch] = headerbuf_list[ch][headers_completed[ch]-1].RecordNumber + 1
# Update header pointer so that it points to the current header
headerbufvp_list[ch].contents.value += headers_done*ct.sizeof(headerbuf_list[ch]._type_)
if headers_done > 0 and (np.sum(records_completed)-records_completed_cnt) > 1000:
dtime = time.time()-ltime
if (dtime > 0):
print('{:d} {:.2f} MB/s'.format(np.sum(records_completed),
((samples_per_record
*2
*(np.sum(records_completed)-records_completed_cnt))
/(dtime))/(1024*1024)))
sys.stdout.flush()
records_completed_cnt = np.sum(records_completed)
ltime = time.time()
if (samples_added[ch] > 0 and plot_data):
# Copy channel data to continuous buffer
data_buf = np.frombuffer(target_buffers[ch].contents, dtype=np.int16, count=samples_added[ch])
data_16bit[ch] = np.append(data_16bit[ch], data_buf)
print(records_completed[0])
# Stop streaming
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
# Print recieved headers
if print_headers:
for ch in range(max_number_of_channels):
if number_of_records > 0:
print('------------------')
print('Headers channel {}'.format(ch))
print('------------------')
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
print('RecordStatus: {}'.format(header.RecordNumber))
print('UserID: {}'.format(header.UserID))
print('SerialNumber: {}'.format(header.SerialNumber))
print('Channel: {}'.format(header.Channel))
print('DataFormat: {}'.format(header.DataFormat))
print('RecordNumber: {}'.format(header.RecordNumber))
print('Timestamp: {} ns'.format(header.Timestamp * 0.125))
print('RecordStart: {} ns'.format(header.RecordStart * 0.125))
print('SamplePeriod: {} ns'.format(header.SamplePeriod * 0.125))
print('RecordLength: {} ns'.format(header.RecordLength * (header.SamplePeriod* 0.125)))
print('------------------')
# Plot data
if plot_data:
for ch in range(max_number_of_channels):
if number_of_records > 0:
widths = np.array([], dtype=np.uint32)
record_end_offset = 0
# Extract record lengths from headers
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
if rec>0:
                    print(header.Timestamp*0.125 - headerbuf_list[ch][rec-1].Timestamp*0.125)
widths = np.append(widths, header.RecordLength)
# Get new figure
plt.figure(ch)
plt.clf()
# Plot data
plt.plot(data_16bit[ch].T, '.-')
# Set window title
plt.gcf().canvas.set_window_title('Channel {}'.format(ch))
# Set grid mode
plt.grid(which='Major')
# Mark records in plot
alternate_background(plt.gca(), 0, widths, labels=True)
# Show plot
plt.show()
# Delete ADQ device handle
ADQAPI.ADQControlUnit_DeleteADQ(adq_cu, adq_num)
# Delete ADQControlunit
ADQAPI.DeleteADQControlUnit(adq_cu)
print('Done.')
| mit |
toobaz/pandas | pandas/core/groupby/generic.py | 1 | 62983 | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from collections import OrderedDict, abc, namedtuple
import copy
import functools
from functools import partial
from textwrap import dedent
import typing
from typing import Any, Callable, FrozenSet, Iterator, Sequence, Type, Union
import warnings
import numpy as np
from pandas._libs import Timestamp, lib
from pandas.compat import PY36
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_bool,
is_datetimelike,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna
from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_apply_docs,
_transform_template,
groupby,
)
from pandas.core.index import Index, MultiIndex, _all_indexes_same
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.sparse.frame import SparseDataFrame
from pandas.plotting import boxplot_frame_groupby
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = typing.TypeVar("ScalarResult")
def whitelist_method_generator(
base_class: Type[GroupBy], klass: Type[FrameOrSeries], whitelist: FrozenSet[str]
) -> Iterator[str]:
"""
Yields all GroupBy member defs for DataFrame/Series names in whitelist.
Parameters
----------
base_class : Groupby class
base class
klass : DataFrame or Series class
class where members are defined.
whitelist : frozenset
Set of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
property_wrapper_template = """@property
def %(name)s(self) :
\"""%(doc)s\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(base_class, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ""
wrapper_template = property_wrapper_template
params = {"name": name, "doc": doc}
yield wrapper_template % params
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
return self._wrap_agged_blocks(new_items, new_blocks)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
data, agg_axis = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
new_blocks = []
new_items = []
deleted_items = []
no_result = object()
for block in data.blocks:
# Avoid inheriting result from earlier in the loop
result = no_result
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
deleted_items.append(locs)
continue
# call our grouper again with only this block
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
try:
result = s.aggregate(lambda x: alt(x, axis=self.axis))
except TypeError:
# we may have an exception in trying to aggregate
# continue and exclude the block
deleted_items.append(locs)
continue
finally:
if result is not no_result:
dtype = block.values.dtype
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result, dtype=dtype)
newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError("No numeric types to aggregate")
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
new_items = data.items.take(np.sort(indexer))
if len(deleted_items):
# we need to adjust the indexer to account for the
# items we have removed
# really should be done in internals :<
deleted = np.concatenate(deleted_items)
ai = np.arange(len(data))
mask = np.zeros(len(data))
mask[deleted] = 1
indexer = (ai - mask.cumsum())[indexer]
offset = 0
for b in new_blocks:
loc = len(b.mgr_locs)
b.mgr_locs = indexer[offset : (offset + loc)]
offset += loc
return new_items, new_blocks
def aggregate(self, func, *args, **kwargs):
_level = kwargs.pop("_level", None)
relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
if relabeling:
func, columns, order = _normalize_keyword_aggregation(kwargs)
kwargs = {}
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of " "'(column, aggfunc).")
func = _maybe_mangle_lambdas(func)
result, how = self._aggregate(func, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[func], _level=_level, _axis=self.axis
)
result.columns = Index(
result.columns.levels[0], name=self._selected_obj.columns.name
)
if isinstance(self.obj, SparseDataFrame):
# Backwards compat for groupby.agg() with sparse
# values. concat no longer converts DataFrame[Sparse]
# to SparseDataFrame, so we do it here.
result = SparseDataFrame(result._data)
except Exception:
result = self._aggregate_generic(func, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
if relabeling:
result = result[order]
result.columns = columns
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
axis = self.axis
obj = self._obj_with_exclusions
result = OrderedDict()
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs), data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs), data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = OrderedDict()
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
cast = self._transform_should_cast(func)
result[item] = colg.aggregate(func, *args, **kwargs)
if cast:
result[item] = self._try_cast(result[item], data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys, names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_not_none(values):
try:
return next(com._not_none(*values))
except StopIteration:
return None
v = first_not_none(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = self.grouper.result_index
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_not_none(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([x.index for x in values])
singular_series = len(values) == 1 and applied_index.nlevels == 1
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = {v.name for v in values}
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat,
# and if we have mi-columns
if (
isinstance(v.index, MultiIndex)
or key_index is None
or isinstance(key_index, MultiIndex)
):
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values, index=key_index, columns=index
)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(
values,
keys=key_index,
names=key_index.names,
axis=self.axis,
).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values.T, index=v.index, columns=key_index
)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index, name=self._selection_name)
# if we have datetime-like values in the original, coerce dates back,
# since stacking can easily produce object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
result = _recast_datetimelike_result(result)
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = any(isinstance(x, Timestamp) for x in values)
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return Series(values, index=key_index)._convert(
datetime=True, coerce=coerce
)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, "name", name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = "transform must return a scalar value for each group"
raise ValueError(msg)
else:
res = path(group)
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = DataFrame(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame", selected="")
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
# optimized transforms
func = self._get_cython_func(func) or func
if isinstance(func, str):
if not (func in base.transform_kernel_whitelist):
msg = "'{func}' is not a valid function name for transform(name)"
raise ValueError(msg.format(func=func))
if func in base.cythonized_kernels:
# cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
else:
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj, func)
def _transform_fast(self, result, obj, func_nm):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = self._transform_should_cast(func_nm)
# for each col, reshape to the size of the original frame
# by a take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algorithms.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not res_fast.columns.equals(group.columns):
return path, res
# verify numerical equality with the slow path
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notna(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except Exception:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError("Transform function invalid for data types")
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
"filter function returned a %s, "
"but expected a scalar bool" % type(res).__name__
)
return self._apply_filter(indices, dropna)
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = base.series_apply_whitelist
for _def_str in whitelist_method_generator(GroupBy, Series, _apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func_or_funcs=None, *args, **kwargs):
_level = kwargs.pop("_level", None)
relabeling = func_or_funcs is None
columns = None
no_arg_message = (
"Must provide 'func_or_funcs' or named " "aggregation **kwargs."
)
if relabeling:
columns = list(kwargs)
if not PY36:
# sort for 3.5 and earlier
columns = list(sorted(columns))
func_or_funcs = [kwargs[col] for col in columns]
kwargs = {}
if not columns:
raise TypeError(no_arg_message)
if isinstance(func_or_funcs, str):
return getattr(self, func_or_funcs)(*args, **kwargs)
if isinstance(func_or_funcs, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func_or_funcs = _maybe_mangle_lambdas(func_or_funcs)
ret = self._aggregate_multiple_funcs(func_or_funcs, (_level or 0) + 1)
if relabeling:
ret.columns = columns
else:
cyfunc = self._get_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print("Warning, ignoring as_index=True")
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
msg = dedent(
"""\
using a dict on a Series for aggregation
is deprecated and will be removed in a future version. Use \
named aggregation instead.
>>> grouper.agg(name_1=func_1, name_2=func_2)
"""
)
warnings.warn(msg, FutureWarning, stacklevel=3)
columns = list(arg.keys())
arg = arg.items()
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results = OrderedDict()
for name, func in arg:
obj = self
if name in results:
raise SpecificationError(
"Function names must be unique, found multiple named "
"{}".format(name)
)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
# let higher level handle
if _level:
return results
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
result = self._wrap_output(
output=output, index=self.grouper.result_index, names=names
)
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output, index=self.obj.index, names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result = self._reindex_output(DataFrame(values, index=index))
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
if isinstance(values[0], Series):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = Series(data=values, index=_get_index(), name=self._selection_name)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception("Must produce aggregated value")
result[name] = self._try_cast(output, group)
return result
@Substitution(klass="Series", selected="A.")
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
func = self._get_cython_func(func) or func
if isinstance(func, str):
if not (func in base.transform_kernel_whitelist):
msg = "'{func}' is not a valid function name for transform(name)"
raise ValueError(msg.format(func=func))
if func in base.cythonized_kernels:
# cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
else:
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs), func
)
# reg transform
klass = self._selected_obj.__class__
results = []
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
object.__setattr__(group, "name", name)
res = wrapper(group)
if isinstance(res, (ABCDataFrame, ABCSeries)):
res = res._values
indexer = self._get_index(name)
s = klass(res, indexer)
results.append(s)
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
result = concat(results).sort_index()
else:
result = Series()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs;
# the cython kernels take a different path (and do their own casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, str):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func()._values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
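# A minimal standalone sketch of the take-based broadcast that the fast
# transform path relies on: every row receives its group's aggregated value by
# indexing the per-group result with the row-to-group id array. The ids and
# values below are hypothetical.
def _sketch_transform_fast_broadcast():
    import numpy as np

    ids = np.array([0, 0, 1, 1, 1])         # row -> group id
    per_group_mean = np.array([1.5, 4.0])   # one aggregated value per group
    # rough equivalent of algorithms.take_1d(per_group_mean, ids)
    return per_group_mean.take(ids)         # array([1.5, 1.5, 4. , 4. , 4. ])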
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._internal_get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
msg = "val.dtype must be object, got {}".format(val.dtype)
assert val.dtype == object, msg
val, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isna = lambda a: a == -1
else:
_isna = isna
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isna(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
return Series(res, index=ri, name=self._selection_name)
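# A small standalone sketch of the sort-and-diff trick used by nunique above:
# after sorting by (group id, value), group starts are where the id changes and
# new unique observations are where the sorted value changes. The sample ids
# and values are hypothetical.
def _sketch_nunique_counting():
    import numpy as np

    ids = np.array([0, 0, 0, 1, 1])
    val = np.array([3, 3, 5, 7, 7])
    sorter = np.lexsort((val, ids))
    ids, val = ids[sorter], val[sorter]
    idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]  # group boundaries
    inc = np.r_[1, val[1:] != val[:-1]]                      # value changes
    inc[idx] = 1                                             # 1st item of each group
    return np.add.reduceat(inc, idx)                         # array([2, 1])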
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ids, _, _ = self.grouper.group_info
val = self.obj._internal_get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, codes=labels, names=names, verify_integrity=False
)
if is_integer_dtype(out):
out = ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
codes = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
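# A compact standalone sketch of the run-length counting behind value_counts
# above: with rows sorted by (group id, factorized value), each count is the
# distance between consecutive positions where either the id or the value
# changes. The sample arrays are hypothetical and already sorted.
def _sketch_grouped_value_counts():
    import numpy as np

    ids = np.array([0, 0, 0, 1, 1])
    lab = np.array([2, 2, 3, 3, 3])
    idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
    inc = np.r_[True, lab[1:] != lab[:-1]]
    inc[idx] = True
    return np.diff(np.nonzero(np.r_[inc, True])[0])   # array([2, 1, 2])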
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._internal_get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(
out,
index=self.grouper.result_index,
name=self._selection_name,
dtype="int64",
)
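# The count above boils down to a masked bincount over the row-to-group id
# array; a standalone sketch with hypothetical data:
def _sketch_group_count():
    import numpy as np

    ids = np.array([0, 0, 1, 1, 1])
    values = np.array([1.0, np.nan, 2.0, 3.0, np.nan])
    mask = (ids != -1) & ~np.isnan(values)
    return np.bincount(ids[mask], minlength=2)   # array([1, 2])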
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.labels)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
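# Roughly what the grouped pct_change above computes, sketched with public API
# only; the series values and group keys are hypothetical.
def _sketch_grouped_pct_change():
    import pandas as pd

    s = pd.Series([1.0, 2.0, 4.0, 10.0])
    keys = [0, 0, 1, 1]
    filled = s.groupby(keys).ffill()
    shifted = filled.groupby(keys).shift(periods=1)
    return (filled / shifted) - 1   # NaN, 1.0, NaN, 1.5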
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = base.dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in whitelist_method_generator(GroupBy, DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 -1.956929
2 3 -0.322183
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg=None, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index,
observed=self.observed,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset, selection=key, grouper=self.grouper, observed=self.observed
)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns, columns=result_index).T
else:
return DataFrame(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(
*map(
reversed,
(
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings],
),
)
)
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(
self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()),
keys=self._selected_obj.columns,
axis=1,
)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = (
(mask & ~_isna_ndarraylike(np.atleast_2d(blk.get_values())))
for blk in data.blocks
)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(
dropna=dropna
)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
results.columns.names = obj.columns.names
if not self.as_index:
results.index = ibase.default_index(len(results))
return results
boxplot = boxplot_frame_groupby
def _is_multi_agg_with_relabel(**kwargs):
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> _is_multi_agg_with_relabel(a='max')
False
>>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> _is_multi_agg_with_relabel()
False
"""
return bool(kwargs) and all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())
def _normalize_keyword_aggregation(kwargs):
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old ``OrderedDict[str, List[scalar]]``.
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
order : List[Tuple[str, str]]
Pairs of the input and output column names.
Examples
--------
>>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
(OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
"""
if not PY36:
kwargs = OrderedDict(sorted(kwargs.items()))
# Normalize the aggregation functions as Dict[column, List[func]],
# process normally, then fixup the names.
# TODO(Py35): When we drop python 3.5, change this to
# defaultdict(list)
# TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec = OrderedDict()
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
return aggspec, columns, order
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _mangle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = functools.partial(aggfunc)
aggfunc.__name__ = "<lambda_{}>".format(i)
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
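# A small standalone sketch of the renaming trick used above: wrapping a lambda
# in functools.partial yields an object whose __name__ can be set, so each
# lambda in a list gets a unique label. The sample lambdas are hypothetical.
def _sketch_lambda_renaming():
    import functools

    funcs = [lambda x: x.min(), lambda x: x.max()]
    renamed = []
    for i, f in enumerate(funcs):
        wrapped = functools.partial(f)
        wrapped.__name__ = "<lambda_{}>".format(i)
        renamed.append(wrapped)
    return [f.__name__ for f in renamed]   # ['<lambda_0>', '<lambda_1>']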
def _maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to NDFrameGroupBy.agg.
A non-dict-like `agg_spec` is passed through as-is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> _maybe_mangle_lambdas('sum')
'sum'
>>> _maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)()  # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _mangle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _mangle_lambda_list(agg_spec)
return mangled_aggspec
def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
"""
If the original frame had datetime-like values, coerce the dates back,
since stacking can easily produce object dtypes here.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
Notes
-----
- Assumes Groupby._selected_obj has ndim==2 and at least one
datetimelike column
"""
result = result.copy()
obj_cols = [
idx for idx in range(len(result.columns)) if is_object_dtype(result.dtypes[idx])
]
# See GH#26285
for n in obj_cols:
converted = maybe_convert_objects(
result.iloc[:, n].values, convert_numeric=False
)
result.iloc[:, n] = converted
return result
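# Roughly the effect of the helper above, sketched with public API on a
# hypothetical object-dtype column of timestamps.
def _sketch_recast_datetimelike():
    import pandas as pd

    stacked = pd.Series(
        [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")], dtype=object
    )
    return pd.to_datetime(stacked).dtype   # datetime64[ns]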
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
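# A short usage sketch of get_data_home; both returned paths are hypothetical
# and depend on the environment (home directory, SCIKIT_LEARN_DATA).
def _sketch_get_data_home_usage():
    default_home = get_data_home()                      # e.g. ~/scikit_learn_data
    custom_home = get_data_home('/tmp/sklearn_cache')   # created if missing
    return default_home, custom_home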
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
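# A usage sketch of load_files on the folder layout described in the docstring;
# the container path and the resulting category names are hypothetical.
def _sketch_load_files_usage():
    dataset = load_files('/path/to/container_folder',
                         encoding='utf-8', decode_error='replace',
                         shuffle=True, random_state=42)
    # dataset.data : decoded documents, dataset.target : integer labels,
    # dataset.target_names : folder names used as label names
    return dataset.target_names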
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
============== =============================
Samples total  20
Dimensionality 3 (for both data and targets)
Features       integer
Targets        integer
============== =============================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/event_handling/data_browser.py | 1 | 3261 | """
============
Data Browser
============
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
class PointBrowser(object):
"""
Click on a point to select and highlight it -- the data that
generated the point will be shown in the lower axes. Use the 'n'
and 'p' keys to browse through the next and previous points
"""
def __init__(self):
self.lastind = 0
self.text = ax.text(0.05, 0.95, 'selected: none',
transform=ax.transAxes, va='top')
self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
color='yellow', visible=False)
def onpress(self, event):
if self.lastind is None:
return
if event.key not in ('n', 'p'):
return
if event.key == 'n':
inc = 1
else:
inc = -1
self.lastind += inc
self.lastind = np.clip(self.lastind, 0, len(xs) - 1)
self.update()
def onpick(self, event):
if event.artist != line:
return True
N = len(event.ind)
if not N:
return True
# the click locations
x = event.mouseevent.xdata
y = event.mouseevent.ydata
distances = np.hypot(x - xs[event.ind], y - ys[event.ind])
indmin = distances.argmin()
dataind = event.ind[indmin]
self.lastind = dataind
self.update()
def update(self):
if self.lastind is None:
return
dataind = self.lastind
ax2.cla()
ax2.plot(X[dataind])
ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f' % (xs[dataind], ys[dataind]),
transform=ax2.transAxes, va='top')
ax2.set_ylim(-0.5, 1.5)
self.selected.set_visible(True)
self.selected.set_data(xs[dataind], ys[dataind])
self.text.set_text('selected: %d' % dataind)
fig.canvas.draw()
if 1: #__name__ == '__main__':
# Fixing random state for reproducibility
np.random.seed(19680801)
X = np.random.rand(100, 200)
xs = np.mean(X, axis=1)
ys = np.std(X, axis=1)
fig, (ax, ax2) = plt.subplots(2, 1)
ax.set_title('click on point to plot time series')
line, = ax.plot(xs, ys, 'o', picker=5) # 5 points tolerance
browser = PointBrowser()
fig.canvas.mpl_connect('pick_event', browser.onpick)
fig.canvas.mpl_connect('key_press_event', browser.onpress)
pltshow(plt)
| mit |
michael-pacheco/dota2-predictor | visualizing/dataset_stats.py | 2 | 3746 | import numpy as np
from tools.metadata import get_hero_dict
import operator
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
def winrate_statistics(dataset_df, mmr_info):
x_data, y_data = dataset_df
wins = np.zeros(114)
games = np.zeros(114)
winrate = np.zeros(114)
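    # Each row of x_data is assumed to be a 228-dim pick vector: indices 0-113
    # mark the first team's heroes, 114-227 the second team's, and y_data == 1
    # means the first team won; index 23 (an apparently unused hero slot) is
    # skipped below.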
for idx, game in enumerate(x_data):
for i in range(228):
if game[i] == 1:
games[i % 114] += 1
if y_data[idx] == 1:
if i < 114:
wins[i] += 1
else:
if i >= 114:
wins[i - 114] += 1
winrate = wins / games
winrate_dict = dict()
hero_dict = get_hero_dict()
for i in range(114):
if i != 23:
winrate_dict[hero_dict[i + 1]] = winrate[i]
sorted_winrates = sorted(winrate_dict.items(), key=operator.itemgetter(1))
x_plot_data = [x[0] for x in sorted_winrates]
y_plot_data = [x[1] for x in sorted_winrates]
title = 'Hero winrates at ' + mmr_info + ' MMR'
data = [go.Bar(
y=x_plot_data,
x=y_plot_data,
orientation='h'
)]
layout = go.Layout(
title=title,
width=1000,
height=1400,
yaxis=dict(title='hero',
ticks='',
nticks=114,
tickfont=dict(
size=8,
color='black')
),
xaxis=dict(title='win rate',
nticks=30,
tickfont=dict(
size=10,
color='black')
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hero_winrates_' + mmr_info)
def pick_statistics(dataset_df, mmr_info):
x_data, y_data = dataset_df
wins = np.zeros(114)
games = np.zeros(114)
pick_rate = np.zeros(114)
for idx, game in enumerate(x_data):
for i in range(228):
if game[i] == 1:
games[i % 114] += 1
if y_data[idx] == 1:
if i < 114:
wins[i] += 1
else:
if i >= 114:
wins[i - 114] += 1
pick_rate = games / np.sum(games)
pick_rate_dict = dict()
hero_dict = get_hero_dict()
for i in range(114):
if i != 23:
pick_rate_dict[hero_dict[i + 1]] = pick_rate[i]
sorted_pickrates = sorted(pick_rate_dict.items(), key=operator.itemgetter(1))
x_plot_data = [x[0] for x in sorted_pickrates]
y_plot_data = [x[1] for x in sorted_pickrates]
title = 'Hero pick rates at ' + mmr_info + ' MMR'
data = [go.Bar(
y=x_plot_data,
            x=[rate * 100 for rate in y_plot_data],  # pick rate as a percentage
orientation='h'
)]
layout = go.Layout(
title=title,
width=1000,
height=1400,
yaxis=dict(title='hero',
ticks='',
nticks=114,
tickfont=dict(
size=8,
color='black')
),
xaxis=dict(title='pick rate',
nticks=30,
tickfont=dict(
size=10,
color='black')
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hero_pickrates_' + mmr_info)
def mmr_distribution(csv_file):
dataset = pd.read_csv(csv_file)
data = [go.Histogram(x=dataset[:30000]['avg_mmr'])]
layout = go.Layout(
title='MMR distribution (sample of 30k games)'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='MMR_distribution')
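# Hypothetical usage sketch (assumes a preprocessed (x_data, y_data) dataset and
# plotly credentials already configured; the file name is made up):
#   winrate_statistics((x_data, y_data), '4000+')
#   pick_statistics((x_data, y_data), '4000+')
#   mmr_distribution('games.csv')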
| mit |
euri10/zipline | tests/test_algorithm_gen.py | 18 | 7339 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
def __init__(self, spread):
super(RecordDateSlippage, self).__init__(spread=spread)
self.latest_date = None
def simulate(self, event, open_orders):
self.latest_date = event.dt
result = super(RecordDateSlippage, self).simulate(event, open_orders)
return result
class TestAlgo(TradingAlgorithm):
def __init__(self, asserter, *args, **kwargs):
super(TestAlgo, self).__init__(*args, **kwargs)
self.asserter = asserter
def initialize(self, window_length=100):
self.latest_date = None
self.set_slippage(RecordDateSlippage(spread=0.05))
self.stocks = [self.sid(8229)]
self.ordered = False
self.num_bars = 0
def handle_data(self, data):
self.num_bars += 1
self.latest_date = self.get_datetime()
if not self.ordered:
for stock in self.stocks:
self.order(stock, 100)
self.ordered = True
else:
self.asserter.assertGreaterEqual(
self.latest_date,
self.slippage.latest_date
)
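# TestAlgo orders 100 shares of sid 8229 on the first bar it sees and, on every
# later bar, asserts that the algorithm clock never lags the slippage model's
# clock.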
class AlgorithmGeneratorTestCase(TestCase):
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@nottest
def test_lse_algorithm(self):
lse = trading.TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
with lse:
sim_params = factory.create_simulation_parameters(
start=datetime(2012, 5, 1, tzinfo=pytz.utc),
end=datetime(2012, 6, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
200,
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(len(results), 42)
# May 7, 2012 was an LSE holiday, confirm the 4th trading
# day was May 8.
self.assertEqual(results[4]['daily_perf']['period_open'],
datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
@timed(DEFAULT_TIMEOUT)
def test_generator_dates(self):
"""
Ensure the pipeline of generators are in sync, at least as far as
their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2011, 7, 30, tzinfo=pytz.utc),
end=datetime(2012, 7, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
self.assertTrue(list(gen))
self.assertTrue(algo.slippage.latest_date)
self.assertTrue(algo.latest_date)
@timed(DEFAULT_TIMEOUT)
def test_handle_data_on_market(self):
"""
Ensure that handle_data is only called on market minutes.
i.e. events that come in at midnight should be processed at market
open.
"""
from zipline.finance.trading import SimulationParameters
sim_params = SimulationParameters(
period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
data_frequency='minute'
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
midnight_custom_source = [Event({
'custom_field': 42.0,
'sid': 'custom_data',
'source_id': 'TestMidnightSource',
'dt': pd.Timestamp('2012-07-30', tz='UTC'),
'type': DATASOURCE_TYPE.CUSTOM
})]
minute_event_source = [Event({
'volume': 100,
'price': 200.0,
'high': 210.0,
'open_price': 190.0,
'low': 180.0,
'sid': 8229,
'source_id': 'TestMinuteEventSource',
'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').
tz_convert('UTC'),
'type': DATASOURCE_TYPE.TRADE
})]
algo.set_sources([midnight_custom_source, minute_event_source])
gen = algo.get_generator()
# Consume the generator
list(gen)
# Though the events had different time stamps, handle data should
# have only been called once, at the market open.
self.assertEqual(algo.num_bars, 1)
@timed(DEFAULT_TIMEOUT)
def test_progress(self):
"""
Ensure the pipeline of generators are in sync, at least as far as
their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2008, 1, 1, tzinfo=pytz.utc),
end=datetime(2008, 1, 5, tzinfo=pytz.utc)
)
algo = TestAlgo(self, sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(results[-2]['progress'], 1.0)
def test_benchmark_times_match_market_close_for_minutely_data(self):
"""
Benchmark dates should be adjusted so that benchmark events are
emitted at the end of each trading day when working with minutely
data.
Verification relies on the fact that there are no trades so
algo.datetime should be equal to the last benchmark time.
See https://github.com/quantopian/zipline/issues/241
"""
sim_params = create_simulation_parameters(num_days=1,
data_frequency='minute')
algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
algo.run(source=[], overwrite_sim_params=False)
self.assertEqual(algo.datetime, sim_params.last_close)
| apache-2.0 |
georgyberdyshev/ascend | pygtk/loading.py | 1 | 4002 | import sys
import config
import os.path
global have_gtk
have_gtk = False
#if not sys.executable.endswith("pythonw.exe"):
# print "PYTHON PATH =",sys.path
try:
import pygtk
pygtk.require('2.0')
import gtk
have_gtk = True
except Exception,e:
if sys.platform=="win32":
try:
from ctypes import c_int, WINFUNCTYPE, windll
from ctypes.wintypes import HWND, LPCSTR, UINT
prototype = WINFUNCTYPE(c_int, HWND, LPCSTR, LPCSTR, UINT)
paramflags = (1, "hwnd", 0), (1, "text", "Hi"), (1, "caption", None), (1, "flags", 0)
MessageBox = prototype(("MessageBoxA", windll.user32), paramflags)
MessageBox(text="""ASCEND could not load PyGTK. Probably this is because
either PyGTK, PyCairo, PyGObject or GTK+ are not installed on your
system. Please try re-installing ASCEND to rectify the problem.""")
except:
pass
else:
print "PyGTK COULD NOT BE LOADED (is it installed? do you have X-Windows running?) (%s)" % str(e)
sys.exit("FATAL ERROR: PyGTK not available, unable to start ASCEND.")
global _messages
_messages = []
def get_messages():
return _messages
def load_matplotlib(throw=False,alert=False):
print_status("Loading python matplotlib")
try:
import matplotlib
matplotlib.use('GTKAgg')
try:
print_status("Trying python numpy")
import numpy
print_status("","Using python module numpy")
except ImportError:
print_status("","FAILED to load Python module 'numpy'")
import pylab
except ImportError,e:
print_status("","FAILED TO LOAD MATPLOTLIB")
if alert or throw:
_d = gtk.MessageDialog(None,gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
,gtk.MESSAGE_ERROR,gtk.BUTTONS_CLOSE,"Plotting functions are not available unless you have 'matplotlib' installed.\n\nSee http://matplotlib.sf.net/\n\nFailed to load matplotlib (%s)" % str(e)
)
_d.run()
_d.destroy()
while gtk.events_pending():
gtk.main_iteration(False)
if throw:
raise RuntimeError("Failed to load plotting library 'matplotlib'. (%s)" % str(e))
class LoadingWindow:
def __init__(self):
self.is_loading = False
self.set_assets_dir(config.PYGTK_ASSETS)
def set_assets_dir(self, d):
self.assetsdir = d
self.splashfile = os.path.join(self.assetsdir,'ascend-loading.png')
def create_window(self):
if have_gtk:
if os.path.exists(self.splashfile):
_w = gtk.Window(gtk.WINDOW_TOPLEVEL)
_w.set_decorated(False)
_w.set_position(gtk.WIN_POS_CENTER)
_a = gtk.Alignment()
_a.set_padding(4,4,4,4)
_w.add(_a)
_a.show()
_v = gtk.VBox()
_a.add(_v)
_v.show()
_i = gtk.Image()
self.image = _i
_i.set_pixel_size(3)
_i.set_from_file(self.splashfile)
_v.add(_i)
_i.show()
_l = gtk.Label("Loading ASCEND...")
_l.set_justify(gtk.JUSTIFY_CENTER)
_v.add(_l)
_l.show()
_w.show()
self.window = _w
self.label = _l
self.is_loading = True
while gtk.events_pending():
gtk.main_iteration(False)
else:
pass
#do nothing, don't know where splash file is yet
else:
print "DON'T HAVE GTK!"
sys.exit(1)
def print_status(self,status,msg=None):
if self.is_loading:
if not sys.executable.endswith("pythonw.exe"):
print status
self.label.set_text(status)
if msg is not None:
try:
sys.stderr.write(msg+"\n")
except IOError:
pass
_messages.append(msg)
while gtk.events_pending():
gtk.main_iteration(False)
else:
try:
sys.stderr.write("\r \r")
if msg!=None:
sys.stderr.write(msg+"\r")
_messages.append(msg)
sys.stderr.write(status+"...\r")
sys.stderr.flush()
except IOError:
pass
def complete(self):
if self.is_loading:
self.window.destroy()
self.is_loading = False
global w
def print_status(status,msg=None):
w.print_status(status,msg)
def complete():
w.complete()
def create_window(assetsdir=config.PYGTK_ASSETS):
w.set_assets_dir(assetsdir)
w.create_window()
w = LoadingWindow()
create_window()
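# Typical startup flow (a sketch; assumes this module is imported before the
# heavy ASCEND imports happen):
#   import loading
#   loading.print_status("Loading solver", "solver backend selected")
#   ... perform the slow imports ...
#   loading.complete()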
| gpl-2.0 |
andyh616/mne-python | mne/stats/regression.py | 3 | 14851 | # Authors: Tal Linzen <[email protected]>
# Teon Brooks <[email protected]>
# Denis A. Engemann <[email protected]>
# Jona Sassenhagen <[email protected]>
# Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
from collections import namedtuple
from inspect import isgenerator
import warnings
from ..externals.six import string_types
import numpy as np
from scipy import linalg, sparse
from ..source_estimate import SourceEstimate
from ..epochs import _BaseEpochs
from ..evoked import Evoked, EvokedArray
from ..utils import logger, _reject_data_segments, _get_fast_dot
from ..io.pick import pick_types, pick_info
from ..fixes import in1d
def linear_regression(inst, design_matrix, names=None):
"""Fit Ordinary Least Squares regression (OLS)
Parameters
----------
inst : instance of Epochs | iterable of SourceEstimate
The data to be regressed. Contains all the trials, sensors, and time
points for the regression. For Source Estimates, accepts either a list
or a generator object.
design_matrix : ndarray, shape (n_observations, n_regressors)
The regressors to be used. Must be a 2d array with as many rows as
the first dimension of `data`. The first column of this matrix will
typically consist of ones (intercept column).
names : list-like | None
Optional parameter to name the regressors. If provided, the length must
correspond to the number of columns present in regressors
(including the intercept, if present).
Otherwise the default names are x0, x1, x2...xn for n regressors.
Returns
-------
results : dict of namedtuple
For each regressor (key) a namedtuple is provided with the
following attributes:
beta : regression coefficients
stderr : standard error of regression coefficients
t_val : t statistics (beta / stderr)
p_val : two-sided p-value of t statistic under the t distribution
mlog10_p_val : -log10 transformed p-value.
The tuple members are numpy arrays. The shape of each numpy array is
the shape of the data minus the first dimension; e.g., if the shape of
the original data was (n_observations, n_channels, n_timepoints),
then the shape of each of the arrays will be
(n_channels, n_timepoints).
"""
if names is None:
names = ['x%i' % i for i in range(design_matrix.shape[1])]
if isinstance(inst, _BaseEpochs):
picks = pick_types(inst.info, meg=True, eeg=True, ref_meg=True,
stim=False, eog=False, ecg=False,
emg=False, exclude=['bads'])
if [inst.ch_names[p] for p in picks] != inst.ch_names:
warnings.warn('Fitting linear model to non-data or bad '
'channels. Check picking', UserWarning)
msg = 'Fitting linear model to epochs'
data = inst.get_data()
out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
elif isgenerator(inst):
msg = 'Fitting linear model to source estimates (generator input)'
out = next(inst)
data = np.array([out.data] + [i.data for i in inst])
elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
msg = 'Fitting linear model to source estimates (list input)'
out = inst[0]
data = np.array([i.data for i in inst])
else:
raise ValueError('Input must be epochs or iterable of source '
'estimates')
logger.info(msg + ', (%s targets, %s regressors)' %
(np.product(data.shape[1:]), len(names)))
lm_params = _fit_lm(data, design_matrix, names)
lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
lm_fits = {}
for name in names:
parameters = [p[name] for p in lm_params]
for ii, value in enumerate(parameters):
out_ = out.copy()
if isinstance(out_, SourceEstimate):
out_._data[:] = value
elif isinstance(out_, Evoked):
out_.data[:] = value
else:
raise RuntimeError('Invalid container.')
parameters[ii] = out_
lm_fits[name] = lm(*parameters)
logger.info('Done')
return lm_fits
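# Hypothetical usage sketch (``epochs`` is an mne.Epochs object and ``condition``
# a 1-D array with one value per epoch; both are assumptions, not part of this
# module):
#   design = np.hstack([np.ones((len(epochs), 1)), condition[:, np.newaxis]])
#   lm = linear_regression(epochs, design, names=['intercept', 'condition'])
#   lm['condition'].beta.plot()  # Evoked of the regression coefficients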
def _fit_lm(data, design_matrix, names):
"""Aux function"""
from scipy import stats
n_samples = len(data)
n_features = np.product(data.shape[1:])
if design_matrix.ndim != 2:
raise ValueError('Design matrix must be a 2d array')
n_rows, n_predictors = design_matrix.shape
if n_samples != n_rows:
raise ValueError('Number of rows in design matrix must be equal '
'to number of observations')
if n_predictors != len(names):
raise ValueError('Number of regressor names must be equal to '
'number of column in design matrix')
y = np.reshape(data, (n_samples, n_features))
betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
df = n_rows - n_predictors
sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
unscaled_stderrs = np.sqrt(np.diag(design_invcov))
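    # OLS coefficient uncertainty:
    #   stderr_j = sqrt(RSS_j / df) * sqrt([(X'X)^-1]_jj)
    # from which t = beta / stderr and a two-sided p-value are derived below.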
beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
beta[predictor] = x.reshape(data.shape[1:])
stderr[predictor] = sqrt_noise_var * unscaled_stderr
t_val[predictor] = beta[predictor] / stderr[predictor]
cdf = stats.t.cdf(np.abs(t_val[predictor]), df)
p_val[predictor] = (1. - cdf) * 2.
mlog10_p_val[predictor] = -np.log10(p_val[predictor])
return beta, stderr, t_val, p_val, mlog10_p_val
def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
covariates=None, reject=None, flat=None, tstep=1.,
decim=1, picks=None, solver='pinv'):
"""Estimate regression-based evoked potentials/fields by linear modelling
This models the full M/EEG time course, including correction for
overlapping potentials and allowing for continuous/scalar predictors.
Internally, this constructs a predictor matrix X of size
n_samples * (n_conds * window length), solving the linear system
``Y = bX`` and returning ``b`` as evoked-like time series split by
condition. See [1]_.
Parameters
----------
raw : instance of Raw
A raw object. Note: be very careful about data that is not
downsampled, as the resulting matrices can be enormous and easily
overload your computer. Typically, 100 Hz sampling rate is
appropriate - or using the decim keyword (see below).
events : ndarray of int, shape (n_events, 3)
An array where the first column corresponds to samples in raw
and the last to integer codes in event_id.
event_id : dict
As in Epochs; a dictionary where the values may be integers or
iterables of integers, corresponding to the 3rd column of
events, and the keys are condition names.
tmin : float | dict
If float, gives the lower limit (in seconds) for the time window for
which all event types' effects are estimated. If a dict, can be used to
specify time windows for specific event types: keys correspond to keys
in event_id and/or covariates; for missing values, the default (-.1) is
used.
tmax : float | dict
If float, gives the upper limit (in seconds) for the time window for
which all event types' effects are estimated. If a dict, can be used to
specify time windows for specific event types: keys correspond to keys
in event_id and/or covariates; for missing values, the default (1.) is
used.
covariates : dict-like | None
If dict-like (e.g., a pandas DataFrame), values have to be array-like
and of the same length as the columns in ```events```. Keys correspond
to additional event types/conditions to be estimated and are matched
with the time points given by the first column of ```events```. If
None, only binary events (from event_id) are used.
reject : None | dict
For cleaning raw data before the regression is performed: set up
rejection parameters based on peak-to-peak amplitude in continuously
selected subepochs. If None, no rejection is done.
If dict, keys are types ('grad' | 'mag' | 'eeg' | 'eog' | 'ecg')
and values are the maximal peak-to-peak values to select rejected
epochs, e.g.::
reject = dict(grad=4000e-12, # T / m (gradiometers)
mag=4e-11, # T (magnetometers)
eeg=40e-5, # uV (EEG channels)
eog=250e-5 # uV (EOG channels))
flat : None | dict
        For cleaning raw data before the regression is performed: set up
rejection parameters based on flatness of the signal. If None, no
rejection is done. If a dict, keys are ('grad' | 'mag' |
'eeg' | 'eog' | 'ecg') and values are minimal peak-to-peak values to
select rejected epochs.
tstep : float
Length of windows for peak-to-peak detection for raw data cleaning.
decim : int
Decimate by choosing only a subsample of data points. Highly
recommended for data recorded at high sampling frequencies, as
otherwise huge intermediate matrices have to be created and inverted.
picks : None | list
List of indices of channels to be included. If None, defaults to all
MEG and EEG channels.
solver : str | function
Either a function which takes as its inputs the sparse predictor
matrix X and the observation matrix Y, and returns the coefficient
matrix b; or a string (for now, only 'pinv'), in which case the
solver used is dot(scipy.linalg.pinv(dot(X.T, X)), dot(X.T, Y.T)).T.
Returns
-------
evokeds : dict
A dict where the keys correspond to conditions and the values are
Evoked objects with the ER[F/P]s. These can be used exactly like any
other Evoked object, including e.g. plotting or statistics.
References
----------
.. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
if isinstance(solver, string_types):
if solver == 'pinv':
fast_dot = _get_fast_dot()
# inv is slightly (~10%) faster, but pinv seemingly more stable
def solver(X, Y):
return fast_dot(linalg.pinv(X.T.dot(X).todense()),
X.T.dot(Y.T)).T
else:
raise ValueError("No such solver: {0}".format(solver))
# prepare raw and events
if picks is None:
picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=True)
info = pick_info(raw.info, picks, copy=True)
info["sfreq"] /= decim
data, times = raw[:]
data = data[picks, ::decim]
times = times[::decim]
events = events.copy()
events[:, 0] -= raw.first_samp
events[:, 0] /= decim
conds = list(event_id)
if covariates is not None:
conds += list(covariates)
# time windows (per event type) are converted to sample points from times
if isinstance(tmin, (float, int)):
tmin_s = dict((cond, int(tmin * info["sfreq"])) for cond in conds)
else:
tmin_s = dict((cond, int(tmin.get(cond, -.1) * info["sfreq"]))
for cond in conds)
if isinstance(tmax, (float, int)):
tmax_s = dict(
(cond, int((tmax * info["sfreq"]) + 1.)) for cond in conds)
else:
tmax_s = dict((cond, int((tmax.get(cond, 1.) * info["sfreq"]) + 1))
for cond in conds)
# Construct predictor matrix
# We do this by creating one array per event type, shape (lags, samples)
# (where lags depends on tmin/tmax and can be different for different
# event types). Columns correspond to predictors, predictors correspond to
# time lags. Thus, each array is mostly sparse, with one diagonal of 1s
# per event (for binary predictors).
cond_length = dict()
xs = []
for cond in conds:
tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
n_lags = int(tmax_ - tmin_) # width of matrix
if cond in event_id: # for binary predictors
ids = ([event_id[cond]]
if isinstance(event_id[cond], int)
else event_id[cond])
onsets = -(events[in1d(events[:, 2], ids), 0] + tmin_)
values = np.ones((len(onsets), n_lags))
else: # for predictors from covariates, e.g. continuous ones
covs = covariates[cond]
if len(covs) != len(events):
error = ("Condition {0} from ```covariates``` is "
"not the same length as ```events```").format(cond)
raise ValueError(error)
onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
v = np.asarray(covs)[np.nonzero(covs)].astype(float)
values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]
cond_length[cond] = len(onsets)
xs.append(sparse.dia_matrix((values, onsets),
shape=(data.shape[1], n_lags)))
X = sparse.hstack(xs)
# find only those positions where at least one predictor isn't 0
has_val = np.unique(X.nonzero()[0])
# additionally, reject positions based on extreme steps in the data
if reject is not None:
_, inds = _reject_data_segments(data, reject, flat, decim=None,
info=info, tstep=tstep)
for t0, t1 in inds:
has_val = np.setdiff1d(has_val, range(t0, t1))
# solve linear system
X, data = X.tocsr()[has_val], data[:, has_val]
coefs = solver(X, data)
# construct Evoked objects to be returned from output
evokeds = dict()
cum = 0
for cond in conds:
tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
evokeds[cond] = EvokedArray(coefs[:, cum:cum + tmax_ - tmin_],
info=info, comment=cond,
tmin=tmin_ / float(info["sfreq"]),
nave=cond_length[cond],
kind='mean') # note that nave and kind are
cum += tmax_ - tmin_ # technically not correct
return evokeds
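# Hypothetical usage sketch (``raw`` is a preloaded mne.io.Raw instance and
# ``events`` comes from mne.find_events(raw); the event codes are made up):
#   evokeds = linear_regression_raw(raw, events,
#                                   event_id={'face': 1, 'house': 2},
#                                   tmin=-0.1, tmax=0.5, decim=4)
#   evokeds['face'].plot()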
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/cluster/k_means_.py | 4 | 59475 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
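# A toy sketch of the same D^2 ("k-means++") sampling idea, independent of the
# helpers above (plain NumPy; the array names are made up for illustration):
#   rng = np.random.RandomState(0)
#   X_toy = rng.rand(100, 2)
#   first = X_toy[rng.randint(100)]
#   d2 = ((X_toy - first) ** 2).sum(axis=1)
#   second = X_toy[rng.choice(100, p=d2 / d2.sum())]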
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
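# Hypothetical usage sketch of the functional interface (X is assumed to be a
# (n_samples, n_features) float array):
#   centers, labels, inertia = k_means(X, n_clusters=3, random_state=0)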
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        initialization is then run on a random subset of the data.
        This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it can fall into local minima.
    That's why it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
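        # e.g. with reassignment_ratio=0.01 and a largest count of 10000,
        # any center seen fewer than 100 times becomes a reassignment candidate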
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
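        # e.g. with batch_size=100 and n_samples=10000 this gives
        # alpha = 200 / 10001 ~= 0.02, i.e. each new minibatch contributes
        # roughly 2% to the running averages below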
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is only initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge to a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
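    Examples
    --------
    A minimal usage sketch (synthetic data; parameter values are illustrative,
    not tuned):
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ...               [4., 2.], [4., 0.], [4., 4.]])
    >>> mbk = MiniBatchKMeans(n_clusters=2, batch_size=3, random_state=0)
    >>> mbk = mbk.fit(X)
    >>> mbk.cluster_centers_.shape
    (2, 2)
    >>> mbk.predict(np.array([[0., 0.], [4., 4.]])).shape
    (2,)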
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
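        Examples
        --------
        A rough sketch of streaming usage (chunking and data are illustrative
        only):
        >>> import numpy as np
        >>> mbk = MiniBatchKMeans(n_clusters=2, random_state=0)
        >>> for chunk in np.array_split(np.random.rand(100, 2), 10):
        ...     mbk = mbk.partial_fit(chunk)
        >>> mbk.cluster_centers_.shape
        (2, 2)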
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
musically-ut/statsmodels | statsmodels/graphics/tests/test_dotplot.py | 26 | 15330 | import numpy as np
from statsmodels.graphics.dotplots import dot_plot
import pandas as pd
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
else:
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_all():
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_dotplot.pdf")
else:
pdf = None
# Basic dotplot with points only
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot")
close_or_save(pdf, fig)
# Basic vertical dotplot
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot")
close_or_save(pdf, fig)
# Tall and skinny
plt.figure(figsize=(4,12))
ax = plt.axes()
vals = np.arange(40)
    fig = dot_plot(vals, ax=ax)
ax.set_title("Tall and skinny dotplot")
ax.set_xlabel("x axis label")
close_or_save(pdf, fig)
# Short and wide
plt.figure(figsize=(12,4))
ax = plt.axes()
vals = np.arange(40)
    fig = dot_plot(vals, ax=ax, horizontal=False)
ax.set_title("Short and wide dotplot")
ax.set_ylabel("y axis label")
close_or_save(pdf, fig)
# Tall and skinny striped dotplot
plt.figure(figsize=(4,12))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True)
ax.set_title("Tall and skinny striped dotplot")
ax.set_xlim(-10, 50)
close_or_save(pdf, fig)
# Short and wide striped
plt.figure(figsize=(12,4))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True, horizontal=False)
ax.set_title("Short and wide striped dotplot")
ax.set_ylim(-10, 50)
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot with few lines")
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot with few lines")
close_or_save(pdf, fig)
# Manually set the x axis limits
plt.figure()
ax = plt.axes()
points = np.arange(20)
fig = dot_plot(points, ax=ax)
ax.set_xlim(-10, 30)
ax.set_title("Dotplot with adjusted horizontal range")
close_or_save(pdf, fig)
# Left row labels
plt.clf()
ax = plt.axes()
lines = ["ABCDEFGH"[np.random.randint(0, 8)] for k in range(20)]
points = np.random.normal(size=20)
fig = dot_plot(points, lines=lines, ax=ax)
ax.set_title("Dotplot with user-supplied labels in the left margin")
close_or_save(pdf, fig)
# Left and right row labels
plt.clf()
ax = plt.axes()
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::")
ax.set_title("Dotplot with user-supplied labels in both margins")
close_or_save(pdf, fig)
# Both sides row labels
plt.clf()
ax = plt.axes([0.1, 0.1, 0.88, 0.8])
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
horizontal=False)
txt = ax.set_title("Vertical dotplot with user-supplied labels in both margins")
txt.set_position((0.5, 1.06))
close_or_save(pdf, fig)
# Custom colors and symbols
plt.clf()
ax = plt.axes([0.1, 0.07, 0.78, 0.85])
points = np.random.normal(size=20)
lines = np.kron(range(5), np.ones(4)).astype(np.int32)
styles = np.kron(np.ones(5), range(4)).astype(np.int32)
#marker_props = {k: {"color": "rgbc"[k], "marker": "osvp"[k],
# "ms": 7, "alpha": 0.6} for k in range(4)}
# python 2.6 compat, can be removed later
marker_props = dict((k, {"color": "rgbc"[k], "marker": "osvp"[k],
"ms": 7, "alpha": 0.6}) for k in range(4))
fig = dot_plot(points, lines=lines, styles=styles, ax=ax,
marker_props=marker_props)
ax.set_title("Dotplot with custom colors and symbols")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals
plt.clf()
ax = plt.axes()
points = range(20)
fig = dot_plot(points, intervals=np.ones(20), ax=ax)
ax.set_title("Dotplot with symmetric intervals")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals, pandas inputs.
plt.clf()
ax = plt.axes()
points = pd.Series(range(20))
intervals = pd.Series(np.ones(20))
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with symmetric intervals (Pandas inputs)")
close_or_save(pdf, fig)
# Basic dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Vertical dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax, horizontal=False)
ax.set_title("Vertical dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Dotplot with nonsymmetric intervals, adjust line properties
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for x in range(20)]
line_props = {0: {"color": "lightgrey",
"solid_capstyle": "round"}}
fig = dot_plot(points, intervals=intervals, line_props=line_props, ax=ax)
ax.set_title("Dotplot with custom line properties")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
styles_order=["Dog", "Cat"])
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
styles_order = ["Dog", "Cat"]
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
horizontal=False, styles_order=styles_order)
handles, labels = ax.get_legend_handles_labels()
lh = dict(zip(labels, handles))
handles = [lh[l] for l in styles_order]
leg = plt.figlegend(handles, styles_order, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, striped=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
plt.ylim(-20, 20)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax)
ax.set_title("Dotplot with sections")
close_or_save(pdf, fig)
# Vertical dotplot with sections
plt.clf()
ax = plt.axes([0.1,0.1,0.9,0.75])
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles,
sections=sections, ax=ax, horizontal=False)
txt = ax.set_title("Vertical dotplot with sections")
txt.set_position((0.5, 1.08))
close_or_save(pdf, fig)
# Reorder sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax,
section_order=["Byy", "Axx", "Czz"])
ax.set_title("Dotplot with sections in specified order")
close_or_save(pdf, fig)
# Reorder the lines.
plt.figure()
ax = plt.axes()
points = np.arange(4)
lines = ["A", "B", "C", "D"]
line_order = ["B", "C", "A", "D"]
fig = dot_plot(points, lines=lines, line_order=line_order, ax=ax)
ax.set_title("Dotplot with reordered lines")
close_or_save(pdf, fig)
# Format labels
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
fmt_left = lambda x : "lft_" + x
fmt_right = lambda x : "rgt_" + x
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
fmt_left_name=fmt_left, fmt_right_name=fmt_right)
ax.set_title("Horizontal dotplot with name formatting")
close_or_save(pdf, fig)
# Right names only
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
show_names="right")
ax.set_title("Show right names only")
close_or_save(pdf, fig)
# Dotplot with different numbers of points per line
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = []
ii = 0
while len(lines) < 40:
for k in range(np.random.randint(1, 4)):
lines.append(ii)
ii += 1
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with different numbers of points per line")
close_or_save(pdf, fig)
if pdf_output:
pdf.close()
| bsd-3-clause |
oknuutti/visnav-py | visnav/render/stars.py | 1 | 34131 | from datetime import datetime
from functools import lru_cache
import cv2
import math
import os
import sqlite3
import re
import time
import numpy as np
import quaternion
from visnav.algo import tools
from visnav.algo.image import ImageProc
from visnav.algo.model import SystemModel
from visnav.missions.didymos import DidymosSystemModel
from visnav.missions.rosetta import RosettaSystemModel
from visnav.settings import *
# https://pysynphot.readthedocs.io/en/latest/index.html#pysynphot-installation-setup
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import importlib
mod = importlib.util.find_spec('pysynphot')
if mod is not None:
root = mod.submodule_search_locations[0]
os.environ['PYSYN_CDBS'] = os.path.join(root, 'data', 'cdbs') # http://ssb.stsci.edu/cdbs/tarfiles/synphot1.tar.gz
import pysynphot as S # http://ssb.stsci.edu/cdbs/tarfiles/synphot2.tar.gz
# http://ssb.stsci.edu/cdbs/tarfiles/synphot3.tar.gz
else:
print('warning: module pysynphot not found')
class Stars:
# from VizieR catalogs:
SOURCE_HIPPARCHOS = 'H' # I/239/hip_main
SOURCE_PASTEL = 'P' # B/pastel/pastel
SOURCE_WU = 'W' # J/A+A/525/A71/table2
SOURCE_GAIA1 = 'G' # J/MNRAS/471/770/table2
STARDB_TYC = os.path.join(DATA_DIR, 'deep_space_objects_tyc.sqlite')
STARDB_HIP = os.path.join(DATA_DIR, 'deep_space_objects_hip.sqlite')
STARDB = STARDB_HIP
MAG_CUTOFF = 10
MAG_V_LAM0 = 545e-9
SUN_MAG_V = -26.74
SUN_MAG_B = 0.6222 + SUN_MAG_V
# from sc cam frame (axis: +x, up: +z) to equatorial frame (axis: +y, up: +z)
sc2ec_q = np.quaternion(1, 0, 0, 1).normalized().conj()
@staticmethod
def black_body_radiation(Teff, lam):
return Stars.black_body_radiation_fn(Teff)(lam)
@staticmethod
def black_body_radiation_fn(Teff):
def phi(lam):
# planck's law of black body radiation [W/m3/sr]
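            # for reference, Wien's displacement law puts the peak of this
            # spectrum at lam_max ~= 2.898e-3 / Teff [m], e.g. ~500 nm for
            # Teff = 5800 K (roughly solar)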
h = 6.626e-34 # planck constant (m2kg/s)
c = 3e8 # speed of light
k = 1.380649e-23 # Boltzmann constant
r = 2*h*c**2/lam**5/(np.exp(h*c/lam/k/Teff) - 1)
return r
return phi
@staticmethod
def synthetic_radiation(Teff, fe_h, log_g, lam, mag_v=None):
return Stars.synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=mag_v)(lam)
@staticmethod
@lru_cache(maxsize=1000)
def synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models',
lam_min=0, lam_max=np.inf, return_sp=False):
return Stars.uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v, model, lam_min, lam_max, return_sp)
@staticmethod
def uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models',
lam_min=0, lam_max=np.inf, return_sp=False):
sp = None
orig_log_g = log_g
if isinstance(model, tuple):
# give in meters, W/m3
sp = S.spectrum.ArraySourceSpectrum(np.array(model[0]) * 1e10,
np.array(model[1]) * 1e-4 * 1e-10 / 1e-7, 'angstrom', 'flam')
else:
first_try = True
if Teff < 3500:
print('could not init spectral model with given t_eff=%s, using t_eff=3500K instead' % Teff)
Teff = 3500
for i in range(15):
try:
sp = S.Icat(model, Teff, fe_h, log_g) # 'ck04models' or 'k93models'
break
except:
first_try = False
log_g = log_g + (0.2 if Teff > 6000 else -0.2)
assert sp is not None, 'could not init spectral model with given params: t_eff=%s, log_g=%s, fe_h=%s' % (Teff, orig_log_g, fe_h)
if not first_try:
print('could not init spectral model with given params (t_eff=%s, log_g=%s, fe_h=%s), changed log_g to %s' %
(Teff, orig_log_g, fe_h, log_g))
if mag_v is not None:
sp = sp.renorm(mag_v, 'vegamag', S.ObsBandpass('johnson,v'))
if return_sp:
return sp
# for performance reasons (caching)
from scipy.interpolate import interp1d
I = np.logical_and(sp.wave >= lam_min*1e10, sp.wave <= lam_max*1e10)
sample_fn = interp1d(sp.wave[I], sp.flux[I], kind='linear', assume_sorted=True)
def phi(lam):
r = sample_fn(lam*1e10) # wavelength in Å, result in "flam" (erg/s/cm2/Å)
return r * 1e-7 / 1e-4 / 1e-10 # result in W/m3
return phi
@staticmethod
def magnitude_to_spectral_flux_density(mag):
# spectral flux density for standard magnitude for V-band (at 545nm)
# from "Model atmospheres broad-band colors, bolometric corrections and temperature calibrations for O - M stars"
# Bessel M.S. et al, Astronomy and Astrophysics, 1998, table A2
# Also at http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/magsystems.pdf
# 363.1e-11 erg/cm2/s/Å (erg=1e-7J, 1cm2=1e-4m2, Å=1e-10m)
phi0 = 363.1e-11 * 1e-7 / 1e-4 / 1e-10 # W/m3
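        # e.g. mag 0 maps to phi0 itself, and every +5 mag step divides the
        # flux density by 100 (10**(-0.4*5) == 1e-2)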
return np.power(10., -0.4 * mag) * phi0
@staticmethod
def tycho_to_johnson(mag_bt, mag_vt):
v = mag_vt - 0.09 * (mag_bt - mag_vt)
b = 0.85 * (mag_bt - mag_vt) + v
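        # e.g. mag_bt=1.0, mag_vt=0.5 gives roughly (b, v) ~= (0.88, 0.455)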
return b, v
@staticmethod
def effective_temp(b_v, metal=0, log_g=0):
""" magnitudes in johnson system """
# calculate star effective temperatures, from:
# - http://help.agi.com/stk/index.htm#stk/starConstruction.htm
# - Sekiguchi, M. and Fukugita, M., 2000. A Study of the B−V Color-Temperature Relation. The Astronomical Journal, 120(2), p.1072.
# - metallicity (Fe/H) and log surface gravity can be set to zero without big impact
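        # e.g. b_v=0.65 (roughly solar) with metal=log_g=0 evaluates to about 5.7e3 K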
c0 = 3.939654
c1 = -0.395361
c2 = 0.2082113
c3 = -0.0604097
f1 = 0.027153
f2 = 0.005036
g1 = 0.007367
h1 = -0.01069
return 10**(c0+c1*(b_v)+c2*(b_v)**2+c3*(b_v)**3 + f1*metal + f2*metal**2 + g1*log_g + h1*(b_v)*log_g)
@staticmethod
def flux_density(cam_q, cam, mask=None, mag_cutoff=MAG_CUTOFF, array=False, undistorted=False, order_by=None):
"""
        returns a (cam.height, cam.width) map of star flux density [W/m2] based on the star database (or per-star data if array=True),
cam_q is a quaternion in ICRS coord frame, x_fov and y_fov in degrees
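        A rough usage sketch, mirroring the __main__ block at the bottom of
        this module (the camera model choice is just an example):
            cam = DidymosSystemModel(use_narrow_cam=True).cam
            fd = Stars.flux_density(quaternion.one, cam)  # (cam.height, cam.width), W/m2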
"""
# calculate query conditions for star ra and dec
cam_dec, cam_ra, _ = tools.q_to_ypr(cam_q) # camera boresight in ICRS coords
d = np.linalg.norm((cam.x_fov, cam.y_fov))/2
min_dec, max_dec = math.degrees(cam_dec) - d, math.degrees(cam_dec) + d
dec_cond = '(dec BETWEEN %s AND %s)' % (min_dec, max_dec)
# goes over the pole to the other side of the sphere, easy solution => ignore limit on ra
skip_ra_cond = min_dec < -90 or max_dec > 90
if skip_ra_cond:
ra_cond = '1'
else:
min_ra, max_ra = math.degrees(cam_ra) - d, math.degrees(cam_ra) + d
if min_ra < 0:
ra_cond = '(ra < %s OR ra > %s)' % (max_ra, (min_ra + 360) % 360)
elif max_ra > 360:
ra_cond = '(ra > %s OR ra < %s)' % (min_ra, max_ra % 360)
else:
ra_cond = '(ra BETWEEN %s AND %s)' % (min_ra, max_ra)
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
# the magnitudes for tycho id xxxx-xxxxx-2 entries are bad as they are most likely taken from hip catalog that bundles all .*-(\d)
results = cursor.execute("""
SELECT x, y, z, mag_v""" + (", mag_b, t_eff, fe_h, log_g, dec, ra, id" if array else "") + """
FROM deep_sky_objects
WHERE """ + ("tycho like '%-1' AND " if Stars.STARDB == Stars.STARDB_TYC else "") +
"mag_v < " + str(mag_cutoff) + " AND " + dec_cond + " AND " + ra_cond +
((" ORDER BY %s ASC" % order_by) if order_by is not None else ''))
stars = np.array(results.fetchall())
conn.close()
flux_density = ([], None) if array else np.zeros((cam.height, cam.width), dtype=np.float32)
if len(stars) == 0:
return flux_density
stars[:, 0:3] = tools.q_times_mx(SystemModel.sc2gl_q.conj() * cam_q.conj(), stars[:, 0:3])
stars_ixy_ = cam.calc_img_R(stars[:, 0:3], undistorted=undistorted)
stars_ixy = np.round(stars_ixy_.astype(np.float)).astype(np.int)
I = np.logical_and.reduce((np.all(stars_ixy >= 0, axis=1),
stars_ixy[:, 0] <= cam.width-1,
stars_ixy[:, 1] <= cam.height-1))
if array:
cols = ('ix', 'iy', 'x', 'y', 'z', 'mag_v', 'mag_b', 't_eff', 'fe_h', 'log_g', 'dec', 'ra', 'id')
return (
np.hstack((stars_ixy_[I, :], stars[I, :])),
dict(zip(cols, range(len(cols))))
)
stars_ixy = stars_ixy[I, :]
flux_density_per_star = Stars.magnitude_to_spectral_flux_density(stars[I, 3])
for i, f in enumerate(flux_density_per_star):
flux_density[stars_ixy[i, 1], stars_ixy[i, 0]] += f
if mask is not None:
flux_density[np.logical_not(mask)] = 0
if True:
# assume every star is like our sun, convert to total flux density [W/m2]
solar_constant = 1360.8
# sun magnitude from http://mips.as.arizona.edu/~cnaw/sun.html
sun_flux_density = Stars.magnitude_to_spectral_flux_density(Stars.SUN_MAG_V)
flux_density = flux_density * (solar_constant / sun_flux_density)
return flux_density
@staticmethod
def get_property_by_id(id, field=None):
res = Stars._query_cursor.execute(f"select {field} from deep_sky_objects where id = {int(id)}").fetchone()[0]
return res
@staticmethod
def get_catalog_id(id, field=None):
try:
is_arr = False
id = int(id)
except:
is_arr = True
if Stars._query_conn is None:
            Stars._query_conn = sqlite3.connect(Stars.STARDB)
            Stars._query_cursor = Stars._query_conn.cursor()
field = field or ("tycho" if Stars.STARDB == Stars.STARDB_TYC else "hip")
if is_arr:
res = Stars._query_cursor.execute(
"select id, %s from deep_sky_objects where id IN (%s)" % (
field, ','.join(str(i) for i in id))).fetchall()
return {r[0]: str(r[1]) for r in res}
else:
res = Stars._query_cursor.execute(
"select %s from deep_sky_objects where id = %s" % (
field, id)).fetchone()[0]
return str(res)
_query_conn, _query_cursor = None, None
@staticmethod
def _create_stardb(fname):
conn = sqlite3.connect(fname)
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS deep_sky_objects")
cursor.execute("""
CREATE TABLE deep_sky_objects (
id INTEGER PRIMARY KEY ASC NOT NULL,
hip INT,
hd INT DEFAULT NULL,
simbad CHAR(20) DEFAULT NULL,
ra REAL NOT NULL, /* src[0] */
dec REAL NOT NULL, /* src[0] */
x REAL NOT NULL,
y REAL NOT NULL,
z REAL NOT NULL,
mag_v REAL NOT NULL, /* src[1] */
mag_b REAL DEFAULT NULL, /* src[2] */
t_eff REAL DEFAULT NULL, /* src[3] */
log_g REAL DEFAULT NULL, /* src[4] */
fe_h REAL DEFAULT NULL, /* src[5] */
src CHAR(6) DEFAULT 'HHHPPP'
)""")
cursor.execute("DROP INDEX IF EXISTS ra_idx")
cursor.execute("CREATE INDEX ra_idx ON deep_sky_objects (ra)")
cursor.execute("DROP INDEX IF EXISTS dec_idx")
cursor.execute("CREATE INDEX dec_idx ON deep_sky_objects (dec)")
cursor.execute("DROP INDEX IF EXISTS mag_idx")
cursor.execute("CREATE INDEX mag_idx ON deep_sky_objects (mag_v)")
cursor.execute("DROP INDEX IF EXISTS hd")
cursor.execute("CREATE INDEX hd ON deep_sky_objects (hd)")
cursor.execute("DROP INDEX IF EXISTS simbad")
cursor.execute("CREATE INDEX simbad ON deep_sky_objects (simbad)")
cursor.execute("DROP INDEX IF EXISTS hip")
cursor.execute("CREATE UNIQUE INDEX hip ON deep_sky_objects (hip)")
conn.commit()
@staticmethod
def import_stars_hip():
# I/239/hip_main
Stars._create_stardb(Stars.STARDB_HIP)
conn = sqlite3.connect(Stars.STARDB_HIP)
cursor = conn.cursor()
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
cols = ["HIP", "HD", "_RA.icrs", "_DE.icrs", "Vmag", "B-V"]
r = Vizier(catalog="I/239/hip_main", columns=cols, row_limit=-1).query_constraints()[0]
for i, row in enumerate(r):
hip, hd, ra, dec, mag_v, b_v = [row[f] for f in cols]
if np.any(list(map(np.ma.is_masked, (ra, dec, mag_v)))):
continue
hd = 'null' if np.ma.is_masked(hd) else hd
mag_b = 'null' if np.ma.is_masked(b_v) or np.isnan(b_v) else b_v + mag_v
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
cursor.execute("""
INSERT INTO deep_sky_objects (hip, hd, ra, dec, x, y, z, mag_v, mag_b)
VALUES (%s, %s, %f, %f, %f, %f, %f, %f, %s)"""
% (hip, hd, ra, dec, x, y, z, mag_v, mag_b))
if i % 100 == 0:
conn.commit()
tools.show_progress(len(r), i)
conn.commit()
conn.close()
@staticmethod
def import_stars_tyc():
assert False, 'not supported anymore'
Stars._create_stardb(Stars.STARDB_TYC, 12)
conn = sqlite3.connect(Stars.STARDB_TYC)
cursor = conn.cursor()
# Tycho-2 catalogue, from http://archive.eso.org/ASTROM/TYC-2/data/
for file in ('catalog.dat', 'suppl_1.dat'):
with open(os.path.join(DATA_DIR, file), 'r') as fh:
line = fh.readline()
while line:
c = line
line = fh.readline()
# mean position, ICRS, at epoch 2000.0
# proper motion milliarcsecond/year
# apparent magnitude
if file == 'catalog.dat':
# main catalog
epoch = 2000.0
tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt = c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[110:116], c[123:129]
mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt))
else:
# supplement-1 has the brightest stars, from hipparcos and tycho-1
epoch = 1991.25
tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt, flag, hip = \
c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[83:89], c[96:102], c[81:82], c[115:120]
if flag in ('H', 'V', 'B'):
if len(hip.strip()) > 0:
mag_b, mag_v = Stars.get_hip_mag_bv(hip)
else:
continue
else:
mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt))
tycho = tycho.replace(' ', '-')
if np.all(list(map(tools.numeric, (ra, dec)))):
ra, dec = list(map(float, (ra, dec)))
if -10 < mag_v < Stars.MAG_CUTOFF:
curr_epoch = datetime.now().year + \
(datetime.now().timestamp()
- datetime.strptime(str(datetime.now().year),'%Y').timestamp()
)/365.25/24/3600
years = curr_epoch - epoch
# TODO: (1) adjust to current epoch using proper motion and years since epoch
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
cursor.execute("INSERT INTO deep_sky_objects (tycho,ra,dec,x,y,z,mag_b,mag_v) VALUES (?,?,?,?,?,?,?,?)", (
tycho, (ra+360)%360, dec, x, y, z, mag_b, mag_v
))
conn.commit()
conn.close()
@staticmethod
def add_simbad_col():
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
# cursor_w.execute("alter table deep_sky_objects add column simbad char(20) default null")
# conn.commit()
N_tot = cursor_r.execute("SELECT max(id) FROM deep_sky_objects WHERE 1").fetchone()[0]
skip = 0
result = cursor_r.execute("select id, hip from deep_sky_objects where id >= %d" % skip)
import time
from astroquery.simbad import Simbad
Simbad.add_votable_fields('typed_id')
while 1:
rows = result.fetchmany(1000)
if rows is None or len(rows) == 0:
break
tools.show_progress(N_tot, rows[0][0]-1)
s = Simbad.query_objects(['HIP %d' % int(row[1]) for row in rows])
time.sleep(2)
values = []
if s is not None:
s.add_index('TYPED_ID')
for row in rows:
sr = get(s, ('HIP %d' % int(row[1])).encode('utf-8'))
if sr is not None:
k = sr['MAIN_ID'].decode('utf-8')
values.append("(%d, '%s', 0,0,0,0,0,0)" % (row[0], k.replace("'", "''")))
if len(values) > 0:
cursor_w.execute("""
INSERT INTO deep_sky_objects (id, simbad, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """
ON CONFLICT(id) DO UPDATE SET simbad = excluded.simbad""")
conn.commit()
conn.close()
@staticmethod
def query_t_eff():
from astroquery.vizier import Vizier
v = Vizier(catalog="B/pastel/pastel", columns=["ID", "Teff", "logg", "[Fe/H]"], row_limit=-1)
v2 = Vizier(catalog="J/A+A/525/A71/table2", columns=["Name", "Teff", "log(g)", "[Fe/H]"], row_limit=-1)
v3 = Vizier(catalog="J/MNRAS/471/770/table2", columns=["HIP", "Teff", "log(g)"], row_limit=-1)
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
cond = "(t_eff is null OR log_g is null OR 1)"
N_tot = cursor_r.execute("""
SELECT max(id) FROM deep_sky_objects
WHERE %s
""" % cond).fetchone()[0]
skip = 37601
f_id, f_hip, f_hd, f_sim, f_ra, f_dec, f_t, f_g, f_m, f_src = range(10)
results = cursor_r.execute("""
SELECT id, hip, hd, simbad, ra, dec, t_eff, log_g, fe_h, src
FROM deep_sky_objects
WHERE %s AND id >= ?
ORDER BY id ASC
""" % cond, (skip,))
r = v.query_constraints()[0]
r.add_index('ID')
N = 40
while True:
rows = results.fetchmany(N)
if rows is None or len(rows) == 0:
break
tools.show_progress(N_tot, rows[0][f_id]-1)
ids = {row[f_id]: [i, row[f_src][:3] + '___'] for i, row in enumerate(rows)}
insert = {}
for i, row in enumerate(rows):
k = 'HIP %6d' % int(row[f_hip])
if get(r, k) is None and row[f_hd]:
k = 'HD %6d' % int(row[f_hd])
if get(r, k) is None and row[f_sim]:
k = row[f_sim]
if get(r, k) is None and row[f_sim]:
k = row[f_sim] + ' A'
dr = get(r, k)
if dr is not None:
t_eff, log_g, fe_h = median(dr, ('Teff', 'logg', '__Fe_H_'), null='null')
src = row[f_src][0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_PASTEL) for v in (t_eff, log_g, fe_h)])
insert[row[f_id]] = [t_eff, log_g, fe_h, src]
if '_' not in src[3:5]:
ids.pop(row[f_id])
else:
ids[row[f_id]][1] = src
if len(ids) > 0:
# try using other catalog
r = v2.query_constraints(Name='=,' + ','.join([
('HD%06d' % int(rows[i][f_hd])) for i, s in ids.values() if rows[i][f_hd] is not None
]))
time.sleep(2)
if len(r) > 0:
r = r[0]
r.add_index('Name')
for id, (i, src) in ids.copy().items():
dr = get(r, 'HD%06d' % int(rows[i][f_hd])) if rows[i][f_hd] else None
if dr is not None:
t_eff, log_g, fe_h = median(dr, ('Teff', 'log_g_', '__Fe_H_'), null='null')
src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_WU) for v in (t_eff, log_g, fe_h)])
insert[id] = [t_eff, log_g, fe_h, src]
if '_' not in src[3:5]:
ids.pop(rows[i][f_id])
else:
ids[rows[i][f_id]][1] = src
if len(ids) > 0:
# try using other catalog
r = v3.query_constraints(HIP='=,' + ','.join([str(rows[i][f_hip]) for i, s in ids.values()]))[0]
r.add_index('HIP')
for id, (i, src) in ids.copy().items():
dr = get(r, int(rows[i][f_hip]))
if dr is not None:
t_eff, log_g = median(dr, ('Teff', 'log_g_'), null='null')
src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_GAIA1) for v in (t_eff, log_g)]) + src[5]
insert[id] = [t_eff, log_g, insert[id][2] if id in insert else 'null', src]
# if '_' not in src[3:5]:
# ids.pop(rows[i][f_id])
# else:
# ids[rows[i][f_id]][1] = src
if len(insert) > 0:
values = ["(%d, %s, %s, %s, '%s', 0,0,0,0,0,0)" % (
id, t_eff, log_g, fe_h, src)
for id, (t_eff, log_g, fe_h, src) in insert.items()]
cursor_w.execute("""
INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """
ON CONFLICT(id) DO UPDATE SET
t_eff = excluded.t_eff,
log_g = excluded.log_g,
fe_h = excluded.fe_h,
src = excluded.src
""")
conn.commit()
conn.close()
@staticmethod
def query_v_mag():
from astroquery.vizier import Vizier
from tqdm import tqdm
v = Vizier(catalog="B/pastel/pastel", columns=["ID", "Vmag"], row_limit=-1)
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
cond = f"(substr(src,2,1) = '{Stars.SOURCE_HIPPARCHOS}')"
N_tot = cursor_r.execute(f"SELECT count(*) FROM deep_sky_objects WHERE {cond}").fetchone()[0]
f_id, f_hip, f_hd, f_sim, f_mag_v, f_src = range(6)
results = cursor_r.execute("""
SELECT id, hip, hd, simbad, mag_v, src
FROM deep_sky_objects
WHERE %s
ORDER BY mag_v ASC
""" % cond)
r = v.query_constraints()[0]
r.add_index('ID')
N = 40
pbar = tqdm(total=N_tot)
while True:
rows = results.fetchmany(N)
if rows is None or len(rows) == 0:
break
ids = {row[f_id]: [i, row[f_src]] for i, row in enumerate(rows)}
insert = {}
for i, row in enumerate(rows):
k = 'HIP %6d' % int(row[f_hip])
if get(r, k) is None and row[f_hd]:
k = 'HD %6d' % int(row[f_hd])
if get(r, k) is None and row[f_sim]:
k = row[f_sim]
if get(r, k) is None and row[f_sim]:
k = row[f_sim] + ' A'
dr = get(r, k)
if dr is not None:
v_mag, *_ = median(dr, ('Vmag',), null='null')
if v_mag != 'null':
src = row[f_src]
src = src[:1] + Stars.SOURCE_PASTEL + src[2:]
insert[row[f_id]] = [v_mag, src]
ids.pop(row[f_id])
if len(insert) > 0:
values = [f"({id}, 0, 0, 0, '{src}', 0, 0, 0, 0, 0, {v_mag})" for id, (v_mag, src) in insert.items()]
cursor_w.execute("INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) "
"VALUES " + ','.join(values) + " "
"ON CONFLICT(id) DO UPDATE SET "
" mag_v = excluded.mag_v, "
" src = excluded.src")
conn.commit()
pbar.set_postfix({'v_mag': np.max([float(row[f_mag_v]) for row in rows])})
pbar.update(len(rows))
conn.close()
@staticmethod
def correct_supplement_data():
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
def insert_mags(hips):
res = Stars.get_hip_mag_bv([h[0] for h in hips.values()])
insert = ["('%s', %f, %f, %f, %f, %f, %f, %f)" %
(t, h[1], h[2], h[3], h[4], h[5], res[h[0]][0], res[h[0]][1])
for t, h in hips.items() if h[0] in res and -10 < res[h[0]][1] < Stars.MAG_CUTOFF]
if len(insert) > 0:
cursor.execute("""
INSERT INTO deep_sky_objects (tycho, ra, dec, x, y, z, mag_b, mag_v) VALUES
""" + ','.join(insert) + """
ON CONFLICT(tycho) DO UPDATE SET mag_b = excluded.mag_b, mag_v = excluded.mag_v """)
conn.commit()
file = 'suppl_1.dat'
N = 30
rx = re.compile(r'0*(\d+)')
with open(os.path.join(DATA_DIR, file), 'r') as fh:
hips = {}
line = fh.readline()
while line:
c = line
line = fh.readline()
tycho, ra, dec, mag_bt, mag_vt, flag, hip = c[0:12], c[15:27], c[28:40], c[83:89], c[96:102], c[81:82], c[115:123]
tycho = tycho.replace(' ', '-')
hip = rx.findall(hip)[0] if len(hip.strip()) > 0 else False
if flag in ('H', 'V', 'B') and hip:
ra, dec = float(ra), float(dec)
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
hips[tycho] = (hip, ra, dec, x, y, z)
if len(hips) >= N:
insert_mags(hips)
hips.clear()
else:
continue
if len(hips) > 0:
insert_mags(hips)
@staticmethod
def get_hip_mag_bv(hip, v=None):
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
hips = [hip] if isinstance(hip, str) else hip
v = Vizier(columns=["HIP", "Vmag", "B-V"], catalog="I/239/hip_main", row_limit=-1)
r = v.query_constraints(HIP='=,'+','.join(hips))
results = {}
if len(r):
r = r[0]
r.add_index('HIP')
for h in hips:
try:
if not np.ma.is_masked(r.loc[int(h)]['Vmag']) and not np.ma.is_masked(r.loc[int(h)]['B-V']):
mag_v, b_v = float(r.loc[int(h)]['Vmag']), float(r.loc[int(h)]['B-V'])
results[h] = (mag_v + b_v, mag_v)
except:
continue
return results.get(hip, (None, None)) if isinstance(hip, str) else results
@staticmethod
def override_betelgeuse():
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
# from "The Advanced Spectral Library (ASTRAL): Reference Spectra for Evolved M Stars",
# The Astrophysical Journal, 2018, https://iopscience.iop.org/article/10.3847/1538-4357/aaf164/pdf
#t_eff = 3650 # based on query_t_eff was 3562
#mag_v = 0.42 # based on tycho2 suppl2 was 0.58
# from CTOA observations on 2018-12-07 and 18-12-22, accessed through https://www.aavso.org database
mag_v = 0.8680
mag_b = 2.6745 # based on tycho2 suppl2 was 2.3498
t_eff = None # Stars.effective_temp(mag_b - mag_v, metal=0.006, log_g=-0.26) gives 3565K vs 3538K without log_g & metal
cursor.execute("UPDATE deep_sky_objects SET t_eff=?, mag_v=?, mag_b=? where tycho='0129-01873-1'", (t_eff, mag_v, mag_b))
conn.commit()
conn.close()
def get(r, k, d=None):
if k is None or r is None:
return d
try:
return r.loc[k]
except:
return d
def median(dr, fields, null='null'):
try:
values = [np.ma.median(dr[f]) for f in fields]
values = [(null if np.ma.is_masked(v) else v) for v in values]
except:
values = [null if np.ma.is_masked(dr[f]) or np.isnan(dr[f]) else dr[f] for f in fields]
return values
if __name__ == '__main__':
if 0:
Stars.import_stars_hip()
quit()
elif 0:
Stars.add_simbad_col()
#Stars.override_rho_ori_b()
#Stars.override_delta_ori_b()
quit()
elif 0:
Stars.query_t_eff()
quit()
elif 0:
Stars.query_v_mag()
quit()
elif 0:
img = np.zeros((1024, 1024), dtype=np.uint8)
for i in range(1000):
Stars.plot_stars(img, tools.rand_q(math.radians(180)), cam, exposure=5, gain=1)
quit()
elif 1:
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
f_id, f_hip, f_sim, f_hd, f_magv, f_magb, f_teff, f_logg, f_feh, f_src = range(10)
r = cursor.execute("""
SELECT id, hip, simbad, hd, mag_v, mag_b, t_eff, log_g, fe_h, src
FROM deep_sky_objects
WHERE hd in (48915,34085,61421,39801,35468,37128,37742,37743,44743,38771,36486,48737,36861,33111,58715)
ORDER BY mag_v
""")
rows = r.fetchall()
stars = {}
print('id\thip\tsim\thd\tmag_v\tmag_b\tt_eff\tlog_g\tfe_h\tsrc')
for row in rows:
stars[row[f_hd]] = row
print('\t'.join([str(c) for c in row]))
conn.close()
quit()
from astropy.io import fits
import matplotlib.pyplot as plt
def testf(fdat, teff, logg, feh):
sp = S.Icat('k93models', float(teff), float(feh), float(logg))\
.renorm(0, 'vegamag', S.ObsBandpass('johnson,v'))
sp_real = S.ArraySpectrum(wave=fdat[0][0], flux=fdat[0][1], fluxunits='flam')\
.renorm(0, 'vegamag', S.ObsBandpass('johnson,v'))
plt.plot(sp_real.wave, sp_real.flux)
plt.plot(sp.wave, sp.flux)
plt.xlim(3000, 10000)
plt.show()
for hd in (48737, 35468, 39801): # Lambda Orionis (HD36861) Teff too high for model (37689K)
fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd
fdat = fits.getdata(fname)
teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)]
if teff > 30000:
logg = max(logg, 4.0)
testf(fdat, teff, logg, feh or 0)
quit()
# cam = RosettaSystemModel(focused_attenuated=False).cam
cam = DidymosSystemModel(use_narrow_cam=True).cam
# cam_q = tools.rand_q(math.radians(180))
cam_q = quaternion.one
for i in range(100):
cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q
flux_density = Stars.flux_density(cam_q, cam)
img = cam.sense(flux_density, exposure=2, gain=2)
img = np.clip(img*255, 0, 255).astype('uint8')
img = ImageProc.adjust_gamma(img, 1.8)
sc = min(768/cam.width, 768/cam.height)
cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc))
cv2.waitKey()
print('done') | mit |
saiwing-yeung/scikit-learn | examples/calibration/plot_calibration.py | 66 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
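The Brier score is simply the mean squared difference between the predicted
probability and the actual binary outcome, mean((p_i - y_i)**2); lower is better.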
Compared are the estimated probabilities using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
Kismuz/btgym | btgym/research/model_based/datafeed/base.py | 1 | 29953 | ###############################################################################
#
# Copyright (C) 2017, 2018 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from logbook import Logger, StreamHandler, WARNING
import datetime
import sys, os
import copy
import backtrader.feeds as btfeeds
import numpy as np
import pandas as pd
from btgym.datafeed.derivative import BTgymDataset2
from btgym.datafeed.multi import BTgymMultiData
def base_random_generator_fn(num_points=10, **kwargs):
"""
Base random uniform generating function. Provides synthetic data points.
Args:
num_points: trajectory length
kwargs: any function parameters, not used here
Returns:
1d array of generated values; here: randoms in [0,1]
"""
return np.random.random(num_points)
def base_bias_generator_fn(num_points=10, bias=1, **kwargs):
"""
Base bias generating function. Provides constant synthetic data points.
Args:
num_points: trajectory length
bias: data point constant value >=0
kwargs: any function parameters, not used here
Returns:
        1d array of generated values; here: a constant array equal to `bias`
"""
assert bias >= 0, 'Only positive bias allowed, got: {}'.format(bias)
return np.ones(num_points) * bias
def base_generator_parameters_fn(**kwargs):
"""
Base parameters generating function. Provides arguments for data generating function.
    It accepts arguments specified via the `generator_parameters_config` dictionary;
Returns:
dictionary of kwargs consistent with generating function used.
"""
return dict()
def base_random_uniform_parameters_fn(**kwargs):
"""
Provides samples for kwargs given.
If parameter is set as float - returns exactly given value;
    if parameter is set as an iterable of form [a, b] - uniformly samples the parameter value
    from the given interval.
Args:
**kwargs: any kwarg specifying float or iterable of two ordered floats
Returns:
dictionary of kwargs holding sampled values
"""
samples = {}
for key, value in kwargs.items():
if type(value) in [int, float, np.float64]:
interval = [value, value]
else:
interval = list(value)
assert len(interval) == 2 and interval[0] <= interval[-1], \
'Expected parameter <{}> be float or ordered interval, got: {}'.format(key, value)
samples[key] = np.random.uniform(low=interval[0], high=interval[-1])
return samples
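# Illustrative usage sketch (not part of the original module): how
# `base_random_uniform_parameters_fn` treats scalar vs. interval kwargs.
# The parameter names `bias` and `drift` below are arbitrary examples.
def _example_random_uniform_parameters():
    # Scalars are returned as-is; intervals [a, b] are sampled uniformly:
    params = base_random_uniform_parameters_fn(bias=1.0, drift=[-0.5, 0.5])
    assert params['bias'] == 1.0
    assert -0.5 <= params['drift'] <= 0.5
    return params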
def base_spread_generator_fn(num_points=10, alpha=1, beta=1, minimum=0, maximum=0):
"""
    Generates spread values for a single synthetic trajectory. Samples are drawn from a parametrized beta-distribution;
    if the base generated trajectory P is given, then High/Ask value = P + 1/2 * Spread and Low/Bid value = P - 1/2 * Spread.
Args:
num_points: trajectory length
alpha: beta-distribution alpha param.
beta: beta-distribution beta param.
minimum: spread minimum value
maximum: spread maximum value
Returns:
1d array of generated values;
"""
    assert alpha > 0 and beta > 0, 'Beta-distribution parameters should be positive, got: {},{}'.format(alpha, beta)
assert minimum <= maximum, 'Spread min/max values should form ordered pair, got: {}/{}'.format(minimum, maximum)
return minimum + np.random.beta(a=alpha, b=beta, size=num_points) * (maximum - minimum)
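# Illustrative sketch (an assumption based on the docstring above, not part of
# the original module): High/Ask and Low/Bid series are formed by adding and
# subtracting half of the generated spread to/from a base trajectory.
def _example_spread_to_bid_ask(num_points=10):
    mid = base_random_generator_fn(num_points=num_points)
    spread = base_spread_generator_fn(num_points=num_points, alpha=2, beta=5, minimum=0.0, maximum=0.01)
    ask = mid + 0.5 * spread  # High/Ask
    bid = mid - 0.5 * spread  # Low/Bid
    return bid, mid, ask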
class BaseDataGenerator:
"""
Base synthetic data provider class.
"""
def __init__(
self,
episode_duration=None,
timeframe=1,
generator_fn=base_random_generator_fn,
generator_parameters_fn=base_generator_parameters_fn,
generator_parameters_config=None,
spread_generator_fn=None,
spread_generator_parameters=None,
name='BaseSyntheticDataGenerator',
data_names=('default_asset',),
parsing_params=None,
target_period=-1,
global_time=None,
task=0,
log_level=WARNING,
_nested_class_ref=None,
_nested_params=None,
**kwargs
):
"""
Args:
episode_duration: dict, duration of episode in days/hours/mins
            generator_fn: callable, should return generated data as a 1D np.array
generator_parameters_fn: callable, should return dictionary of generator_fn kwargs
generator_parameters_config: dict, generator_parameters_fn args
spread_generator_fn: callable, should return values of spread to form {High, Low}
spread_generator_parameters: dict, spread_generator_fn args
timeframe: int, data periodicity in minutes
name: str
data_names: iterable of str
target_period: int or dict, if set to -1 - disables `test` sampling
global_time: dict {y, m, d} to set custom global time (only for plotting)
task: int
log_level: logbook.Logger level
**kwargs:
"""
# Logging:
self.log_level = log_level
self.task = task
self.name = name
self.filename = self.name + '_sample'
self.target_period = target_period
self.data_names = data_names
self.data_name = self.data_names[0]
self.sample_instance = None
self.metadata = {'sample_num': 0, 'type': None, 'parent_sample_type': None}
self.data = None
self.data_stat = None
self.sample_num = 0
self.is_ready = False
if _nested_class_ref is None:
self.nested_class_ref = BaseDataGenerator
else:
self.nested_class_ref = _nested_class_ref
if _nested_params is None:
self.nested_params = dict(
episode_duration=episode_duration,
timeframe=timeframe,
generator_fn=generator_fn,
generator_parameters_fn=generator_parameters_fn,
generator_parameters_config=generator_parameters_config,
name=name,
data_names=data_names,
task=task,
log_level=log_level,
_nested_class_ref=_nested_class_ref,
_nested_params=_nested_params,
)
else:
self.nested_params = _nested_params
StreamHandler(sys.stdout).push_application()
self.log = Logger('{}_{}'.format(self.name, self.task), level=self.log_level)
# Default sample time duration:
if episode_duration is None:
self.episode_duration = dict(
days=0,
hours=23,
minutes=55,
)
else:
self.episode_duration = episode_duration
# Btfeed parsing setup:
if parsing_params is None:
self.parsing_params = dict(
names=['ask', 'bid', 'mid'],
datetime=0,
timeframe=1,
open='mid',
high='ask',
low='bid',
close='mid',
volume=-1,
openinterest=-1
)
else:
self.parsing_params = parsing_params
self.columns_map = {
'open': 'mean',
'high': 'maximum',
'low': 'minimum',
'close': 'mean',
'bid': 'minimum',
'ask': 'maximum',
'mid': 'mean',
'volume': 'nothing',
}
self.nested_params['parsing_params'] = self.parsing_params
for key, value in self.parsing_params.items():
setattr(self, key, value)
# base data feed related:
self.params = {}
if global_time is None:
self.global_time = datetime.datetime(year=2018, month=1, day=1)
else:
self.global_time = datetime.datetime(**global_time)
self.global_timestamp = self.global_time.timestamp()
# Infer time indexes and sample number of records:
self.train_index = pd.timedelta_range(
start=datetime.timedelta(days=0, hours=0, minutes=0),
end=datetime.timedelta(**self.episode_duration),
freq='{}min'.format(self.timeframe)
)
self.test_index = pd.timedelta_range(
start=self.train_index[-1] + datetime.timedelta(minutes=self.timeframe),
periods=len(self.train_index),
freq='{}min'.format(self.timeframe)
)
self.train_index += self.global_time
self.test_index += self.global_time
self.episode_num_records = len(self.train_index)
self.generator_fn = generator_fn
self.generator_parameters_fn = generator_parameters_fn
if generator_parameters_config is not None:
self.generator_parameters_config = generator_parameters_config
else:
self.generator_parameters_config = {}
self.spread_generator_fn = spread_generator_fn
if spread_generator_parameters is not None:
self.spread_generator_parameters = spread_generator_parameters
else:
self.spread_generator_parameters = {}
def set_logger(self, level=None, task=None):
"""
Sets logbook logger.
Args:
level: logbook.level, int
task: task id, int
"""
if task is not None:
self.task = task
if level is not None:
self.log = Logger('{}_{}'.format(self.name, self.task), level=level)
def reset(self, **kwargs):
self.read_csv()
self.sample_num = 0
self.is_ready = True
def read_csv(self, **kwargs):
self.data = self.generate_data(self.generator_parameters_fn(**self.generator_parameters_config))
def generate_data(self, generator_params, sample_type=0):
"""
Generates data trajectory, performs base consistency checks.
Args:
generator_params: dict, data_generating_function parameters
sample_type: 0 - generate train data | 1 - generate test data
Returns:
data as pandas dataframe
"""
assert sample_type in [0, 1],\
            'Expected sample type to be either 0 (train) or 1 (test), got: {}'.format(sample_type)
# Generate data points:
data_array = self.generator_fn(num_points=self.episode_num_records, **generator_params)
assert len(data_array.shape) == 1 and data_array.shape[0] == self.episode_num_records,\
'Expected generated data to be 1D array of length {}, got data shape: {}'.format(
self.episode_num_records,
data_array.shape
)
if self.spread_generator_fn is not None:
spread_array = self.spread_generator_fn(
num_points=self.episode_num_records,
**self.spread_generator_parameters
)
assert len(spread_array.shape) == 1 and spread_array.shape[0] == self.episode_num_records, \
'Expected generated spread to be 1D array of length {}, got data shape: {}'.format(
self.episode_num_records,
spread_array.shape
)
else:
spread_array = np.zeros(self.episode_num_records)
data_dict = {
'mean': data_array,
'maximum': data_array + .5 * spread_array,
'minimum': data_array - .5 * spread_array,
'nothing': data_array * 0.0,
}
# negs = data_dict['minimum'] < 0
# if negs.any():
# self.log.warning('{} negative generated values detected'.format(negs.shape[0]))
# Make dataframe:
if sample_type:
index = self.test_index
else:
index = self.train_index
# Map dictionary of data to dataframe columns:
df = pd.DataFrame(data={name: data_dict[self.columns_map[name]] for name in self.names}, index=index)
# df = df.set_index('hh:mm:ss')
return df
def sample(self, get_new=True, sample_type=0, **kwargs):
"""
Samples continuous subset of data.
Args:
get_new (bool): not used;
sample_type (int or bool): 0 (train) or 1 (test) - get sample from train or test data subsets
respectively.
Returns:
Dataset instance with number of records ~ max_episode_len.
"""
try:
assert sample_type in [0, 1]
except AssertionError:
            msg = 'Sampling attempt: expected sample type to be in {}, got: {}'.format([0, 1], sample_type)
self.log.error(msg)
raise ValueError(msg)
if self.target_period == -1 and sample_type:
msg = 'Attempt to sample type {} given disabled target_period'.format(sample_type)
self.log.error(msg)
raise ValueError(msg)
if self.metadata['type'] is not None:
if self.metadata['type'] != sample_type:
self.log.warning(
                    'Attempt to sample type {} given current sample type {}, overridden.'.format(
sample_type,
self.metadata['type']
)
)
sample_type = self.metadata['type']
# Get sample:
self.sample_instance = self.sample_synthetic(sample_type)
self.sample_instance.metadata['type'] = sample_type
self.sample_instance.metadata['sample_num'] = self.sample_num
self.sample_instance.metadata['parent_sample_num'] = self.metadata['sample_num']
self.sample_instance.metadata['parent_sample_type'] = self.metadata['type']
self.sample_num += 1
return self.sample_instance
def sample_synthetic(self, sample_type=0):
"""
Get data_generator instance containing synthetic data.
Args:
sample_type (int or bool): 0 (train) or 1 (test) - get sample with train or test time periods
respectively.
Returns:
nested_class_ref instance
"""
# Generate data:
generator_params = self.generator_parameters_fn(**self.generator_parameters_config)
data = self.generate_data(generator_params, sample_type=sample_type)
# Make data_class instance:
sample_instance = self.nested_class_ref(**self.nested_params)
sample_instance.filename += '_{}'.format(self.sample_num)
self.log.info('New sample id: <{}>.'.format(sample_instance.filename))
# Add data and metadata:
sample_instance.data = data
sample_instance.metadata['generator'] = generator_params
sample_instance.metadata['first_row'] = 0
sample_instance.metadata['last_row'] = self.episode_num_records
return sample_instance
def describe(self):
"""
Returns summary dataset statistic as pandas dataframe:
- records count,
- data mean,
- data std dev,
- min value,
- 25% percentile,
- 50% percentile,
- 75% percentile,
- max value
for every data column.
"""
# Pretty straightforward, using standard pandas utility.
# The only caveat here is that if actual data has not been loaded yet, need to load, describe and unload again,
# thus avoiding passing big files to BT server:
flush_data = False
try:
assert not self.data.empty
pass
except (AssertionError, AttributeError) as e:
self.read_csv()
flush_data = True
self.data_stat = self.data.describe()
self.log.info('Data summary:\n{}'.format(self.data_stat.to_string()))
if flush_data:
self.data = None
self.log.info('Flushed data.')
return self.data_stat
def to_btfeed(self):
"""
Performs BTgymData-->bt.feed conversion.
Returns:
dict of type: {data_line_name: bt.datafeed instance}.
"""
try:
assert not self.data.empty
btfeed = btfeeds.PandasDirectData(
dataname=self.data,
timeframe=self.timeframe,
datetime=self.datetime,
open=self.open,
high=self.high,
low=self.low,
close=self.close,
volume=self.volume,
openinterest=self.openinterest
)
btfeed.numrecords = self.data.shape[0]
return {self.data_name: btfeed}
except (AssertionError, AttributeError) as e:
msg = 'Instance holds no data. Hint: forgot to call .read_csv()?'
self.log.error(msg)
raise AssertionError(msg)
def set_global_timestamp(self, timestamp):
pass
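# Illustrative usage sketch of BaseDataGenerator (not part of the original
# module); argument values are arbitrary examples and the default random
# uniform generator is assumed.
def _example_base_data_generator():
    generator = BaseDataGenerator(
        episode_duration={'days': 0, 'hours': 1, 'minutes': 0},
        timeframe=1,
        name='ExampleGenerator',
    )
    generator.reset()
    sample = generator.sample(sample_type=0)  # 0 -> train sample
    return sample.data.head()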
class BaseCombinedDataSet:
"""
Data provider class wrapper incorporates synthetic train and real test data streams.
"""
def __init__(
self,
train_data_config,
test_data_config,
train_class_ref=BaseDataGenerator,
test_class_ref=BTgymDataset2,
name='CombinedDataSet',
**kwargs
):
"""
Args:
filename: str, test data filename
            parsing_params: dict, test data parsing params
            episode_duration_train: dict, duration of train episode in days/hours/mins
            episode_duration_test: dict, duration of test episode in days/hours/mins
            time_gap: dict, test episode duration tolerance
            start_00: bool, def=False
            generator_fn: callable, should return generated data as a 1D np.array
generator_parameters_fn: callable, should return dictionary of generator_fn kwargs
generator_parameters_config: dict, generator_parameters_fn args
timeframe: int, data periodicity in minutes
name: str
data_names: iterable of str
global_time: dict {y, m, d} to set custom global time (here for plotting only)
task: int
log_level: logbook.Logger level
**kwargs: common kwargs
"""
self.name = name
self.log = None
try:
self.task = kwargs['task']
except KeyError:
self.task = None
self.train_data_config = train_data_config
self.test_data_config = test_data_config
self.train_data_config.update(kwargs)
self.test_data_config.update(kwargs)
self.train_data_config['name'] = self.name + '/train'
self.test_data_config['name'] = self.name + '/test'
# Declare all test data come from target domain:
self.test_data_config['target_period'] = -1
self.test_data_config['test_period'] = -1
self.streams = {
'train': train_class_ref(**self.train_data_config),
'test': test_class_ref(**self.test_data_config),
}
self.sample_instance = None
self.sample_num = 0
self.is_ready = False
# Legacy parameters, left here for BTgym API_shell:
try:
self.parsing_params = kwargs['parsing_params']
except KeyError:
self.parsing_params = dict(
sep=',',
header=0,
index_col=0,
parse_dates=True,
names=['ask', 'bid', 'mid'],
dataname=None,
datetime=0,
nullvalue=0.0,
timeframe=1,
high=1, # 'ask',
low=2, # 'bid',
open=3, # 'mid',
close=3, # 'mid',
volume=-1,
openinterest=-1,
)
try:
self.sampling_params = kwargs['sampling_params']
except KeyError:
self.sampling_params = {}
self.params = {}
self.params.update(self.parsing_params)
self.params.update(self.sampling_params)
self.set_params(self.params)
self.data_names = self.streams['test'].data_names
self.global_timestamp = 0
def set_params(self, params_dict):
"""
Batch attribute setter.
Args:
params_dict: dictionary of parameters to be set as instance attributes.
"""
for key, value in params_dict.items():
setattr(self, key, value)
def set_logger(self, *args, **kwargs):
for stream in self.streams.values():
stream.set_logger(*args, **kwargs)
self.log = self.streams['test'].log
def reset(self, *args, **kwargs):
for stream in self.streams.values():
stream.reset(*args, **kwargs)
self.task = self.streams['test'].task
self.global_timestamp = self.streams['test'].global_timestamp
self.sample_num = 0
self.is_ready = True
def read_csv(self, *args, **kwargs):
for stream in self.streams.values():
stream.read_csv(*args, **kwargs)
def describe(self,*args, **kwargs):
return self.streams['test'].describe()
def set_global_timestamp(self, *args, **kwargs):
for stream in self.streams.values():
stream.set_global_timestamp(*args, **kwargs)
self.global_timestamp = self.streams['test'].global_timestamp
def to_btfeed(self):
raise NotImplementedError
def sample(self, sample_type=0, **kwargs):
"""
Samples continuous subset of data.
Args:
sample_type (int or bool): 0 (train) or 1 (test) - get sample from train or test data subsets
respectively.
Returns:
Dataset instance with number of records ~ max_episode_len,
"""
try:
assert sample_type in [0, 1]
except AssertionError:
self.log.exception(
                'Sampling attempt: expected sample type to be in {}, got: {}'.\
format([0, 1], sample_type)
)
raise AssertionError
if sample_type:
self.sample_instance = self.streams['test'].sample(sample_type=sample_type, **kwargs)
self.sample_instance.metadata['generator'] = {}
else:
self.sample_instance = self.streams['train'].sample(sample_type=sample_type, **kwargs)
# Common metadata:
self.sample_instance.metadata['type'] = sample_type
self.sample_instance.metadata['sample_num'] = self.sample_num
self.sample_instance.metadata['parent_sample_num'] = 0
self.sample_instance.metadata['parent_sample_type'] = None
self.sample_num += 1
return self.sample_instance
class BasePairDataGenerator(BTgymMultiData):
"""
Generates pair of data streams driven by single 2-level generating process.
    TODO: make the data generating process a single stand-alone function or class method; do not use BaseDataGenerator's
"""
def __init__(
self,
data_names,
process1_config=None, # bias generator
process2_config=None, # spread generator
data_class_ref=BaseDataGenerator,
name='PairDataGenerator',
_top_level=True,
**kwargs
):
assert len(list(data_names)) == 2, 'Expected `data_names` be pair of `str`, got: {}'.format(data_names)
if process1_config is None:
self.process1_config = {
'generator_fn': base_bias_generator_fn,
'generator_parameters_fn': base_generator_parameters_fn,
'generator_parameters_config': None,
}
else:
self.process1_config = process1_config
if process2_config is None:
self.process2_config = {
'generator_fn': base_random_generator_fn,
'generator_parameters_fn': base_generator_parameters_fn,
'generator_parameters_config': None,
}
else:
self.process2_config = process2_config
data_config = {name: {'filename': None, 'config': {}} for name in data_names}
# Let first asset hold p1 generating process:
self.a1_name = data_names[0]
data_config[self.a1_name]['config'].update(self.process1_config)
# Second asset will hold p2 generating process:
self.a2_name = data_names[-1]
data_config[self.a2_name]['config'].update(self.process2_config)
self.nested_kwargs = kwargs
self.get_new_sample = not _top_level
super(BasePairDataGenerator, self).__init__(
data_config=data_config,
data_names=data_names,
data_class_ref=data_class_ref,
name=name,
**kwargs
)
def sample(self, sample_type=0, **kwargs):
if self.get_new_sample:
# Get process1 trajectory:
p1_sample = self.data[self.a1_name].sample(sample_type=sample_type, **kwargs)
# Get p2 trajectory:
p2_sample = self.data[self.a2_name].sample(sample_type=sample_type, **kwargs)
idx_intersected = p1_sample.data.index.intersection(p2_sample.data.index)
self.log.info('p1/p2 shared num. records: {}'.format(len(idx_intersected)))
            # TODO: move this generating process to a stand-alone function
# Combine processes:
data1 = p1_sample.data + 0.5 * p2_sample.data
data2 = p1_sample.data - 0.5 * p2_sample.data
metadata = copy.deepcopy(p2_sample.metadata)
else:
data1 = None
data2 = None
metadata = {}
metadata.update(
            {'type': sample_type, 'sample_num': self.sample_num, 'parent_sample_type': sample_type, 'parent_sample_num': self.sample_num}
)
# Prepare empty instance of multi_stream data:
sample = BasePairDataGenerator(
data_names=self.data_names,
process1_config=self.process1_config,
process2_config=self.process2_config,
data_class_ref=self.data_class_ref,
# task=self.task,
# log_level=self.log_level,
name='sub_' + self.name,
_top_level=False,
**self.nested_kwargs
)
# TODO: maybe add p1 metadata
sample.metadata = copy.deepcopy(metadata)
# Populate sample with data:
sample.data[self.a1_name].data = data1
sample.data[self.a2_name].data = data2
sample.filename = {key: stream.filename for key, stream in self.data.items()}
self.sample_num += 1
return sample
class BasePairCombinedDataSet(BaseCombinedDataSet):
"""
Provides doubled streams of simulated train / real test data.
Suited for pairs or spread trading setup.
"""
def __init__(
self,
assets_filenames,
process1_config=None,
process2_config=None,
train_episode_duration=None,
test_episode_duration=None,
train_class_ref=BasePairDataGenerator,
test_class_ref=BTgymMultiData,
name='PairCombinedDataSet',
**kwargs
):
assert isinstance(assets_filenames, dict),\
'Expected `assets_filenames` type `dict`, got {} '.format(type(assets_filenames))
data_names = [name for name in assets_filenames.keys()]
assert len(data_names) == 2, 'Expected exactly two assets, got: {}'.format(data_names)
train_data_config = dict(
data_names=data_names,
process1_config=process1_config,
process2_config=process2_config,
data_class_ref=BaseDataGenerator,
episode_duration=train_episode_duration,
# name=name,
)
test_data_config = dict(
data_class_ref=BTgymDataset2,
data_config={asset_name: {'filename': file_name} for asset_name, file_name in assets_filenames.items()},
episode_duration=test_episode_duration,
# name=name,
)
super(BasePairCombinedDataSet, self).__init__(
train_data_config=train_data_config,
test_data_config=test_data_config,
train_class_ref=train_class_ref,
test_class_ref=test_class_ref,
name=name,
**kwargs
)
| lgpl-3.0 |
laurent-george/bokeh | examples/glyphs/trail.py | 33 | 4656 | # -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.widgets import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5*theta)**2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1)*cos(phi2)*haversin(delta_lon)
return 2*R*atan2(sqrt(a), sqrt(1-a))
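# Worked example (illustrative, not part of the original script): one degree of
# longitude along the equator is roughly 111.2 km, which the haversine-based
# distance() above should reproduce.
def _example_haversine_distance():
    d = distance((0.0, 0.0), (0.0, 1.0))  # (lat, lon) pairs
    assert abs(d - 111.2) < 0.5
    return d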
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
    dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len(latlon) - 1)])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100*np.diff(df.alt)/(1000*dist))
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
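# Illustrative sketch (not part of the original script): the slope-to-color
# binning used in prep_data, applied to a few hand-picked slope values.
# Thresholds (in %) follow the code above: <4 green, 4-6 yellow, 6-10 pink,
# 10-15 orange, >=15 red.
def _example_slope_binning():
    sample_slopes = np.array([2.0, 5.0, 8.0, 12.0, 20.0])
    bins = np.digitize(sample_slopes, [4, 6, 10, 15])
    palette = np.array(["green", "yellow", "pink", "orange", "red"])
    return dict(zip(sample_slopes, palette[bins]))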
title = "Obiszów MTB XCM"
def trail_map(data):
lon = (min(data.lon) + max(data.lon))/2
lat = (min(data.lat) + max(data.lat))/2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(title="%s - Trail Map" % title, map_options=map_options, plot_width=800, plot_height=800)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
def altitude_profile(data):
plot = Plot(title="%s - Altitude Profile" % title, plot_width=800, plot_height=400)
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs = [ [X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys = [ [y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color = data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(
x = data.dist,
y = data.alt,
))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = VBox(children=[altitude, trail])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename) | bsd-3-clause |
gdetor/SI-RF-Structure | DiCarloProtocol/figure2-dicarlo.py | 1 | 18122 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script reproduces the second figure of DiCarlo et al., 1998 using a
# computational method given in [1].
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from scipy.spatial.distance import cdist
from scipy.ndimage.filters import gaussian_filter
from numpy.fft import rfft2, ifftshift, irfft2
def extract(Z, position, shape, fill=0):
# assert(len(position) == len(Z.shape))
# if len(shape) < len(Z.shape):
# shape = shape + Z.shape[len(Z.shape)-len(shape):]
R = np.ones(shape, dtype=Z.dtype)*fill
P = np.array(list(position)).astype(int)
Rs = np.array(list(R.shape)).astype(int)
Zs = np.array(list(Z.shape)).astype(int)
R_start = np.zeros((len(shape),)).astype(int)
R_stop = np.array(list(shape)).astype(int)
Z_start = (P-Rs//2)
Z_stop = (P+Rs//2)+Rs%2
R_start = (R_start - np.minimum(Z_start,0)).tolist()
Z_start = (np.maximum(Z_start,0)).tolist()
#R_stop = (R_stop - np.maximum(Z_stop-Zs,0)).tolist()
R_stop = np.maximum(R_start, (R_stop - np.maximum(Z_stop-Zs,0))).tolist()
Z_stop = (np.minimum(Z_stop,Zs)).tolist()
r = [slice(start,stop) for start,stop in zip(R_start,R_stop)]
z = [slice(start,stop) for start,stop in zip(Z_start,Z_stop)]
R[r] = Z[z]
return R
# -----------------------------------------------------------------------------
def thresholded(data, threshold):
return np.where(abs(data) < threshold, 0.0,data)
def locate_noise( input ):
n = input.shape[0]
data = input.copy()
for i in range(1,n-1):
for j in range(1,n-1):
count = 0
if data[i,j] != 0:
if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]):
count += 1
if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]):
count += 1
if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]):
count += 1
if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]):
count += 1
if count < 2:
data[i,j] = 0
return data
def cleanup(RF):
size = RF.shape[0]
#RF = gaussian_filter(RF, sigma=1.5)
#threshold = 0.05*np.abs(RF.max())
#RF = thresholded(RF.ravel(), threshold)
#RF = locate_noise(RF.reshape(size,size))
return RF
# -------------------------------------
def grid(n, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, noise=0.0):
_X = (np.resize(np.linspace(xmin,xmax,n),(n,n))).ravel()
_Y = (np.resize(np.linspace(ymin,ymax,n),(n,n)).T).ravel()
X = _X + np.random.uniform(-noise, noise, n*n)
Y = _Y + np.random.uniform(-noise, noise, n*n)
Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
while len(Imin) or len(Imax):
X[Imin] = _X[Imin] + np.random.uniform(-noise, noise, len(Imin))
X[Imax] = _X[Imax] + np.random.uniform(-noise, noise, len(Imax))
Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
while len(Imin) or len(Imax):
Y[Imin] = _Y[Imin] + np.random.uniform(-noise, noise, len(Imin))
Y[Imax] = _Y[Imax] + np.random.uniform(-noise, noise, len(Imax))
Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
Z = np.zeros((n*n, 2))
Z[:,0], Z[:,1] = X.ravel(), Y.ravel()
return Z
def g(x,sigma = 0.1):
return np.exp(-x**2/sigma**2)
def fromdistance(fn, shape, center=None, dtype=float):
def distance(*args):
d = 0
for i in range(len(shape)):
d += ((args[i]-center[i])/float(max(1,shape[i]-1)))**2
return np.sqrt(d)/np.sqrt(len(shape))
    if center is None:
center = np.array(list(shape))//2
return fn(np.fromfunction(distance,shape,dtype=dtype))
def Gaussian(shape,center,sigma=0.5):
def g(x):
return np.exp(-x**2/sigma**2)
return fromdistance(g,shape,center)
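# Illustrative usage sketch (not part of the original script): a 5x5 Gaussian
# bump centered on the grid; the value is exactly 1 at the center and decays
# with the normalized distance.
def _example_gaussian_kernel():
    K = Gaussian((5, 5), center=(2, 2), sigma=0.5)
    assert K[2, 2] == 1.0
    return K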
def generate_input(R,S):
"""
Given a grid of receptors and a list of stimuli positions, return the
corresponding input
"""
if len(S):
dX = np.abs(R[:,0].reshape(1,len(R)) - S[:,0].reshape(len(S),1))
dY = np.abs(R[:,1].reshape(1,len(R)) - S[:,1].reshape(len(S),1))
C = np.sqrt(dX*dX+dY*dY) / np.sqrt(2)
return g(C).max(axis=0)
return np.zeros(R.shape[0])
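# Illustrative sketch (not part of the original script): a single stimulus at
# the center of the unit square drives a small receptor grid; receptors closest
# to the stimulus respond most strongly.
def _example_generate_input():
    receptors = grid(4, noise=0.0)     # 16 receptors on a regular 4x4 lattice
    stimulus = np.array([[0.5, 0.5]])  # one dot in the middle of the patch
    response = generate_input(receptors, stimulus)
    return response.reshape(4, 4)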
def dnf_response( n, Rn, stimulus, w, we, wi, time, dt ):
alpha, tau = 0.1, 1.0
U = np.random.random((n,n)) * .01
V = np.random.random((n,n)) * .01
V_shape = np.array(V.shape)
# Computes field input accordingly
D = (( np.abs( w - stimulus )).sum(axis=-1))/float(Rn*Rn)
I = ( 1.0 - D.reshape(n,n) ) * alpha
for j in range( int(time/dt) ):
Z = rfft2( V * alpha )
Le = irfft2( Z * we, V_shape).real
Li = irfft2( Z * wi, V_shape).real
U += ( -U + ( Le - Li ) + I )* dt * tau
V = np.maximum( U, 0.0 )
return V
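# Note (reconstructed from the code above, for clarity): with excitatory and
# inhibitory lateral kernels We and Wi applied by FFT convolution, dnf_response
# iterates the discretized neural-field dynamics
#   U <- U + tau * dt * ( -U + (We * (alpha*V) - Wi * (alpha*V)) + I ),
#   V = max(U, 0),
# where I is the feed-forward input computed from the distance between the
# stimulus and the SOM prototypes, and '*' denotes spatial convolution.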
def h(x, sigma=1.0):
return np.exp(-0.5*(x/sigma)**2)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# Seed for reproductibility
# -------------------------
np.random.seed(137)
# Standard units
# --------------
second = 1.0
millisecond = 1e-3 * second
ms = millisecond
minute = 60 * second
meter = 1.0
millimeter = 1e-3 * meter
mm = millimeter
micrometer = 1e-6 * meter
# Simulation parameters
# ---------------------
dots_number = 750
drum_length = 250*mm
drum_width = 30*mm
drum_shift = 200*micrometer
drum_velocity = 40*mm / second
simulation_time = 5*minute
sampling_rate = 5*ms
dt = sampling_rate
skinpatch = 10*mm,10*mm # width x height
RF_sampling = 25,25
learning_som = False
learning = False
Rn = 16
# R = grid(Rn,noise=0.15)
# Generate the drum pattern
# -------------------------
drum = np.zeros( (dots_number,2) )
drum[:,0] = np.random.uniform(0,drum_length,dots_number)
drum[:,1] = np.random.uniform(0,drum_width, dots_number)
drum_x,drum_y = drum[:,0], drum[:,1]
# Show the drum
# -------------
if 0:
plt.figure(figsize = (16, 1+10 * drum_width/drum_length))
plt.subplot(111,aspect=1)
plt.scatter(drum_x, drum_y, s=10, facecolor='k', edgecolor='k')
plt.xlim(0,drum_length)
plt.xlabel("mm")
plt.ylim(0,drum_width)
plt.ylabel("mm")
plt.show()
print "Estimated number of samples: %d" % (simulation_time/dt)
# SOM learning
# -------------
Sn = 32
folder = '/home/Local/SOM/Attention/REF/'
W = np.load( folder+'weights050000.npy' )
R = np.zeros((Rn*Rn,2))
R[:,0] = np.load( folder+'gridxcoord.npy' )
R[:,1] = np.load( folder+'gridycoord.npy' )
RF_count = np.zeros((Sn,Sn,25,25))
RF_sum = np.zeros((Sn,Sn,25,25))
global_count = np.zeros((Sn,Sn))
global_sum = np.zeros((Sn,Sn))
scale = 960.0/(Sn*Sn)
x_inf, x_sup, y_inf, y_sup = 0.0, 1.0, 0.0, 1.0
X, Y = np.meshgrid( np.linspace(x_inf,x_sup,Sn+1,endpoint=True)[1:],
np.linspace(y_inf,y_sup,Sn+1,endpoint=True)[1:] )
D = np.sqrt( (X-0.5)**2 + (Y-0.5)**2 )
We = 3.65 * scale * h( D, 0.1 )
Wi = 2.40 * scale * h( D, 1.0 )
We_fft = rfft2( ifftshift( We[::-1,::-1] ) )
Wi_fft = rfft2( ifftshift( Wi[::-1,::-1] ) )
if learning:
# Run the simulated drum
for t in np.arange(0.0,simulation_time,dt):
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
y = int(z / (drum_length - skinpatch[0])) * drum_shift
# Maybe this should be adjusted since a stimulus lying outside the skin
# patch may still have influence on the input (for example, if it lies
# very near the border)
xmin, xmax = x, x+skinpatch[0]
ymin, ymax = y, y+skinpatch[1]
# Get dots contained on the skin patch (and normalize coordinates)
dots = drum[(drum_x > (xmin)) *
(drum_x < (xmax)) *
(drum_y > (ymin)) *
(drum_y < (ymax))]
dots -= (x,y)
dots /= skinpatch[0],skinpatch[1]
# Compute RF mask
RF_mask = np.zeros(RF_sampling)
for dot in dots:
index = (np.floor(dot*RF_sampling)).astype(int)
RF_mask[index[1],index[0]] = 1
# Compute corresponding input (according to receptors)
I = generate_input(R,dots)
# Generate the som answer
V = dnf_response( Sn, Rn, I, W, We_fft, Wi_fft, 10.0, 25.0*.001 )
# Compute the mean firing rate
global_sum += V
global_count += 1
# Compute the local mean firing rate
RF_sum += V.reshape(Sn,Sn,1,1)*RF_mask
RF_count += RF_mask
# Display current skin patch dots and mask
if 0:
plt.figure(figsize=(10,10))
plt.subplot(111,aspect=1)
plt.scatter(dots[:,0],dots[:,1], s=50, facecolor='w', edgecolor='k')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.show()
mean = global_sum/(global_count+1)
RFs = RF_sum/(RF_count+1) - mean.reshape(Sn,Sn,1,1)
if learning: np.save( folder+"RFs.npy", RFs)
RFs = np.load( folder+"RFs.npy")
# Reconstitute the drum from model answers which does not make much sense
# We should use the RF of a given neuron in fact and modulate according to
    # its answer or convolve the RF with the current dot pattern
if 1:
Rc_y = (drum_length/skinpatch[0]) * Sn
Rc_x = (drum_width/skinpatch[1]) * Sn
Rc = np.zeros((Rc_x,Rc_y))
for t in np.arange(0.0,simulation_time,dt):
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
y = int(z / (drum_length - skinpatch[0])) * drum_shift
# Maybe this should be adjusted since a stimulus lying outside the skin
# patch may still have influence on the input (for example, if it lies
# very near the border)
xmin, xmax = x, x+skinpatch[0]
ymin, ymax = y, y+skinpatch[1]
# Get dots contained on the skin patch (and normalize coordinates)
dots = drum[(drum_x > (xmin)) *
(drum_x < (xmax)) *
(drum_y > (ymin)) *
(drum_y < (ymax))]
dots -= (x,y)
dots /= skinpatch[0],skinpatch[1]
# Compute RF mask
RF_mask = np.zeros(RF_sampling)
for dot in dots:
index = (np.floor(dot*RF_sampling)).astype(int)
RF_mask[index[1],index[0]] = 1
# Compute corresponding input (according to receptors)
I = generate_input(R,dots)
# Generate the neural field answer
V = dnf_response( Sn, Rn, I, W, We_fft, Wi_fft, 10.0, 25.0*.001 )
x = int((x/float(drum_length))*Rc_y)
y = int((y/float(drum_width))*Rc_x)
Rc[y:y+Sn,x:x+Sn] = np.maximum(V,Rc[y:y+Sn,x:x+Sn])
# Rc[y:y+Rn,x:x+Rn] += V
# Compute y limit (we may have ended before end of drum)
t = simulation_time
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
ymax = int(z / (drum_length - skinpatch[0])) * drum_shift + skinpatch[0]
plt.figure(figsize = (16, 1+10 * drum_width/drum_length))
plt.subplot(111,aspect=1)
plt.imshow(Rc, origin='lower', interpolation='bicubic', alpha=1,
cmap = plt.cm.gray_r, extent = [0, drum_length, 0, drum_width])
plt.scatter(drum_x, drum_y, s=5, facecolor='w', edgecolor='k', alpha=.5)
plt.xlim(0,drum_length)
plt.xlabel("mm")
#plt.ylim(0,drum_width)
plt.ylim(0,ymax)
plt.ylabel("mm")
plt.show()
# Show all RFs
if 0:
Z = np.zeros((Sn,25,Sn,25))
for i in range(Sn):
for j in range(Sn):
RF = cleanup(RFs[i,j])
# R = np.where(R<0, R/np.abs(R.min()),R/np.abs(R.max()))
Z[i,:,j,:] = RF
Z = Z.reshape(Sn*25,Sn*25)
plt.figure(figsize=(14,10))
plt.imshow(Z, interpolation='bicubic', origin='lower', cmap=plt.cm.PuOr_r, extent=(0,Sn,0,Sn))
plt.colorbar()
plt.xlim(0,Sn), plt.xticks(np.arange(Sn))
plt.ylim(0,Sn), plt.yticks(np.arange(Sn))
plt.grid()
plt.title("Normalized Receptive fields", fontsize=16)
plt.show()
# Show a random RF
if 0:
i,j = np.random.randint(0,Sn,2)
i,j = 8,8
RF = cleanup(RFs[i,j])
plt.figure(figsize=(8,6))
plt.imshow(RF, interpolation='nearest', origin='lower',
cmap=plt.cm.gray_r, extent=[0,10,0,10])
plt.colorbar()
lmin = 0.50 * RF.min()
lmax = 0.50 * RF.max()
#CS = plt.contour(zoom(RF,10), levels=[lmin,lmax], colors='w',
# origin='lower', extent=[0,10,0,10], linewidths=1, alpha=1.0)
#plt.clabel(CS, inline=1, fontsize=12)
plt.xlim(0,10), plt.xlabel("mm")
plt.ylim(0,10), plt.ylabel("mm")
plt.title("Normalized Receptive Field [%d,%d]" % (i,j), fontsize=16)
plt.show()
# Show excitatory/inhibitory ratio (scatter plot)
if 0:
matplotlib.rc('xtick', direction = 'out')
matplotlib.rc('ytick', direction = 'out')
matplotlib.rc('xtick.major', size = 8, width=1)
matplotlib.rc('xtick.minor', size = 4, width=1)
matplotlib.rc('ytick.major', size = 8, width=1)
matplotlib.rc('ytick.minor', size = 4, width=1)
Z = []
for i in range(Sn):
for j in range(Sn):
p = 25
RF = RFs[i,j]
RF_max = np.abs(RF.max())
#winner = np.unravel_index(np.argmax(RF), RF.shape)
#RF = extract(RF,winner,(p,p))
RF = cleanup(RFs[i,j])
exc = 100 * ((RF >= +0.1*RF_max).sum()/ float(p*p))
inh = 50 * ((RF <= -0.1*RF_max).sum()/ float(p*p))
Z.append([exc,inh])
Z = np.array(Z)
X,Y = Z[:,0], Z[:,1]
fig = plt.figure(figsize=(8,8), facecolor="white")
ax = plt.subplot(1,1,1,aspect=1)
plt.scatter(X+0.01,Y+0.01,s=5,color='k',alpha=0.25)
# Show some points
# I = [3,143,149,189,1,209,192,167,64,87,10,40,68,185,61,198]
# plt.scatter(X[I],Y[I],s=5,color='k')
# for i in range(len(I)):
# plt.annotate(" %c" % (chr(ord('A')+i)), (X[I[i]],Y[I[i]]), weight='bold')
# Select some points by cliking them
# letter = ord('A')
# def onclick(event):
# global letter
# #print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# # event.button, event.x, event.y, event.xdata, event.ydata)
# C = (X-event.xdata)**2 + (Y-event.ydata)**2
# I = np.argmin(C)
# print I
# plt.ion()
# x,y = X[I],Y[I]
# plt.scatter(x,y,s=5,color='k')
# plt.annotate(" %c" % (chr(letter)), (x,y), weight='bold')
# plt.ioff()
# letter = letter+1
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.xlabel("Excitatory area (mm2)")
plt.ylabel("Inhibitory area (mm2)")
plt.xlim(1,100)
plt.ylim(1,100)
plt.xscale('log')
plt.yscale('log')
plt.xticks([1,10,100], ['1','10','100'])
plt.yticks([1,10,100], ['1','10','100'])
plt.plot([1,100],[1,100], ls='--', color='k')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.show()
| gpl-3.0 |
terkkila/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
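# Minimal usage sketch (illustrative, not part of the original benchmark):
# time Lasso vs. LassoLars on one small problem. compute_bench() looks up
# Lasso / LassoLars in the module namespace, which the __main__ block below
# normally provides, so this sketch injects them explicitly. Sizes and alpha
# are arbitrary examples.
def _example_compute_bench():
    global Lasso, LassoLars
    from sklearn.linear_model import Lasso, LassoLars
    lasso_t, lars_t = compute_bench(alpha=0.01, n_samples=[200],
                                    n_features=[50], precompute=True)
    return lasso_t, lars_t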
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
FRESNA/atlite | atlite/utils.py | 1 | 5135 | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2019 The Atlite Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
General utility functions for internal use.
"""
from .gis import maybe_swap_spatial_dims
import progressbar as pgb
from pathlib import Path
import pandas as pd
import xarray as xr
import textwrap
import re
import warnings
from .datasets import modules as datamodules
import logging
logger = logging.getLogger(__name__)
def make_optional_progressbar(show, prefix, max_value=None):
warnings.warn("make_optional_progressbar() is deprecated and will be removed "
"in the next version.", warnings.DeprecationWarning)
if show:
widgets = [
pgb.widgets.Percentage(),
' ',
pgb.widgets.SimpleProgress(
format='(%s)' %
pgb.widgets.SimpleProgress.DEFAULT_FORMAT),
' ',
pgb.widgets.Bar(),
' ',
pgb.widgets.Timer(),
' ',
pgb.widgets.ETA()]
if not prefix.endswith(": "):
prefix = prefix.strip() + ": "
maybe_progressbar = pgb.ProgressBar(prefix=prefix, widgets=widgets,
max_value=max_value)
else:
def maybe_progressbar(x):
return x
return maybe_progressbar
def migrate_from_cutout_directory(old_cutout_dir, path):
"""Convert an old style cutout directory to new style netcdf file"""
old_cutout_dir = Path(old_cutout_dir)
with xr.open_dataset(old_cutout_dir / "meta.nc") as meta:
newname = f"{old_cutout_dir.name}.nc"
module = meta.attrs["module"]
minX, maxX = meta.indexes['x'][[0, -1]]
minY, maxY = sorted(meta.indexes['y'][[0, -1]])
minT, maxT = meta.indexes['time'][[0, -1]].strftime("%Y-%m")
logger.warning(textwrap.dedent(f"""
Found an old-style directory-like cutout. It can manually be
recreated using
cutout = atlite.Cutout("{newname}",
module="{module}",
time=slice("{minT}", "{maxT}"),
x=slice({minX}, {maxX}),
y=slice({minY}, {maxY})
cutout.prepare()
but we are trying to offer an automated migration as well ...
"""))
try:
data = xr.open_mfdataset(str(old_cutout_dir / "[12]*.nc"),
combine="by_coords")
data.attrs.update(meta.attrs)
except xr.MergeError:
logger.exception(
"Automatic migration failed. Re-create the cutout "
"with the command above!")
raise
data = maybe_swap_spatial_dims(data)
module = data.attrs["module"]
data.attrs['prepared_features'] = list(datamodules[module].features)
for v in data:
data[v].attrs['module'] = module
fd = datamodules[module].features.items()
features = [k for k, l in fd if v in l]
data[v].attrs['feature'] = features.pop() if features else 'undefined'
path = Path(path).with_suffix(".nc")
logger.info(f"Writing cutout data to {path}. When done, load it again using"
f"\n\n\tatlite.Cutout('{path}')")
data.to_netcdf(path)
return data
def timeindex_from_slice(timeslice):
end = pd.Timestamp(timeslice.end) + pd.offsets.DateOffset(months=1)
return pd.date_range(timeslice.start, end, freq="1h", closed="left")
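# Illustrative example (not part of the original module): a slice covering
# January 2013 expands to an hourly index from 2013-01-01 00:00 up to, but
# excluding, 2013-02-01 00:00.
def _example_timeindex_from_slice():
    idx = timeindex_from_slice(slice("2013-01", "2013-01"))
    assert idx[0] == pd.Timestamp("2013-01-01 00:00")
    assert idx[-1] == pd.Timestamp("2013-01-31 23:00")
    return idx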
class arrowdict(dict):
"""
A subclass of dict, which allows you to get
items in the dict using the attribute syntax!
"""
def __getattr__(self, item):
try:
return self.__getitem__(item)
except KeyError as e:
raise AttributeError(e.args[0])
_re_pattern = re.compile('[a-zA-Z_][a-zA-Z0-9_]*')
def __dir__(self):
dict_keys = []
for k in self.keys():
if isinstance(k, str):
m = self._re_pattern.match(k)
if m:
dict_keys.append(m.string)
return dict_keys
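# Illustrative usage sketch (not part of the original module): items become
# reachable via attribute access, while missing keys raise AttributeError.
def _example_arrowdict():
    d = arrowdict(module="era5", prepared_features=["wind"])
    assert d.module == "era5"  # same as d["module"]
    try:
        d.missing
    except AttributeError:
        pass  # missing keys surface as AttributeError, as __getattr__ promises
    return d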
class CachedAttribute(object):
'''
Computes attribute value and caches it in the instance.
From the Python Cookbook (Denis Otkidach)
This decorator allows you to create a property which can be
computed once and accessed many times. Sort of like memoization.
'''
def __init__(self, method, name=None, doc=None):
# record the unbound-method and the name
self.method = method
self.name = name or method.__name__
self.__doc__ = doc or method.__doc__
def __get__(self, inst, cls):
if inst is None:
# instance attribute accessed on class, return self
# You get here if you write `Foo.bar`
return self
# compute, cache and return the instance's attribute value
result = self.method(inst)
# setattr redefines the instance's attribute so this doesn't get called
# again
setattr(inst, self.name, result)
return result
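# Illustrative usage sketch (not part of the original module): the decorated
# method is evaluated on first access only; the result then shadows the
# descriptor in the instance's __dict__.
class _ExampleCached(object):
    @CachedAttribute
    def expensive(self):
        print('computing once')
        return 42
# _ExampleCached().expensive evaluates the method and returns 42; repeated
# access on the same instance reuses the cached value without recomputing.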
| gpl-3.0 |
alsrgv/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 3 | 32746 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
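# Illustrative sketch (not part of the original module): for a plain [100, 3]
# feature matrix and 1-d labels with 5 classes, the feeder derives a
# [batch_size, 3] input shape and a one-hot [batch_size, 5] output shape.
def _example_get_in_out_shape():
  input_shape, output_shape, batch_size = _get_in_out_shape(
      x_shape=(100, 3), y_shape=(100,), n_classes=5, batch_size=32)
  assert input_shape == [32, 3]
  assert output_shape == [32, 5]
  return input_shape, output_shape, batch_size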
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or an iterable which returns dicts), `n_classes` must be a
      `dict` such that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
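# Illustrative sketch (not part of the original module): _batch_data groups an
# iterator of dict examples into np.matrix batches keyed by feature name; the
# last, possibly partial, batch is flushed at the end.
def _example_batch_data():
  examples = iter([{'age': [i], 'income': [i * 10.]} for i in range(5)])
  batches = list(_batch_data(examples, batch_size=2))
  assert len(batches) == 3  # two full batches of 2 plus a final batch of 1
  assert batches[0]['age'].shape == (2, 1)
  return batches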
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
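# Illustrative sketch (assumption): splitting an in-memory prediction matrix
# into fixed-size parts with setup_predict_data_feeder.
def _example_setup_predict_data_feeder():
  import numpy as np
  x = np.arange(10, dtype=np.float32).reshape(10, 1)
  # With 10 rows and batch_size=4 this returns three parts of 4, 4 and 2 rows.
  return setup_predict_data_feeder(x, batch_size=4)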
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
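# Illustrative sketch (assumption): check_array converts list/ndarray inputs to
# the requested dtype and passes other containers (e.g. h5py datasets) through
# untouched.
def _example_check_array():
  import numpy as np
  return check_array([1, 2, 3], dtype=np.float32)  # -> float32 ndarray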
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.
  Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
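# Illustrative sketch (assumption): _access uses plain indexing for numpy
# arrays and .iloc for pandas objects, so both container types behave the same.
def _example_access():
  import numpy as np
  data = np.array([10, 20, 30, 40])
  return _access(data, [0, 2])  # -> array([10, 30])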
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
      y: label vector, either floats for regression or class ids for
        classification. If a matrix is given, it is treated as a sequence of
        labels. Can be `None` for unsupervised settings. Also supports a
        dictionary of labels.
      n_classes: Number of classes; 0 and 1 are considered regression. `None`
        passes through the input labels without one-hot conversion. Also, if
        `y` is a `dict`, then `n_classes` must be a `dict` such that
        `n_classes[key]` is the number of classes for the label `y[key]`
        (`None` otherwise).
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(
x, dict), y is not None and isinstance(y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (dict(
[(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (_check_dtype(self._y.dtype)
if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
if len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(
self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else {
self._input_placeholder.name:
extract(self._x, batch_indices)
})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = (self.output_shape, self._output_dtype,
self.n_classes)
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
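# Illustrative sketch (assumption, not part of the original module): a typical
# DataFeeder round trip -- build the input/output placeholders, then pull one
# feed_dict for a training step. The shapes and class count are made up.
def _example_data_feeder_round_trip():
  import numpy as np
  x = np.random.rand(20, 4).astype(np.float32)
  y = np.random.randint(0, 2, size=20)
  feeder = DataFeeder(x, y, n_classes=2, batch_size=8)
  inp, out = feeder.input_builder()  # TF placeholders for one mini-batch
  feed_fn = feeder.get_feed_dict_fn()
  feed_dict = feed_fn()  # maps placeholder names to one sampled batch
  return inp, out, feed_dict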
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It's common to have these iterators rotate infinitely over
  the dataset, letting the trainer side control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        an Nd numpy matrix or a dictionary of Nd numpy matrices holding one or
        many class ids or regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = (
[1] + list(y_first_el[0].shape
if isinstance(y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
    # Output types are floats, due to both softmax and regression requirements.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict(
[(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
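# Illustrative sketch (assumption): constructing a StreamingDataFeeder from
# generators, which is what setup_train_data_feeder does when `x` and `y` are
# iterators. The feature width of 4 and the shape-(1,) labels are made up.
def _example_streaming_data_feeder():
  import itertools
  import numpy as np
  x_iter = (np.random.rand(4).astype(np.float32) for _ in itertools.count())
  y_iter = (np.array([i % 2]) for i in itertools.count())
  feeder = StreamingDataFeeder(x_iter, y_iter, n_classes=None, batch_size=8)
  return feeder.input_shape, feeder.output_shape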
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder removes the requirement to have the full dataset in
  memory and still allows random seeks for sampling of batches.
"""
@deprecated(None, 'Please feed input to tf.data to support dask.')
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns one or many class ids /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
      # deal with cases where two DFs have overlapping default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.compat.v1.placeholder for input features mini batch.
output_placeholder: tf.compat.v1.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
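# Illustrative sketch (assumption; requires dask and pandas to be installed):
# building a DaskDataFeeder from two dask DataFrames. The 20x3 feature frame
# and binary labels are made up for demonstration.
def _example_dask_data_feeder():
  import numpy as np
  import pandas as pd
  import dask.dataframe as dd
  x_df = dd.from_pandas(pd.DataFrame(np.random.rand(20, 3)), npartitions=2)
  y_df = dd.from_pandas(
      pd.DataFrame(np.random.randint(0, 2, size=(20, 1))), npartitions=2)
  feeder = DaskDataFeeder(x_df, y_df, n_classes=2, batch_size=5)
  return feeder.get_feed_params()  # e.g. {'batch_size': 5}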
| apache-2.0 |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/example_gam.py | 4 | 2337 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
print "normal"
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
    print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print "binomial"
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
    print(tic - toc)
if example == 3:
print "Poisson"
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
    print(tic - toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
imaculate/scikit-learn | benchmarks/bench_mnist.py | 38 | 6799 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
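# Illustrative sketch (not part of the original benchmark): timing a single
# estimator programmatically, mirroring what the __main__ block below does for
# the full list of classifiers.
def _example_time_single_estimator(name="CART"):
    X_train, X_test, y_train, y_test = load_data(order="C")
    estimator = ESTIMATORS[name]
    t0 = time()
    estimator.fit(X_train, y_train)
    fit_time = time() - t0
    return fit_time, zero_one_loss(y_test, estimator.predict(X_test))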
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
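# Illustrative sketch (not one of the original tests): MockClassifier is meant
# to be dropped into cross_val_score so that fit parameters and input types can
# be checked without fitting a real model; it reuses the module-level X and y
# defined just below.
def _example_mock_classifier_usage():
    clf = MockClassifier(a=0)
    return cval.cross_val_score(clf, X, y)  # one perfect score per CV fold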
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the
    # non-shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
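    # Note: for a precomputed kernel, _safe_split slices the Gram matrix by
    # rows *and* columns -- K_tr is K[np.ix_(tr, tr)] and K_te is
    # K[np.ix_(te, tr)] -- which is why the checks above compare against
    # np.dot(X_tr, X_tr.T) and np.dot(X_te, X_tr.T) respectively.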
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
    assert_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
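    # Note: cross_val_predict stitches each fold's test-set predictions back
    # into a single array aligned with y, which is why ``preds`` can be
    # compared element-wise with the naive loop above. The ``bad_cv``
    # generator is rejected because its test indices do not form a partition
    # of the samples (see _check_is_partition further down).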
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
ldirer/scikit-learn | sklearn/manifold/locally_linear.py | 14 | 26498 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from scipy.sparse.linalg import eigsh
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Z along the first axis
    We estimate the weights to assign to each point in Z[i] to recover
    the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
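# A minimal usage sketch for barycenter_weights, kept as doctest-style
# comments so that importing this module is unchanged; the toy data below is
# purely illustrative:
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.rand(5, 3)                               # five points in 3-D
#   >>> Z = X[[[1, 2], [0, 2], [0, 1], [0, 1], [0, 1]]]  # two neighbors each
#   >>> W = barycenter_weights(X, Z)
#   >>> W.shape                                          # one weight per neighbor
#   (5, 2)
#   >>> np.allclose(W.sum(axis=1), 1.0)                  # rows sum to one
#   True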
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``solver`` == 'arpack'.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
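# Note: the LLE variants below always call null_space with k_skip=1, so the
# trivial (constant) eigenvector of M with eigenvalue ~0 is discarded and only
# the next n_components eigenvectors are returned as the embedding.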
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``solver`` == 'arpack'.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = stable_cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
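# A short usage sketch of the functional interface, kept in doctest-style
# comments; the swiss-roll helper from sklearn.datasets is assumed to be
# available in this version:
#
#   >>> from sklearn.datasets import make_swiss_roll
#   >>> X, _ = make_swiss_roll(n_samples=500, random_state=0)
#   >>> Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2,
#   ...                                   random_state=0)
#   >>> Y.shape
#   (500, 2)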
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``eigen_solver`` == 'arpack'.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X, dtype=float)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
| bsd-3-clause |
benanne/theano-tutorial | 3_logistic_regression.py | 2 | 1394 | import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import load
# load data
x_train, t_train, x_test, t_test = load.cifar10(dtype=theano.config.floatX)
labels_test = np.argmax(t_test, axis=1)
# visualize data
plt.imshow(x_train[0].reshape(32, 32), cmap=plt.cm.gray)
# define symbolic Theano variables
x = T.matrix()
t = T.matrix()
# define model: logistic regression
def floatX(x):
return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.1))
def model(x, w):
return T.nnet.softmax(T.dot(x, w))
w = init_weights((32 * 32, 10))
p_y_given_x = model(x, w)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
g = T.grad(cost, w)
updates = [(w, w - g * 0.001)]
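# The update list above implements plain minibatch gradient descent: each call
# to `train` replaces w by w - 0.001 * d(cost)/dw, where the gradient g is
# computed symbolically by T.grad over the cross-entropy cost.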
# compile theano functions
train = theano.function([x, t], cost, updates=updates)
predict = theano.function([x], y)
# train model
batch_size = 50
for i in range(100):
print "iteration %d" % (i + 1)
for start in range(0, len(x_train), batch_size):
x_batch = x_train[start:start + batch_size]
t_batch = t_train[start:start + batch_size]
cost = train(x_batch, t_batch)
predictions_test = predict(x_test)
accuracy = np.mean(predictions_test == labels_test)
print "accuracy: %.5f" % accuracy
print | mit |
ycaihua/scikit-learn | sklearn/tests/test_pipeline.py | 17 | 12512 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
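    # Note: FeatureUnion concatenates the transformer outputs horizontally,
    # so the expected widths are 2 (svd) + 1 (select) = 3 columns above,
    # 2 + 2 = 4 after set_params(select__k=2), and 4 (passthrough mock)
    # + 2 + 2 = 8 in the last check.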
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
jni/networkx | examples/graph/knuth_miles.py | 36 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import re
import networkx as nx
def miles_graph():
    """ Return the cities example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
        numfind=re.compile(r"^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
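# Note: besides the edges, the returned graph carries two extra attribute
# dicts filled in above -- G.position maps each city to plotting coordinates
# and G.population stores the population in thousands -- both of which are
# used by the drawing code in the __main__ block below.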
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
    print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
    # make new graph of cities, with an edge if there are less than 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
KevinNJ/Projects | Sallen Key Solver/SallenKey_Design.py | 1 | 5404 | # -*- coding: utf-8 -*-
from __future__ import division
import math
import random
import matplotlib.pyplot as plt
import scipy.signal as sig
from itertools import product
from misc import common_part_values, metric_prefix
from anneal import Annealer
# Setup optimization targets
target_q = 0.707 # 1/sqrt(2), which gives the maximally flat (Butterworth) response
target_freq = 500 # Hz
target_atten = -40 # dB
rvalues, cvalues = common_part_values()
def f0(system):
"""Return the natural frequency of the system."""
c1,c2,r1,r2 = system
fn = 1 / (2 * math.pi * math.sqrt(r1 * c1 * r2 * c2))
return fn
def q(system):
"""Return the Q Value of the system."""
c1,c2,r1,r2 = system
q = math.sqrt(c1 * c2 * r1 * r2)/ (c2 * (r1 + r2))
return q
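# Sanity check for the Q expression above: with r1 == r2 it reduces to
# 0.5 * sqrt(c1 / c2), so equal capacitors give Q = 0.5 and the Butterworth
# target of ~0.707 requires c1 to be roughly twice c2. This is why the
# annealer must pick unequal capacitor values despite the small penalty on
# |c1 - c2| in the energy function.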
def frf(system):
"""Return the Frequency Response Function of the system.
Returns a function which takes a frequency as an argument.
This function when evaluated at any frequency returns the complex
frequency response at that frequency.
Example: frf(system)(10) returns the complex FRF at 10 Hz
"""
def internal(f):
c1,c2,r1,r2 = system
w = 2 * math.pi * f
num = 1 / (c1*c2*r1*r2)
den = 1 / (c1*c2*r1*r2) + (r1+r2)/(c1*r1*r2) * 1j*w - w**2
return num/den
return internal
def dB(x):
"""Returns the argument in decibels"""
return 20 * math.log10(abs(x))
def stepResponse(system):
"""Computes the step response for a given system"""
c1,c2,r1,r2 = system
num = 1 / (c1*c2*r1*r2)
den = (1, (r1+r2)/(c1*r1*r2), 1/(c1*c2*r1*r2))
return sig.step((num,den))
def energy(system):
"""Computes the energy of a given system.
The energy is defined as decreasing towards zero as the system
approaches an ideal system.
"""
frf_ = frf(system)
f0_ = f0(system)
q_ = q(system)
c1,c2,r1,r2 = system
e = 0
e += abs(target_atten - dB(frf_(target_freq))) / abs(target_atten) # percent error off frequency @ attenuation
e += abs(target_q - q_) / abs(target_q) # percent error off ideal Q value
e += abs(c1-c2) / abs((c1+c2)/2) * 0.1 # percent difference in capacitor values
e += abs(r1-r2) / abs((r1+r2)/2) * 0.1 # percent difference in resistor values
return e
def move(system):
""" Changes the system randomly
This function makes a random change to one of the component values
in the system.
"""
component = random.randrange(0, 4)
if component == 0:
index = random.randrange(0, len(cvalues))
system[0] = cvalues[index]
elif component == 1:
index = random.randrange(0, len(cvalues))
system[1] = cvalues[index]
elif component == 2:
index = random.randrange(0, len(rvalues))
system[2] = rvalues[index]
elif component == 3:
index = random.randrange(0, len(rvalues))
system[3] = rvalues[index]
if __name__ == '__main__':
# set up simulated annealing algorithm
units=('F', 'F', u'Ω', u'Ω') # units of the values in the system
initial_system = [cvalues[0], cvalues[0], rvalues[0], rvalues[0]]
annealer = Annealer(energy, move)
schedule = annealer.auto(initial_system, minutes=0.1)
# run simulated annealing algorithm and compute properties of the final system
final_system, error = annealer.anneal(initial_system, schedule['tmax'], schedule['tmin'], schedule['steps'], updates=100)
final_frf = frf(final_system)
final_f0 = f0(final_system)
final_q = q(final_system)
final_vals = [metric_prefix(*s) for s in zip(final_system, units)]
print 'Soln: (%s), Remaining Energy: %s' % (', '.join(final_vals), error)
# calculate data for graphs
freqs = range(1000000) # response from 0 Hz to 1 MHz
response = [dB(final_frf(f)) for f in freqs]
natural = final_f0, dB(final_frf(final_f0))
target = target_freq, dB(final_frf(target_freq))
step_freqs, step_response = stepResponse(final_system)
plt.figure()
# bode response plot
ax = plt.subplot(2,1,1)
plt.semilogx(freqs,response)
plt.semilogx(natural[0], natural[1], 'r+', ms=10)
plt.annotate('Natural Freq: (%.2f Hz, %.2f dB) ' % natural, xy=natural, xytext=(10,10), textcoords='offset points')
plt.semilogx(target[0], target[1], 'r+', ms=10)
plt.annotate('target attenuation: (%.2f Hz, %.2f dB)'%target, xy=target, xytext=(10,10), textcoords='offset points')
plt.title('Bode Plot (F0: %.2f Hz, Q-Factor: %.2f)\n' % (final_f0, final_q) + 'Soln: (%s)' % ', '.join(final_vals))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Gain [dB]')
lims=list(ax.get_ylim())
lims[1]=20
plt.ylim(lims)
# step response plot
plt.subplot(2,1,2)
plt.plot(step_freqs, step_response)
plt.title('Step Response')
plt.xlabel('Time (s)')
plt.ylabel('Response (v)')
plt.show()
"""
References:
[1] http://en.wikipedia.org/wiki/Sallen%E2%80%93Key_topology
[2] http://en.wikipedia.org/wiki/Q_factor
[3] http://sim.okawa-denshi.jp/en/OPstool.php
[4] http://www.falstad.com/circuit/
"""
| mit |
clemenshage/grslra | experiments/plot_lpnorms.py | 1 | 1564 | import matplotlib
from matplotlib import pyplot as plt
import numpy as np
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams.update({'text.usetex': True})
def lpnorm_scaled(x, p, mu):
return (lpnorm(x, p, mu) - lpnorm(0, p, mu)) / (lpnorm(1, p, mu) - lpnorm(0, p, mu))
def lpnorm(x, p, mu):
return (mu + x * x) ** (p / 2.0)
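# lpnorm is a smoothed version of |x|**p (mu keeps the gradient finite at 0),
# and lpnorm_scaled rescales it so that lpnorm_scaled(0) == 0 and
# lpnorm_scaled(+-1) == 1 for every (p, mu) pair, which makes the curves in
# the plots below directly comparable.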
pvalues=[2.0, 1.0, 0.7, 0.4, 0.1]
mu = 1e-12
colors=['k', 'b', 'g', 'r', 'm']
x = np.linspace(-1, 1, 1001)
plt.figure(figsize=(15,8))
for i in xrange(pvalues.__len__()):
p = pvalues[i]
plt.plot(x, lpnorm_scaled(x, p, mu), color=colors[i], label='$p={:1.1f}$'.format(pvalues[i]), linewidth=3)
plt.legend()
axes = plt.gca()
axes.set_ylim([0,1])
axes.set_xlim([-1,1])
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.tight_layout()
plt.legend()
plt.savefig('lpnorm_fixedmu.pdf', dpi=200)
muvalues=[0.01, 1e-3, 1e-4]
labels = ["$\\ell_2$", "$\\ell_1$", "$\\mu=0.01$", "$\\mu=0.001$", "$\\mu=10^{-4}$"]
plt.figure(figsize=(15,8))
plt.plot(x, lpnorm_scaled(x, 2.0, mu), color=colors[0], label=labels[0], linewidth=3)
plt.plot(x, lpnorm_scaled(x, 1.0, mu), color=colors[1], label=labels[1], linewidth=3)
for i in xrange(muvalues.__len__()):
mu = muvalues[i]
plt.plot(x, lpnorm_scaled(x, 0.1, mu), color=colors[i+2], label=labels[i+2], linewidth=3)
plt.legend()
axes = plt.gca()
axes.set_ylim([0,1])
axes.set_xlim([-1,1])
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.tight_layout()
plt.legend(loc="lower left")
plt.savefig('lpnorm_fixedp.pdf', dpi=200)
| mit |
great-expectations/great_expectations | tests/execution_engine/test_sqlalchemy_execution_engine.py | 1 | 23179 | import logging
import os
import pandas as pd
import pytest
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.data_context.util import file_relative_path
from great_expectations.exceptions import GreatExpectationsError
from great_expectations.exceptions.exceptions import InvalidConfigError
from great_expectations.exceptions.metric_exceptions import MetricProviderError
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
# Function to test for spark dataframe equality
from great_expectations.self_check.util import build_sa_engine
from great_expectations.validator.validation_graph import MetricConfiguration
from tests.expectations.test_util import get_table_columns_metric
from tests.test_utils import get_sqlite_table_names, get_sqlite_temp_table_names
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
def test_instantiation_via_connection_string(sa, test_db_connection_string):
my_execution_engine = SqlAlchemyExecutionEngine(
connection_string=test_db_connection_string
)
assert my_execution_engine.connection_string == test_db_connection_string
assert my_execution_engine.credentials == None
assert my_execution_engine.url == None
my_execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
table_name="table_1",
schema_name="main",
sampling_method="_sample_using_limit",
sampling_kwargs={"n": 5},
)
)
def test_instantiation_via_url(sa):
db_file = file_relative_path(
__file__,
os.path.join("..", "test_sets", "test_cases_for_sql_data_connector.db"),
)
my_execution_engine = SqlAlchemyExecutionEngine(url="sqlite:///" + db_file)
assert my_execution_engine.connection_string is None
assert my_execution_engine.credentials is None
assert my_execution_engine.url[-36:] == "test_cases_for_sql_data_connector.db"
my_execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
table_name="table_partitioned_by_date_column__A",
sampling_method="_sample_using_limit",
sampling_kwargs={"n": 5},
)
)
def test_instantiation_via_credentials(sa, test_backends, test_df):
if "postgresql" not in test_backends:
pytest.skip("test_database_store_backend_get_url_for_key requires postgresql")
my_execution_engine = SqlAlchemyExecutionEngine(
credentials={
"drivername": "postgresql",
"username": "postgres",
"password": "",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "5432",
"database": "test_ci",
}
)
assert my_execution_engine.connection_string is None
assert my_execution_engine.credentials == {
"username": "postgres",
"password": "",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "5432",
"database": "test_ci",
}
assert my_execution_engine.url is None
# Note Abe 20201116: Let's add an actual test of get_batch_data_and_markers, which will require setting up test
# fixtures
# my_execution_engine.get_batch_data_and_markers(batch_spec=BatchSpec(
# table_name="main.table_1",
# sampling_method="_sample_using_limit",
# sampling_kwargs={
# "n": 5
# }
# ))
def test_instantiation_error_states(sa, test_db_connection_string):
with pytest.raises(InvalidConfigError):
SqlAlchemyExecutionEngine()
# Testing batching of aggregate metrics
def test_sa_batch_aggregate_metrics(caplog, sa):
import datetime
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"metric_partial_fn": desired_metric_1,
"table.columns": table_columns_metric,
},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"metric_partial_fn": desired_metric_2,
"table.columns": table_columns_metric,
},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
metric_dependencies={
"metric_partial_fn": desired_metric_3,
"table.columns": table_columns_metric,
},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
metric_dependencies={
"metric_partial_fn": desired_metric_4,
"table.columns": table_columns_metric,
},
)
caplog.clear()
caplog.set_level(logging.DEBUG, logger="great_expectations")
start = datetime.datetime.now()
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
end = datetime.datetime.now()
print("t1")
print(end - start)
assert results[desired_metric_1.id] == 3
assert results[desired_metric_2.id] == 1
assert results[desired_metric_3.id] == 4
assert results[desired_metric_4.id] == 4
# Check that all four of these metrics were computed on a single domain
found_message = False
for record in caplog.records:
if (
record.message
== "SqlAlchemyExecutionEngine computed 4 metrics on domain_id ()"
):
found_message = True
assert found_message
# Ensuring functionality of compute_domain when no domain kwargs are given
def test_get_compute_domain_with_no_domain_kwargs(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={}, domain_type="table"
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"]).select_from(engine.active_batch_data.selectable)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that with no domain nothing happens to the data itself
assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {}, "Compute domain kwargs should be empty"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing for only untested use case - column_pair
def test_get_compute_domain_with_column_pair(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
# Fetching data, compute_domain_kwargs, accessor_kwargs
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"column_A": "a", "column_B": "b"}, domain_type="column_pair"
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"]).select_from(engine.active_batch_data.selectable)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that with no domain nothing happens to the data itself
assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert (
        "column_A" not in compute_kwargs.keys()
        and "column_B" not in compute_kwargs.keys()
    ), "Column pair should not appear in compute domain kwargs"
assert accessor_kwargs == {
"column_A": "a",
"column_B": "b",
}, "Accessor kwargs have been modified"
# Building new engine so that values still found
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
data2, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"column_A": "a", "column_B": "b"}, domain_type="identity"
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select([sa.column("a"), sa.column("b")]).select_from(
engine.active_batch_data.selectable
)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data2)).fetchall()
# Ensuring that with no domain nothing happens to the data itself
assert raw_data == domain_data, "Data does not match after getting compute domain"
assert compute_kwargs == {
"column_A": "a",
"column_B": "b",
}, "Compute domain kwargs should be existent"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing for only untested use case - multicolumn
def test_get_compute_domain_with_multicolumn(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None], "c": [1, 2, 3, None]}),
sa,
)
# Obtaining compute domain
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"columns": ["a", "b", "c"]}, domain_type="multicolumn"
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"]).select_from(engine.active_batch_data.selectable)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that with no domain nothing happens to the data itself
assert raw_data == domain_data, "Data does not match after getting compute domain"
assert compute_kwargs is not None, "Compute domain kwargs should be existent"
assert accessor_kwargs == {
"columns": ["a", "b", "c"]
}, "Accessor kwargs have been modified"
# Checking for identity
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"columns": ["a", "b", "c"]}, domain_type="identity"
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select([sa.column("a"), sa.column("b"), sa.column("c")]).select_from(
engine.active_batch_data.selectable
)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that with no domain nothing happens to the data itself
assert raw_data == domain_data, "Data does not match after getting compute domain"
assert compute_kwargs == {
"columns": ["a", "b", "c"]
}, "Compute domain kwargs should be existent"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing whether compute domain is properly calculated, but this time obtaining a column
def test_get_compute_domain_with_column_domain(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
# Loading batch data
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"column": "a"}, domain_type=MetricDomainTypes.COLUMN
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"]).select_from(engine.active_batch_data.selectable)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that column domain is now an accessor kwarg, and data remains unmodified
assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {}, "Compute domain kwargs should be empty"
assert accessor_kwargs == {"column": "a"}, "Accessor kwargs have been modified"
# Testing for identity
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
# Loading batch data
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={"column": "a"}, domain_type=MetricDomainTypes.IDENTITY
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select([sa.column("a")]).select_from(engine.active_batch_data.selectable)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that column domain is now an accessor kwarg, and data remains unmodified
assert raw_data == domain_data, "Data does not match after getting compute domain"
assert compute_kwargs == {"column": "a"}, "Compute domain kwargs should be existent"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# What happens when we filter such that no value meets the condition?
def test_get_compute_domain_with_unmeetable_row_condition(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={
"row_condition": 'col("b") > 24',
"condition_parser": "great_expectations__experimental__",
},
domain_type="identity",
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"])
.select_from(engine.active_batch_data.selectable)
.where(sa.column("b") > 24)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that column domain is now an accessor kwarg, and data remains unmodified
assert raw_data == domain_data, "Data does not match after getting compute domain"
# Ensuring compute kwargs have not been modified
assert (
"row_condition" in compute_kwargs.keys()
), "Row condition should be located within compute kwargs"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing to ensure that great expectation experimental parser also works in terms of defining a compute domain
def test_get_compute_domain_with_ge_experimental_condition_parser(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
# Obtaining data from computation
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={
"column": "b",
"row_condition": 'col("b") == 2',
"condition_parser": "great_expectations__experimental__",
},
domain_type="column",
)
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
raw_data = engine.engine.execute(
sa.select(["*"])
.select_from(engine.active_batch_data.selectable)
.where(sa.column("b") == 2)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring that column domain is now an accessor kwarg, and data remains unmodified
assert raw_data == domain_data, "Data does not match after getting compute domain"
# Ensuring compute kwargs have not been modified
assert (
"row_condition" in compute_kwargs.keys()
), "Row condition should be located within compute kwargs"
assert accessor_kwargs == {"column": "b"}, "Accessor kwargs have been modified"
# Should react differently for domain type identity
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={
"column": "b",
"row_condition": 'col("b") == 2',
"condition_parser": "great_expectations__experimental__",
},
domain_type="identity",
)
# Ensuring data has been properly queried
# Seeing if raw data is the same as the data after condition has been applied - checking post computation data
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
raw_data = engine.engine.execute(
sa.select(["*"])
.select_from(engine.active_batch_data.selectable)
.where(sa.column("b") == 2)
).fetchall()
domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
# Ensuring compute kwargs have not been modified
assert (
"row_condition" in compute_kwargs.keys()
), "Row condition should be located within compute kwargs"
assert accessor_kwargs == {}, "Accessor kwargs have been modified"
def test_get_compute_domain_with_nonexistent_condition_parser(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
)
# Expect GreatExpectationsError because parser doesn't exist
with pytest.raises(GreatExpectationsError) as e:
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
domain_kwargs={
"row_condition": "b > 24",
"condition_parser": "nonexistent",
},
domain_type=MetricDomainTypes.TABLE,
)
# Ensuring that we can properly inform user when metric doesn't exist - should get a metric provider error
def test_resolve_metric_bundle_with_nonexistent_metric(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
)
desired_metric_1 = MetricConfiguration(
metric_name="column_values.unique",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
)
desired_metric_4 = MetricConfiguration(
metric_name="column.does_not_exist",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=dict(),
)
# Ensuring a metric provider error is raised if metric does not exist
with pytest.raises(MetricProviderError) as e:
res = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
)
)
print(e)
def test_get_batch_data_and_markers_using_query(sqlite_view_engine, test_df):
my_execution_engine: SqlAlchemyExecutionEngine = SqlAlchemyExecutionEngine(
engine=sqlite_view_engine
)
test_df.to_sql("test_table_0", con=my_execution_engine.engine)
query: str = "SELECT * FROM test_table_0"
batch_data, batch_markers = my_execution_engine.get_batch_data_and_markers(
batch_spec=RuntimeQueryBatchSpec(
query=query,
)
)
assert len(get_sqlite_temp_table_names(sqlite_view_engine)) == 2
assert batch_markers.get("ge_load_time") is not None
def test_sa_batch_unexpected_condition_temp_table(caplog, sa):
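    # Helper: assert that no "ge_tmp_"-prefixed temporary or permanent tables
    # are left behind in SQLite after each round of metric resolution.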
def validate_tmp_tables():
temp_tables = [
name
for name in get_sqlite_temp_table_names(engine.engine)
if name.startswith("ge_tmp_")
]
tables = [
name
for name in get_sqlite_table_names(engine.engine)
if name.startswith("ge_tmp_")
]
assert len(temp_tables) == 0
assert len(tables) == 0
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
validate_tmp_tables()
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
metrics.update(results)
validate_tmp_tables()
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=dict(),
metric_dependencies={
"unexpected_condition": condition_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
validate_tmp_tables()
| apache-2.0 |
tdhopper/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares loss function. The penalty `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
almarklein/scikit-image | doc/examples/plot_gabors_from_lena.py | 3 | 3341 | """
=======================================================
Gabors / Primary Visual Cortex "Simple Cells" from Lena
=======================================================
How to build a (bio-plausible) "sparse" dictionary (or 'codebook', or
'filterbank') for e.g. image classification without any fancy math and
with just standard python scientific libraries?
Please find below a short answer ;-)
This simple example shows how to get Gabor-like filters [1]_ using just
the famous Lena image. Gabor filters are good approximations of the
"Simple Cells" [2]_ receptive fields [3]_ found in the mammalian primary
visual cortex (V1) (for details, see e.g. the Nobel-prize winning work
of Hubel & Wiesel done in the 60s [4]_ [5]_).
Here we use MacQueen's 'kmeans' algorithm [6]_, as a simple biologically
plausible hebbian-like learning rule and we apply it (a) to patches of
the original Lena image (retinal projection), and (b) to patches of an
LGN-like [7]_ Lena image using a simple difference of gaussians (DoG)
approximation.
Enjoy ;-) And keep in mind that getting Gabors on natural image patches
is not rocket science.
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://en.wikipedia.org/wiki/Simple_cell
.. [3] http://en.wikipedia.org/wiki/Receptive_field
.. [4] D. H. Hubel and T. N. Wiesel, Receptive Fields of Single Neurones
       in the Cat's Striate Cortex, J. Physiol. pp. 574-591 (148) 1959
.. [5] D. H. Hubel and T. N. Wiesel, Receptive Fields, Binocular
       Interaction, and Functional Architecture in the Cat's Visual Cortex,
       J. Physiol. 160 pp. 106-154 1962
.. [6] http://en.wikipedia.org/wiki/K-means_clustering
.. [7] http://en.wikipedia.org/wiki/Lateral_geniculate_nucleus
"""
import numpy as np
from scipy.cluster.vq import kmeans2
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage.util.shape import view_as_windows
from skimage.util.montage import montage2d
np.random.seed(42)
patch_shape = 8, 8
n_filters = 49
lena = color.rgb2gray(data.lena())
# -- filterbank1 on original Lena
patches1 = view_as_windows(lena, patch_shape)
patches1 = patches1.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb1, _ = kmeans2(patches1, n_filters, minit='points')
fb1 = fb1.reshape((-1,) + patch_shape)
fb1_montage = montage2d(fb1, rescale_intensity=True)
# -- filterbank2 LGN-like Lena
lena_dog = ndi.gaussian_filter(lena, .5) - ndi.gaussian_filter(lena, 1)
patches2 = view_as_windows(lena_dog, patch_shape)
patches2 = patches2.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb2, _ = kmeans2(patches2, n_filters, minit='points')
fb2 = fb2.reshape((-1,) + patch_shape)
fb2_montage = montage2d(fb2, rescale_intensity=True)
# --
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(lena, cmap=plt.cm.gray)
ax0.set_title("Lena (original)")
ax1.imshow(fb1_montage, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title("K-means filterbank (codebook)\non Lena (original)")
ax2.imshow(lena_dog, cmap=plt.cm.gray)
ax2.set_title("Lena (LGN-like DoG)")
ax3.imshow(fb2_montage, cmap=plt.cm.gray, interpolation='nearest')
ax3.set_title("K-means filterbank (codebook)\non Lena (LGN-like DoG)")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(hspace=0.3)
plt.show()
| bsd-3-clause |
peterbrook/assetjet | deploy/setup_esky.py | 1 | 2048 | import sys, os
from esky.bdist_esky import Executable
from distutils.core import setup
import assetjet
from deploy import exeName, appName
from glob import glob
def get_data_files(dirs):
"""
Recursively include data directories.
"""
results = []
for directory in dirs:
for root, dirs, files in os.walk(directory):
files = [os.path.join(root, file_) for file_ in files]
targetdir = os.path.relpath(root, os.path.join(directory, os.path.pardir))
results.append((targetdir, files))
return results
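# Illustrative only (the file names are hypothetical): get_data_files(['../app/src/httpdocs'])
# would yield entries such as ('httpdocs', ['../app/src/httpdocs/index.html', ...]),
# i.e. (target directory relative to its parent, list of contained file paths).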
if sys.platform in ['win32','cygwin','win64']:
# Add http files
data_files = get_data_files([r'../app/src/httpdocs']) + [
r'../app/src/local_server.pyc']
# We can customise the executable's creation by passing an instance
# of Executable() instead of just the script name.
exe = Executable('../app/src/main.py',
icon='../resources/images/Pie-chart.ico',
gui_only=True,
name=exeName,
)
setup(
data_files = data_files,
name = appName,
version = assetjet.__version__,
scripts = [exe],
options = {'bdist_esky':{
# forcibly include some other modules
'includes': ['lxml.etree', 'lxml._elementpath',
'gzip','numpy',
'PySide.QtWebKit', 'PySide.QtNetwork', 'PySide.QtSvg'],
# forcibly exclude some other modules
'excludes': ['Tkinter', 'Tkconstants', 'pydoc', 'tcl', 'tk', 'matplotlib', 'PIL', 'nose', 'setuptools', 'xlrd', 'xlwt', 'PyQt4', 'markdown', 'IPython', 'docutils'],
# force esky to freeze the app using py2exe
'freezer_module': 'cx_freeze',
# tweak the options used by cx_freezer
'freezer_options': {'packages': ['pygments', 'sqlalchemy.dialects.sqlite', 'assetjet']}
}}
)
| gpl-3.0 |
ElDeveloper/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts100/p160.py | 1 | 8613 | """
TODO: add table listing each forecast's peak and peak time...
"""
import datetime
import pytz
import numpy as np
import pandas as pd
from pandas.io.sql import read_sql
import matplotlib.dates as mdates
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
MDICT = {"primary": "Primary Field", "secondary": "Secondary Field"}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["cache"] = 3600
desc[
"description"
    ] = """This page presents a spaghetti plot of river stage
    and forecasts. The plot is roughly centered on the date of your choice
    and shows any forecasts issued from three days prior to that date
    through one day afterwards. Sorry that you have to know the station ID
prior to using this page (will fix at some point). Presented timestamps
are hopefully all in the local timezone of the reporting station. If
you download the data, the timestamps are all in UTC.
"""
utc = datetime.datetime.utcnow()
desc["arguments"] = [
dict(
type="text",
name="station",
default="EKDI4",
label="Enter 5 Char NWSLI Station Code (sorry):",
),
dict(
type="datetime",
name="dt",
default=utc.strftime("%Y/%m/%d %H%M"),
label="Time to center plot at (UTC Time Zone):",
min="2013/01/01 0000",
),
dict(
type="select",
name="var",
options=MDICT,
label="Which Variable to Plot:",
default="primary",
),
]
return desc
def get_context(fdict):
"""Do the common work"""
pgconn = get_dbconn("hml")
cursor = pgconn.cursor()
ctx = get_autoplot_context(fdict, get_description())
ctx["station"] = ctx["station"].upper()
station = ctx["station"]
dt = ctx["dt"]
# Attempt to get station information
cursor.execute(
"SELECT name, tzname from stations where id = %s and network ~* 'DCP'",
(station,),
)
ctx["name"] = ""
ctx["tzname"] = "UTC"
if cursor.rowcount > 0:
row = cursor.fetchone()
ctx["name"] = row[0]
ctx["tzname"] = row[1]
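    # Pull forecasts whose generation time falls within a window from three days
    # before to one day after the requested time, along with each forecast's
    # data points (timestamps are kept in UTC here).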
ctx["fdf"] = read_sql(
f"""with fx as (
select id, issued, primaryname, primaryunits, secondaryname,
secondaryunits from hml_forecast where station = %s
and generationtime between %s and %s)
SELECT f.id,
f.issued at time zone 'UTC' as issued,
d.valid at time zone 'UTC' as valid,
d.primary_value, f.primaryname,
f.primaryunits, d.secondary_value, f.secondaryname,
f.secondaryunits from
hml_forecast_data_{dt.year} d JOIN fx f
on (d.hml_forecast_id = f.id) ORDER by f.id ASC, d.valid ASC
""",
pgconn,
params=(
station,
dt - datetime.timedelta(days=3),
dt + datetime.timedelta(days=1),
),
index_col=None,
)
if not ctx["fdf"].empty:
ctx["fdf"]["valid"] = ctx["fdf"]["valid"].dt.tz_localize(pytz.UTC)
ctx["fdf"]["issued"] = ctx["fdf"]["issued"].dt.tz_localize(pytz.UTC)
ctx["primary"] = "%s[%s]" % (
ctx["fdf"].iloc[0]["primaryname"],
ctx["fdf"].iloc[0]["primaryunits"],
)
ctx["secondary"] = "%s[%s]" % (
ctx["fdf"].iloc[0]["secondaryname"],
ctx["fdf"].iloc[0]["secondaryunits"],
)
# get obs
mints = ctx["fdf"]["valid"].min()
maxts = ctx["fdf"]["valid"].max()
else:
mints = dt - datetime.timedelta(days=3)
maxts = dt + datetime.timedelta(days=3)
df = read_sql(
"SELECT distinct valid at time zone 'UTC' as valid, "
"h.label, value from hml_observed_data d "
"JOIN hml_observed_keys h on (d.key = h.id) WHERE station = %s and "
"valid between %s and %s ORDER by valid ASC",
pgconn,
params=(station, mints, maxts),
index_col=None,
)
if df.empty:
raise NoDataFound("No Data Found.")
df["valid"] = df["valid"].dt.tz_localize(pytz.UTC)
ctx["odf"] = df.pivot("valid", "label", "value")
if not ctx["fdf"].empty:
ctx["fdf"].reset_index(inplace=True)
ctx["df"] = pd.merge(
ctx["fdf"],
ctx["odf"],
left_on="valid",
right_on="valid",
how="left",
sort=False,
)
ctx["title"] = "[%s] %s" % (ctx["station"], ctx["name"])
ctx["subtitle"] = "+/- 72 hours around %s" % (
ctx["dt"]
.replace(tzinfo=pytz.UTC)
.astimezone(pytz.timezone(ctx["tzname"]))
.strftime("%d %b %Y %-I:%M %p %Z"),
)
if "df" not in ctx or (ctx["df"].empty and not ctx["odf"].empty):
ctx["primary"] = ctx["odf"].columns[0]
ctx["secondary"] = ctx["odf"].columns[1]
return ctx
def highcharts(fdict):
"""generate highcharts"""
ctx = get_context(fdict)
if "df" not in ctx:
raise NoDataFound("No Data Found.")
df = ctx["df"]
df["ticks"] = df["valid"].astype(np.int64) // 10 ** 6
lines = []
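    # Build one Highcharts line series per forecast issuance, then append the
    # observations as a single bold black "Obs" series.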
fxs = df["id"].unique()
for fx in fxs:
df2 = df[df["id"] == fx]
issued = (
df2.iloc[0]["issued"]
.tz_convert(pytz.timezone(ctx["tzname"]))
.strftime("%-m/%-d %-I%p %Z")
)
v = df2[["ticks", ctx["var"] + "_value"]].to_json(orient="values")
lines.append(
"""{
name: '"""
+ issued
+ """',
type: 'line',
tooltip: {valueDecimal: 1},
data: """
+ v
+ """
}
"""
)
ctx["odf"]["ticks"] = ctx["odf"].index.values.astype(np.int64) // 10 ** 6
if ctx["var"] in ctx:
v = ctx["odf"][["ticks", ctx[ctx["var"]]]].to_json(orient="values")
lines.append(
"""{
name: 'Obs',
type: 'line',
color: 'black',
lineWidth: 3,
tooltip: {valueDecimal: 1},
data: """
+ v
+ """
}
"""
)
series = ",".join(lines)
return (
"""
$("#ap_container").highcharts({
time: {
useUTC: false,
timezone: '"""
+ ctx["tzname"]
+ """'
},
title: {text: '"""
+ ctx["title"]
+ """'},
subtitle: {text: '"""
+ ctx["subtitle"]
+ """'},
chart: {zoomType: 'x'},
tooltip: {
shared: true,
crosshairs: true,
xDateFormat: '%d %b %Y %I:%M %p'
},
xAxis: {
title: {text: '"""
+ ctx["tzname"]
+ """ Timezone'},
type: 'datetime'},
yAxis: {title: {text: '"""
+ ctx.get(ctx["var"], "primary")
+ """'}},
series: ["""
+ series
+ """]
});
"""
)
def plotter(fdict):
"""Go"""
ctx = get_context(fdict)
if "df" not in ctx or (ctx["df"].empty and ctx["odf"].empty):
raise NoDataFound("No Data Found!")
df = ctx["df"]
title = "\n".join([ctx["title"], ctx["subtitle"]])
(fig, ax) = figure_axes(title=title)
fxs = df["id"].unique()
for fx in fxs:
df2 = df[df["id"] == fx]
issued = (
df2.iloc[0]["issued"]
.tz_convert(pytz.timezone(ctx["tzname"]))
.strftime("%-m/%-d %-I%p %Z")
)
ax.plot(
df2["valid"], df2[ctx["var"] + "_value"], zorder=2, label=issued
)
if not ctx["odf"].empty:
ax.plot(
ctx["odf"].index.values,
ctx["odf"][ctx[ctx["var"]]],
lw=2,
color="k",
label="Obs",
zorder=4,
)
ax.set_ylabel(ctx[ctx["var"]])
ax.xaxis.set_major_locator(
mdates.AutoDateLocator(tz=pytz.timezone(ctx["tzname"]))
)
ax.xaxis.set_major_formatter(
mdates.DateFormatter("%-d %b\n%Y", tz=pytz.timezone(ctx["tzname"]))
)
pos = ax.get_position()
ax.grid(True)
ax.set_position([pos.x0, pos.y0, 0.74, 0.8])
ax.set_xlabel(f"Timestamps in {ctx['tzname']} Timezone")
ax.legend(loc=(1.0, 0.0))
df["issued"] = df["issued"].apply(lambda x: x.strftime("%Y-%m-%d %H:%M"))
df["valid"] = df["valid"].apply(lambda x: x.strftime("%Y-%m-%d %H:%M"))
return fig, df
if __name__ == "__main__":
plotter(dict(station="MLGO1", dt="2021-06-19 1653"))
| mit |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/stats/morestats.py | 4 | 94486 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.1767242068607087, 24.459103821334018))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
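    # For large samples use normal approximations to the posterior distributions;
    # for smaller samples use the exact Student-t / generalized-gamma /
    # inverse-gamma forms (cf. the Jeffreys-prior discussion in bayes_mvs).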
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
    where :math:`\mu` is the sample mean, :math:`m_2` is the sample
    variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
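    Examples
    --------
    A brief illustrative check, based only on the identity used in the
    implementation below (``kstatvar(data, 1) == kstat(data, 2) / len(data)``):
    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.arange(10.)
    >>> np.allclose(stats.kstatvar(x, 1), stats.kstat(x, 2) / len(x))
    True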
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
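    # The interval endpoints are the two lambdas at which the log-likelihood
    # drops to llf(lmax) - fac. Step away from lmax in 0.1 increments until
    # rootfunc changes sign, then let brentq pin down each root.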
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda != 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm';
'extreme1' is a synonym for 'gumbel'.
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
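Examples
--------
A minimal illustrative run on simulated data (the sample below is
arbitrary and only meant to show typical usage):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x = stats.norm.rvs(loc=10, scale=2, size=200)
>>> result = stats.anderson(x, dist='norm')
>>> A2, crit, sig = result.statistic, result.critical_values, result.significance_level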
"""
if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
z = distributions.norm.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
z = distributions.logistic.cdf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
z = distributions.gumbel_l.cdf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
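Examples
--------
An illustrative sketch with simulated samples of different spread (the
parameters are arbitrary):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x1 = stats.norm.rvs(loc=0, scale=1, size=40)
>>> x2 = stats.norm.rvs(loc=0, scale=3, size=35)
>>> statistic, pvalue = stats.ansari(x1, x2)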
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
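Examples
--------
A minimal sketch comparing three simulated samples (the scales are chosen
arbitrarily for illustration):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> a = stats.norm.rvs(scale=1.0, size=50)
>>> b = stats.norm.rvs(scale=1.5, size=50)
>>> c = stats.norm.rvs(scale=2.0, size=50)
>>> statistic, pvalue = stats.bartlett(a, b, c)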
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
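Examples
--------
A short illustrative run on simulated samples (the data are arbitrary):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> a = stats.norm.rvs(scale=1.0, size=50)
>>> b = stats.norm.rvs(scale=2.0, size=50)
>>> statistic, pvalue = stats.levene(a, b, center='median')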
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures.
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
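Examples
--------
An illustrative call; the success count, number of trials and
hypothesized probability are arbitrary:
>>> from scipy import stats
>>> pval = stats.binom_test(3, n=15, p=0.1)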
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
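Examples
--------
A brief illustrative run on simulated samples (the data are arbitrary):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> a = stats.norm.rvs(scale=1.0, size=40)
>>> b = stats.norm.rvs(scale=2.5, size=40)
>>> statistic, pvalue = stats.fligner(a, b)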
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
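Examples
--------
A minimal sketch for paired measurements (the simulated shift is
arbitrary and only for illustration):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> before = stats.norm.rvs(loc=10, scale=2, size=40)
>>> after = before + stats.norm.rvs(loc=0.5, scale=1, size=40)
>>> statistic, pvalue = stats.wilcoxon(before, after)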
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
bad_kwd = kwds.keys()[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
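Examples
--------
An illustrative call on angles given in degrees (the values are
arbitrary):
>>> from scipy import stats
>>> angles = [355, 5, 2, 359, 10, 350]
>>> mean_angle = stats.circmean(angles, high=360, low=0)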
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j * ang), axis=axis))
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| mit |
mayavanand/RMMAFinalProject | azimuth/features/featurization.py | 1 | 26462 | import pandas
import time
import sklearn
import numpy as np
import Bio.SeqUtils as SeqUtil
import Bio.Seq as Seq
import util
import sys
import Bio.SeqUtils.MeltingTemp as Tm
import pickle
import itertools
def featurize_data(data, learn_options, Y, gene_position, pam_audit=True, length_audit=True):
'''
assumes that data contains the 30mer
returns set of features from which one can make a kernel for each one
'''
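# `learn_options` is assumed to be a dict of feature switches (e.g.
# "nuc_features", "gc_features", "pam_features", "include_Tm"), and `Y` and
# `gene_position` are assumed to be pandas DataFrames aligned with `data`.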
all_lens = data['30mer'].apply(len).values
unique_lengths = np.unique(all_lens)
num_lengths = len(unique_lengths)
assert num_lengths == 1, "should only have sequences of a single length, but found %s: %s" % (num_lengths, str(unique_lengths))
print "Constructing features..."
t0 = time.time()
feature_sets = {}
if learn_options["nuc_features"]:
# spectrum kernels (position-independent) and weighted degree kernels (position-dependent)
get_all_order_nuc_features(data['30mer'], feature_sets, learn_options, learn_options["order"], max_index_to_use=30)
check_feature_set(feature_sets)
if learn_options["gc_features"]:
gc_above_10, gc_below_10, gc_count = gc_features(data, length_audit)
feature_sets['gc_above_10'] = pandas.DataFrame(gc_above_10)
feature_sets['gc_below_10'] = pandas.DataFrame(gc_below_10)
feature_sets['gc_count'] = pandas.DataFrame(gc_count)
if learn_options["pam_features"]:
pam_above_1, pam_equals_1, pam_count = pam_features(data, length_audit)
feature_sets['pam_above_1'] = pandas.DataFrame(pam_above_1)
feature_sets['pam_equals_1'] = pandas.DataFrame(pam_equals_1)
feature_sets['pam_count'] = pandas.DataFrame(pam_count)
'''
if learn_options["repeat_features"]:
repeat_above_0, repeat_equals_0, repeat_count = repeat_features(data, length_audit)
feature_sets['repeat_above_0'] = pandas.DataFrame(repeat_above_0)
feature_sets['repeat_equals_1'] = pandas.DataFrame(repeat_equals_0)
feature_sets['repeat_count'] = pandas.DataFrame(repeat_count)
'''
if learn_options["include_gene_position"]:
# gene_position_columns = ["Amino Acid Cut position", "Percent Peptide", "Nucleotide cut position"]
# gene_position_columns = ["Percent Peptide", "Nucleotide cut position"]
for set in gene_position.columns:
set_name = set
feature_sets[set_name] = pandas.DataFrame(gene_position[set])
feature_sets["Percent Peptide <50%"] = feature_sets["Percent Peptide"] < 50
feature_sets["Percent Peptide <50%"]['Percent Peptide <50%'] = feature_sets["Percent Peptide <50%"].pop("Percent Peptide")
if learn_options["include_gene_effect"]:
print "including gene effect"
gene_names = Y['Target gene']
enc = sklearn.preprocessing.OneHotEncoder()
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(gene_names)
one_hot_genes = np.array(enc.fit_transform(label_encoder.transform(gene_names)[:, None]).todense())
feature_sets["gene effect"] = pandas.DataFrame(one_hot_genes,
columns=["gene_%d" % i for i in range(one_hot_genes.shape[1])], index=gene_names.index)
if learn_options['include_known_pairs']:
feature_sets['known pairs'] = pandas.DataFrame(Y['test'])
if learn_options["include_NGGX_interaction"]:
feature_sets["NGGX"] = NGGX_interaction_feature(data, pam_audit)
#if learn_options["include_NGGXX_interaction"]:
# feature_sets["NGGXX"] = NGGXX_interaction_feature(data, pam_audit)
if learn_options["include_Tm"]:
feature_sets["Tm"] = Tm_feature(data, pam_audit)
if learn_options["include_sgRNAscore"]:
feature_sets["sgRNA Score"] = pandas.DataFrame(data["sgRNA Score"])
if learn_options["include_drug"]:
# feature_sets["drug"] = pandas.DataFrame(data["drug"])
drug_names = Y.index.get_level_values('drug').tolist()
enc = sklearn.preprocessing.OneHotEncoder()
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(drug_names)
one_hot_drugs = np.array(enc.fit_transform(label_encoder.transform(drug_names)[:, None]).todense())
feature_sets["drug"] = pandas.DataFrame(one_hot_drugs, columns=["drug_%d" % i for i in range(one_hot_drugs.shape[1])], index=drug_names)
if learn_options['include_strand']:
feature_sets['Strand effect'] = (pandas.DataFrame(data['Strand']) == 'sense')*1
if learn_options["include_gene_feature"]:
feature_sets["gene features"] = gene_feature(Y, data, learn_options)
if learn_options["include_gene_guide_feature"] > 0:
tmp_feature_sets = gene_guide_feature(Y, data, learn_options)
for key in tmp_feature_sets:
feature_sets[key] = tmp_feature_sets[key]
if learn_options["include_microhomology"]:
feature_sets["microhomology"] = get_micro_homology_features(Y['Target gene'], learn_options, data)
t1 = time.time()
print "\t\tElapsed time for constructing features is %.2f seconds" % (t1-t0)
check_feature_set(feature_sets)
if learn_options['normalize_features']:
assert("should not be here as doesn't make sense when we make one-off predictions, but could make sense for internal model comparisons when using regularized models")
feature_sets = normalize_feature_sets(feature_sets)
check_feature_set(feature_sets)
return feature_sets
def check_feature_set(feature_sets):
'''
Ensure the number of individuals (rows) is the same in each feature set
'''
assert feature_sets != {}, "no feature sets present"
N = None
for ft in feature_sets.keys():
N2 = feature_sets[ft].shape[0]
if N is None:
N = N2
else:
assert N >= 1, "should be at least one individual"
assert N == N2, "# of individuals do not match up across feature sets"
for set in feature_sets.keys():
if np.any(np.isnan(feature_sets[set])):
raise Exception("found Nan in set %s" % set)
def NGGX_interaction_feature(data, pam_audit=True):
'''
assuming 30-mer, grab the NGGX _ _ positions, and make a one-hot
encoding of the NX nucleotides yielding 4x4=16 features
'''
sequence = data['30mer'].values
feat_NX = pandas.DataFrame()
# check that GG is where we think
for seq in sequence:
if pam_audit and seq[25:27] != "GG":
raise Exception("expected GG but found %s" % seq[25:27])
NX = seq[24]+seq[27]
NX_onehot = nucleotide_features(NX,order=2, feature_type='pos_dependent', max_index_to_use=2, prefix="NGGX")
# NX_onehot[:] = np.random.rand(NX_onehot.shape[0]) ##TESTING RANDOM FEATURE
feat_NX = pandas.concat([feat_NX, NX_onehot], axis=1)
return feat_NX.T
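# Illustration of the NX encoding above (a sketch, assuming the standard 30-mer layout
# with the PAM 'GG' at 0-based positions 25-26): the nucleotide at position 24 (the N of
# NGG) is concatenated with the one at position 27, and that dinucleotide is one-hot
# encoded into 4x4 = 16 position-dependent features per guide.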
def NGGXX_interaction_feature(data, pam_audit=True):
#added by Maya and Rachel
#assuming 30-mer, grab the NGGXX _ _ positions, and make a one-hot
#encoding of the NXX nucleotides yielding 4x4x4=64 features
sequence = data['30mer'].values
feat_NXX = pandas.DataFrame()
# check that GG is where we think
for seq in sequence:
if pam_audit and seq[25:27] != "GG":
raise Exception("expected GG but found %s" % seq[25:27])
NXX = seq[24]+seq[27]+seq[28]
NXX_onehot = nucleotide_features(NXX, order=3, feature_type='pos_dependent', max_index_to_use=3, prefix="NGGXX")
# NXX_onehot[:] = np.random.rand(NXX_onehot.shape[0]) ##TESTING RANDOM FEATURE
feat_NXX = pandas.concat([feat_NXX, NXX_onehot], axis=1)
return feat_NXX.T
def countPAM(s, length_audit=True):
#added by Maya and Rachel
#number of PAMs for the entire 30mer
if length_audit:
assert len(s) == 30
#check to ensure s of right length
numPams = 0
i = 1
while(i < 30):
if s[i] == 'G':
if s[i+1] == 'G':
numPams = numPams+1
i = i+3
return numPams
def countRepeats(s, length_audit=True):
#added by Maya and Rachel
#number of repeats for the entire 30mer
D = {}
i = 0
numRepeats = 0
while(i < 30):
codon = s[i] + s[i+1] + s[i+2]
if codon in D.keys():
D[codon] = D[codon] + 1
else:
D[codon] = 1
i = i+3
for key in D.keys():
if D[key] != 1:
numRepeats = numRepeats + D[key] - 1
return numRepeats
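# Example usage of the two helpers above (hypothetical 30-mer, illustrative only):
# seq = "AACAGCTGATCTGGTCTGCACGGAACAGCG"
# countPAM(seq)      # 'GG' dinucleotides found on the 3-step scan starting at index 1
# countRepeats(seq)  # extra occurrences of repeated codons (non-overlapping 3-mers)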
def get_all_order_nuc_features(data, feature_sets, learn_options, maxorder, max_index_to_use, prefix=""):
for order in range(1, maxorder+1):
print "\t\tconstructing order %s features" % order
nuc_features_pd, nuc_features_pi = apply_nucleotide_features(data, order, learn_options["num_proc"],
include_pos_independent=True, max_index_to_use=max_index_to_use, prefix=prefix)
feature_sets['%s_nuc_pd_Order%i' % (prefix, order)] = nuc_features_pd
if learn_options['include_pi_nuc_feat']:
feature_sets['%s_nuc_pi_Order%i' % (prefix, order)] = nuc_features_pi
check_feature_set(feature_sets)
print "\t\t\t\t\t\t\tdone"
def countGC(s, length_audit=True):
'''
GC content for only the 20mer, as per the Doench paper/code
'''
if length_audit:
assert len(s) == 30, "seems to assume 30mer"
return len(s[4:24].replace('A', '').replace('T', ''))
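# In other words, countGC strips A and T from the 20-mer guide portion (positions 4-23
# of the 30-mer) and returns the number of remaining G/C bases.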
def SeqUtilFeatures(data):
'''
assuming '30mer' is a key, return a one-column DataFrame containing the
molecular weight of each 30-mer sequence (computed with Bio.SeqUtils.molecular_weight)
'''
sequence = data['30mer'].values
num_features = 1
featarray = np.ones((sequence.shape[0], num_features))
for i, seq in enumerate(sequence):
assert len(seq) == 30, "seems to assume 30mer"
featarray[i, 0] = SeqUtil.molecular_weight(str(seq))
feat = pandas.DataFrame(pandas.DataFrame(featarray))
return feat
def organism_feature(data):
'''
Human vs. mouse
'''
organism = np.array(data['Organism'].values)
feat = pandas.DataFrame(organism[:, None])
import ipdb; ipdb.set_trace()
return feat
def get_micro_homology_features(gene_names, learn_options, X):
# originally was flipping the guide itself as necessary, but now flipping the gene instead
print "building microhomology features"
feat = pandas.DataFrame(index=X.index)
feat["mh_score"] = ""
feat["oof_score"] = ""
#with open(r"tmp\V%s_gene_mismatches.csv" % learn_options["V"],'wb') as f:
if True:
# number of nucleotides to take to the left and right of the guide
k_mer_length_left = 9
k_mer_length_right = 21
for gene in gene_names.unique():
gene_seq = Seq.Seq(util.get_gene_sequence(gene)).reverse_complement()
guide_inds = np.where(gene_names.values == gene)[0]
print "getting microhomology for all %d guides in gene %s" % (len(guide_inds), gene)
for j, ps in enumerate(guide_inds):
guide_seq = Seq.Seq(X['30mer'][ps])
strand = X['Strand'][ps]
if strand=='sense':
gene_seq = gene_seq.reverse_complement()
# figure out the sequence to the left and right of this guide, in the gene
ind = gene_seq.find(guide_seq)
if ind==-1:
gene_seq = gene_seq.reverse_complement()
ind = gene_seq.find(guide_seq)
#assert ind != -1, "still didn't work"
#print "shouldn't get here"
else:
#print "all good"
pass
#assert ind != -1, "could not find guide in gene"
if ind==-1:
#print "***could not find guide %s for gene %s" % (str(guide_seq), str(gene))
#if.write(str(gene) + "," + str(guide_seq))
mh_score = 0
oof_score = 0
else:
#print "worked"
assert gene_seq[ind:(ind+len(guide_seq))]==guide_seq, "match not right"
left_win = gene_seq[(ind - k_mer_length_left):ind]
right_win = gene_seq[(ind + len(guide_seq)):(ind + len(guide_seq) + k_mer_length_right)]
#if strand=='antisense':
# # it's arbitrary which of sense and anti-sense we flip, we just want
# # to keep them in the same relative alphabet/direction
# left_win = left_win.reverse_complement()
# right_win = right_win.reverse_complement()
assert len(left_win.tostring())==k_mer_length_left
assert len(right_win.tostring())==k_mer_length_right
sixtymer = str(left_win) + str(guide_seq) + str(right_win)
assert len(sixtymer)==60, "should be of length 60"
mh_score, oof_score = microhomology.compute_score(sixtymer)
feat.ix[ps,"mh_score"] = mh_score
feat.ix[ps,"oof_score"] = oof_score
print "computed microhomology of %s" % (str(gene))
return pandas.DataFrame(feat, dtype='float')
def local_gene_seq_features(gene_names, learn_options, X):
print "building local gene sequence features"
feat = pandas.DataFrame(index=X.index)
feat["gene_left_win"] = ""
feat["gene_right_win"] = ""
# number of nucleotides to take to the left and right of the guide
k_mer_length = learn_options['include_gene_guide_feature']
for gene in gene_names.unique():
gene_seq = Seq.Seq(util.get_gene_sequence(gene)).reverse_complement()
for ps in np.where(gene_names.values==gene)[0]:
guide_seq = Seq.Seq(X['30mer'][ps])
strand = X['Strand'][ps]
if strand=='sense':
guide_seq = guide_seq.reverse_complement()
#gene_seq = gene_seq.reverse_complement()
# figure out the sequence to the left and right of this guide, in the gene
ind = gene_seq.find(guide_seq)
if ind ==-1:
#gene_seq = gene_seq.reverse_complement()
#ind = gene_seq.find(guide_seq)
assert ind != -1, "could not find guide in gene"
assert gene_seq[ind:(ind+len(guide_seq))]==guide_seq, "match not right"
left_win = gene_seq[(ind - k_mer_length):ind]
right_win = gene_seq[(ind + len(guide_seq)):(ind + len(guide_seq) + k_mer_length)]
if strand=='antisense':
# it's arbitrary which of sense and anti-sense we flip, we just want
# to keep them in the same relative alphabet/direction
left_win = left_win.reverse_complement()
right_win = right_win.reverse_complement()
assert not left_win.tostring()=="", "k_mer_context, %s, is too large" % k_mer_length
assert not left_win.tostring()=="", "k_mer_context, %s, is too large" % k_mer_length
assert len(left_win)==len(right_win), "k_mer_context, %s, is too large" % k_mer_length
feat.ix[ps,"gene_left_win"] = left_win.tostring()
feat.ix[ps,"gene_right_win"] = right_win.tostring()
print "featurizing local context of %s" % (gene)
feature_sets = {}
get_all_order_nuc_features(feat["gene_left_win"], feature_sets, learn_options, learn_options["order"], max_index_to_use=sys.maxint, prefix="gene_left_win")
get_all_order_nuc_features(feat["gene_right_win"], feature_sets, learn_options, learn_options["order"], max_index_to_use=sys.maxint, prefix="gene_right_win")
return feature_sets
def gene_feature(Y, X, learn_options):
'''
Things like the sequence of the gene, the DNA Tm of the gene, etc.
'''
gene_names = Y['Target gene']
gene_length = np.zeros((gene_names.values.shape[0], 1))
gc_content = np.zeros((gene_names.shape[0], 1))
temperature = np.zeros((gene_names.shape[0], 1))
molecular_weight = np.zeros((gene_names.shape[0], 1))
for gene in gene_names.unique():
seq = util.get_gene_sequence(gene)
gene_length[gene_names.values==gene] = len(seq)
gc_content[gene_names.values==gene] = SeqUtil.GC(seq)
temperature[gene_names.values==gene] = Tm.Tm_staluc(seq, rna=False)
molecular_weight[gene_names.values==gene] = SeqUtil.molecular_weight(seq, 'DNA')
all = np.concatenate((gene_length, gc_content, temperature, molecular_weight), axis=1)
df = pandas.DataFrame(data=all, index=gene_names.index, columns=['gene length',
'gene GC content',
'gene temperature',
'gene molecular weight'])
return df
def gene_guide_feature(Y, X, learn_options):
#features, which are related to parts of the gene-local to the guide, and
#possibly incorporating the guide or interactions with it
#expensive, so pickle if necessary
gene_file = r"..\data\gene_seq_feat_V%s_km%s.ord%s.pickle" % (learn_options['V'], learn_options['include_gene_guide_feature'], learn_options['order'])
if False: #os.path.isfile(gene_file): #while debugging, comment out
print "loading local gene seq feats from file %s" % gene_file
with open(gene_file, "rb") as f: feature_sets = pickle.load(f)
else:
feature_sets = local_gene_seq_features(Y['Target gene'], learn_options, X)
print "writing local gene seq feats to file %s" % gene_file
with open(gene_file, "wb") as f: pickle.dump(feature_sets, f)
return feature_sets
def gc_cont(seq):
return (seq.count('G') + seq.count('C'))/float(len(seq))
def Tm_feature(data, pam_audit=True):
'''
assuming '30-mer' is a key
get melting temperature features from:
0-the 30-mer ("global Tm")
1-the Tm (melting temperature) of the DNA:RNA hybrid from positions 16 - 20 of the sgRNA, i.e. the 5nts immediately proximal of the NGG PAM
2-the Tm of the DNA:RNA hybrid from position 8 - 15 (i.e. 8 nt)
3-the Tm of the DNA:RNA hybrid from position 3 - 7 (i.e. 5 nt)
'''
sequence = data['30mer'].values
featarray = np.ones((sequence.shape[0],4))
for i, seq in enumerate(sequence):
if pam_audit and seq[25:27]!="GG":
raise Exception("expected GG but found %s" % seq[25:27])
rna = False
featarray[i,0] = Tm.Tm_staluc(seq, rna=rna) #30mer Tm
featarray[i,1] = Tm.Tm_staluc(seq[19:24], rna=rna) #5nts immediately proximal of the NGG PAM
featarray[i,2] = Tm.Tm_staluc(seq[11:19], rna=rna) #8-mer
featarray[i,3] = Tm.Tm_staluc(seq[6:11], rna=rna) #5-mer
feat = pandas.DataFrame(featarray, index=data.index, columns=["Tm global_%s" % rna, "5mer_end_%s" %rna, "8mer_middle_%s" %rna, "5mer_start_%s" %rna])
return feat
def gc_features(data, audit=True):
gc_count = data['30mer'].apply(lambda seq: countGC(seq, audit))
gc_count.name = 'GC count'
gc_above_10 = (gc_count > 10)*1
gc_above_10.name = 'GC > 10'
gc_below_10 = (gc_count < 10)*1
gc_below_10.name = 'GC < 10'
return gc_above_10, gc_below_10, gc_count
def pam_features(data, audit=True):
pam_count = data['30mer'].apply(lambda seq: countPAM(seq, audit))
pam_count.name = 'PAM count'
pam_above_1 = (pam_count > 1)*1
pam_above_1.name = 'PAM > 1'
pam_equals_1 = (pam_count < 2)*1
pam_equals_1.name = 'PAM = 1'
return pam_above_1, pam_equals_1, pam_count
def repeat_features(data, audit=True):
repeat_count = data['30mer'].apply(lambda seq: countRepeats(seq, audit))
repeat_count.name = 'repeat count'
repeat_above_0 = (repeat_count > 0)*1
repeat_above_0.name = 'repeat > 0'
repeat_equals_0 = (repeat_count < 1)*1
repeat_equals_0.name = 'repeat < 1'
return repeat_above_0, repeat_equals_0, repeat_count
def normalize_features(data,axis):
'''
input: Pandas.DataFrame of dtype=np.float64 array, of dimensions
mean-center, and unit variance each feature
'''
data -= data.mean(axis)
data /= data.std(axis)
# drop feature columns that contain NaNs
data = data.dropna(1)
if np.any(np.isnan(data.values)): raise Exception("found NaN in normalized features")
return data
def apply_nucleotide_features(seq_data_frame, order, num_proc, include_pos_independent, max_index_to_use, prefix=""):
fast = True
if include_pos_independent:
feat_pd = seq_data_frame.apply(nucleotide_features, args=(order, max_index_to_use, prefix, 'pos_dependent'))
feat_pi = seq_data_frame.apply(nucleotide_features, args=(order, max_index_to_use, prefix, 'pos_independent'))
assert not np.any(np.isnan(feat_pd)), "nans here can arise from sequences of different lengths"
assert not np.any(np.isnan(feat_pi)), "nans here can arise from sequences of different lengths"
return feat_pd, feat_pi
else:
feat_pd = seq_data_frame.apply(nucleotide_features, args=(order, max_index_to_use, prefix, 'pos_dependent'))
assert not np.any(np.isnan(feat_pd)), "found nan in feat_pd"
return feat_pd
def get_alphabet(order, raw_alphabet = ['A', 'T', 'C', 'G']):
alphabet = ["".join(i) for i in itertools.product(raw_alphabet, repeat=order)]
return alphabet, raw_alphabet
def nucleotide_features(s, order, max_index_to_use, prefix="", feature_type='all', raw_alphabet = ['A', 'T', 'C', 'G']):
'''
compute position-specific order-mer features for the 4-letter alphabet
(e.g. for a sequence of length 30, there are 30*4 single nucleotide features
and (30-1)*4^2=464 double nucleotide features)
'''
assert feature_type in ['all', 'pos_independent', 'pos_dependent']
if max_index_to_use <= len(s):
#print "WARNING: trimming max_index_to use down to length of string=%s" % len(s)
max_index_to_use = len(s)
if max_index_to_use is not None:
s = s[:max_index_to_use]
#assert(len(s)==30, "length not 30")
#s = s[:30] #cut-off at thirty to clean up extra data that they accidentally left in, and were instructed to ignore in this way
alphabet, raw_alphabet = get_alphabet(order, raw_alphabet = raw_alphabet)
features_pos_dependent = np.zeros(len(alphabet)*(len(s)-(order-1)))
features_pos_independent = np.zeros(np.power(len(raw_alphabet),order))
for position in range(0, len(s)-order+1, 1):
nucl = s[position:position+order]
features_pos_dependent[alphabet.index(nucl) + (position*len(alphabet))] = 1.0
features_pos_independent[alphabet.index(nucl)] += 1.0
index_dependent = ['%s_pd.Order%d_P%d' % (prefix, order, i) for i in range(len(features_pos_dependent))]
if np.any(np.isnan(features_pos_dependent)):
raise Exception("found nan features in features_pos_dependent")
if np.any(np.isnan(features_pos_independent)):
raise Exception("found nan features in features_pos_independent")
if feature_type == 'all' or feature_type == 'pos_independent':
index_independent = ['%s_pi.Order%d_P%d' % (prefix, order,i) for i in range(len(features_pos_independent))]
if feature_type == 'all':
res = pandas.Series(features_pos_dependent,index=index_dependent), pandas.Series(features_pos_independent,index=index_independent)
assert not (np.any(np.isnan(res[0].values)) or np.any(np.isnan(res[1].values)))
return res
else:
res = pandas.Series(features_pos_independent, index=index_independent)
assert not np.any(np.isnan(res.values))
return res
res = pandas.Series(features_pos_dependent, index=index_dependent)
assert not np.any(np.isnan(res.values))
return res
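# Worked example for the counts above (assuming a full 30-mer and order=2):
# there are 30-2+1 = 29 positions and 4^2 = 16 dimers, giving 29*16 = 464
# position-dependent features plus 16 position-independent counts per sequence.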
def nucleotide_features_dictionary(prefix=''):
seqname = ['-4', '-3', '-2', '-1']
seqname.extend([str(i) for i in range(1,21)])
seqname.extend(['N', 'G', 'G', '+1', '+2', '+3'])
orders = [1, 2, 3]
sequence = 30
feature_names_dep = []
feature_names_indep = []
index_dependent = []
index_independent = []
for order in orders:
raw_alphabet = ['A', 'T', 'C', 'G']
alphabet = ["".join(i) for i in itertools.product(raw_alphabet, repeat=order)]
features_pos_dependent = np.zeros(len(alphabet)*(sequence-(order-1)))
features_pos_independent = np.zeros(np.power(len(raw_alphabet),order))
index_dependent.extend(['%s_pd.Order%d_P%d' % (prefix, order, i) for i in range(len(features_pos_dependent))])
index_independent.extend(['%s_pi.Order%d_P%d' % (prefix, order,i) for i in range(len(features_pos_independent))])
for pos in range(sequence-(order-1)):
for letter in alphabet:
feature_names_dep.append('%s_%s' % (letter, seqname[pos]))
for letter in alphabet:
feature_names_indep.append('%s' % letter)
assert len(feature_names_indep) == len(index_independent)
assert len(feature_names_dep) == len(index_dependent)
index_all = index_dependent + index_independent
feature_all = feature_names_dep + feature_names_indep
return dict(zip(index_all, feature_all))
def normalize_feature_sets(feature_sets):
'''
zero-mean, unit-variance each feature within each set
'''
print "Normalizing features..."
t1 = time.time()
new_feature_sets = {}
for set in feature_sets:
new_feature_sets[set] = normalize_features(feature_sets[set],axis=0)
if np.any(np.isnan(new_feature_sets[set].values)):
raise Exception("found Nan feature values in set=%s" % set)
assert new_feature_sets[set].shape[1] > 0, "0 columns of features"
t2 = time.time()
print "\t\tElapsed time for normalizing features is %.2f seconds" % (t2-t1)
return new_feature_sets
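# Minimal usage sketch (hypothetical learn_options; only keys referenced above are shown
# and the values are illustrative, not defaults taken from the original project):
# learn_options = {"nuc_features": True, "order": 2, "num_proc": 1,
# "include_pi_nuc_feat": True, "gc_features": True, "pam_features": True,
# "include_gene_position": False, "include_gene_effect": False,
# "include_known_pairs": False, "include_NGGX_interaction": False,
# "include_Tm": True, "include_sgRNAscore": False, "include_drug": False,
# "include_strand": False, "include_gene_feature": False,
# "include_gene_guide_feature": 0, "include_microhomology": False,
# "normalize_features": False}
# feature_sets = featurize_data(data, learn_options, Y, gene_position)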
| bsd-3-clause |
RuthAngus/granola | granola/download.py | 1 | 1294 | # Downloading Kepler light curves
import os
import pandas as pd
import kplr
import kepler_data as kd
def get_lc(id, KPLR_DIR="/Users/ruthangus/.kplr/data/lightcurves"):
"""
Downloads the kplr light curve and loads x, y and yerr.
"""
kid = str(int(id)).zfill(9)
path = os.path.join(KPLR_DIR, "{}".format(kid))
if not os.path.exists(path):
client = kplr.API()
star = client.star(kid)
print("Downloading LC...")
star.get_light_curves(fetch=True, short_cadence=False)
x, y, yerr = kd.load_kepler_data(os.path.join(KPLR_DIR,
"{}".format(kid)))
else:
x, y, yerr = kd.load_kepler_data(os.path.join(KPLR_DIR,
"{}".format(kid)))
x -= x[0]
return x, y, yerr
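# Example (hypothetical KIC id, illustrative only):
# x, y, yerr = get_lc(1234567)  # fetched via kplr on the first call, then read from KPLR_DIR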
if __name__ == "__main__":
DATA_DIR = "/Users/ruthangus/projects/granola/granola/data"
# load KIC-TGAS
data = pd.read_csv(os.path.join(DATA_DIR, "kic_tgas.csv"))
# cut on temperature and logg
m = (data.teff.values < 6250) * (4 < data.logg.values)
data = data.iloc[m]
for i, kic in enumerate(data.kepid.values[275:400]):
print(kic, i, "of", len(data.kepid.values[275:400]))
x, y, yerr = get_lc(kic)
| mit |
phdowling/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest index
has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
SophieIPP/ipp-macro-series-parser | ipp_macro_series_parser/denombrements_fiscaux/agregats_ipp.py | 1 | 36244 | # -*- coding: utf-8 -*-
# TAXIPP -- A French microsimulation model
# By: IPP <[email protected]>
#
# Copyright (C) 2012, 2013, 2014, 2015 IPP
# https://github.com/taxipp
#
# This file is part of TAXIPP.
#
# TAXIPP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TAXIPP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import numpy
import os
import pandas
import pkg_resources
from py_expression_eval import Parser
from ipp_macro_series_parser.config import Config
from ipp_macro_series_parser.denombrements_fiscaux.parsers import (
get_denombrements_fiscaux_data_frame)
from ipp_macro_series_parser.data_extraction import get_or_construct_value
config_parser = Config(
config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location)
)
def update_index_by_variable_name_appearing_in_formula(index_by_variable_name, formula):
parser_formula = Parser()
try:
expr = parser_formula.parse(formula)
except Exception, e:
print formula
raise(e)
formula_variables = expr.variables()
components = dict(
(formula_variable, {'code': formula_variable}) for formula_variable in formula_variables
)
index_by_variable_name.update(components)
return index_by_variable_name
def create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name = None):
index_by_variable_name = dict()
for variable_name, formula in formula_by_variable_name.iteritems():
if not formula:
continue
index_by_variable_name[variable_name] = {
'code': None,
'formula': formula,
}
if isinstance(formula, list):
for single_formula in formula:
index_by_variable_name = update_index_by_variable_name_appearing_in_formula(
index_by_variable_name, single_formula['formula'])
else:
index_by_variable_name = update_index_by_variable_name_appearing_in_formula(index_by_variable_name, formula)
if level_2_formula_by_variable_name is not None:
level_2_index_by_variable_name = dict()
for variable_name, formula in level_2_formula_by_variable_name.iteritems():
level_2_index_by_variable_name[variable_name] = dict(
formula = formula,
)
index_by_variable_name.update(level_2_index_by_variable_name)
return index_by_variable_name
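# Sketch of the resulting index (hypothetical one-formula input, for illustration):
# create_index_by_variable_name({'a_plus_b': 'f1aa + f1ab'}) would return
# {'a_plus_b': {'code': None, 'formula': 'f1aa + f1ab'},
# 'f1aa': {'code': 'f1aa'}, 'f1ab': {'code': 'f1ab'}}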
def build_aggregates(raw_data, formula_by_variable_name, level_2_formula_by_variable_name = None, years = None,
fill_value = numpy.NaN):
assert years is not None
aggregates = None
index_by_variable_name = create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name)
for variable_name in formula_by_variable_name.keys() + level_2_formula_by_variable_name.keys():
serie, formula = get_or_construct_value(
raw_data, variable_name, index_by_variable_name, years = years, fill_value = fill_value)
serie = serie.reset_index().drop_duplicates().set_index('year')
assert not numpy.any(serie.index.duplicated()), 'Duplicated index for {} : {}'.format(
variable_name, serie)
if aggregates is None:
aggregates = serie
else:
try:
aggregates = pandas.concat([aggregates, serie], axis = 1, verify_integrity = True)
except Exception, e:
print "aggregates", aggregates
print "serie", serie
raise(e)
return aggregates
formula_by_variable_name = dict(
## Salaires
salaires_imposables = [
dict(
start = 1990,
end = 2004,
formula = 'f1aj + f1bj + f1cj + f1dj + f1ej + f1fj',
),
dict(
start = 2005,
end = 2006,
formula = 'f1aj + f1bj + f1cj + f1dj + f1ej',
),
dict(
start = 2007,
end = 2013,
formula = 'f1aj + f1bj + f1cj + f1dj',
),
dict(
start = 2014,
end = 2015,
formula = 'f1aj + f1bj + f1cj + f1dj',
),
],
heures_supplementaires = [
dict(
start = 2007,
end = 2013,
formula = ' + f1au + f1bu + f1cu + f1du', # les heures sup effectuées en 2012 payées en 2013 ...
),
],
## Bénéfices agricoles
benefices_agricoles_forfait_exoneres = 'f5hn + f5in + f5jn', # frag_exon
benefices_agricoles_forfait_imposables = 'f5ho + f5io + f5jo', # frag_impo
benefices_agricoles_reels_exoneres = 'f5hb + f5ib + f5jb', # arag_exon
benefices_agricoles_reels_imposables = [
dict(
start = 1990,
end = 2005,
formula = 'f5hc + f5ic + f5jc + f5hd + f5id + f5jd'
),
dict(
start = 2006,
end = 2013,
formula = 'f5hc + f5ic + f5jc'
),
], # arag_impg TODO: check last values in openfisca
benefices_agricoles_reels_deficits = 'f5hf + f5if + f5jf', # arag_defi
benefices_agricoles_reels_sans_cga_exoneres = 'f5hh + f5ih + f5jh', # nrag_exon
benefices_agricoles_reels_sans_cga_imposables = [
dict(
start = 1990,
end = 2005,
formula = 'f5hi + f5ii + f5ji + f5hj + f5ij + f5jj',
),
dict(
start = 2006,
end = 2013,
formula = 'f5hi + f5ii + f5ji',
),
], # nrag_impg TODO: check last values in openfisca
# TODO: check years prior to 2006
benefices_agricoles_reels_sans_cga_deficits = 'f5hl + f5il + f5jl', # nrag_defi
# TODO: benefices_agricoles_ = 'f5hm + f5im + f5jm', # nrag_ajag
## Bénéfices industriels et commerciaux professionnels (déclaration complémentaire, cadres 5B)
benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'f5ko + f5lo + f5mo', # mbic_impv
# TODO: error, the same line is used twice
benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'f5kp + f5lp + f5mp', # mbic_imps
benefices_industriels_commerciaux_professionnels_reels_exoneres = 'f5kb + f5lb + f5mb', # mbic_imps
benefices_industriels_commerciaux_professionnels_reels_imposables_normal = [
dict(
start = 2003,
end = 2009,
formula = 'f5kc + f5lc + f5mc', # abic_impn
),
],
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = [
dict(
start = 2003,
end = 2009,
formula = 'f5kd + f5ld + f5md', # abic_imps
),
],
benefices_industriels_commerciaux_professionnels_reels_imposables_normal_et_simplifie = [
dict(
start = 2010,
end = 2014,
formula = 'f5kc + f5lc + f5mc', # abic_impn
),
],
benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'f5kh + f5lh + f5mh', # nbic_exon
benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'f5ki + f5li + f5mi', # nbic_impn
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'f5kj + f5lj + f5mj', # nbic_mvct
deficits_industriels_commerciaux_professionnels_normal = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kf + f5lf + f5mf', # abic_defn
),
],
deficits_industriels_commerciaux_professionnels_simplifie = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kg + f5lg + f5mg', # abic_defs
),
],
deficits_industriels_commerciaux_professionnels_normal_et_simplifie = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5kf + f5lf + f5mf',
)
],
deficits_industriels_commerciaux_professionnels_normal_sans_cga = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kl + f5ll + f5ml', # nbic_defn
),
],
deficits_industriels_commerciaux_professionnels_simplifie_sans_cga = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5km + f5lm + f5mm',
),
],
deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5kl + f5ll + f5ml',
),
],
# deficits_industriels_commerciaux_professionnels_locations = [
## dict(
## start = 1990,
## end = 2008,
## formula = 'f5km + f5lm + f5mm',
## ),
# dict(
# start = 2009,
# end = 2014,
# formula = 'f5qa + f5ra + f5sa',
# ),
# ], # nbic_defs
## Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C)
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'f5nn + f5on + f5pn', # macc_exon
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'f5no + f5oo + f5po', # macc_impv
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'f5np + f5op + f5op', # macc_impS
benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'f5nb + f5ob + f5pb', # aacc_exon
# benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'f5nc + f5nc + f5nc', # aacc_impn # TODO: is it normal that the same box appears three times?
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = [
dict(
start = 2003,
end = 2009,
formula = 'f5nc + f5oc + f5pc', # aacc_impn
),
],
# benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'f5nd + f5nd + f5nd', # aacc_imps TODO: this applies before 2010 but comes after professional furnished rentals, # TODO: is it normal that the same box appears three times?
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = [
dict(
start = 2003,
end = 2009,
formula = 'f5nd + f5od + f5pd', # aacc_imps TODO: this applies before 2010 but comes after professional furnished rentals,
),
],
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie = [
dict(
start = 2010,
end = 2014,
formula = 'f5nc + f5oc + f5pc',
),
],
benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'f5nh + f5oh + f5ph', # nacc_exon
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'f5ni + f5ni + f5ni', # nacc_impn
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'f5nj + f5nj + f5nj', # nacc_meup TODO: this applies before 2012 but comes after rentals already subject to social levies,
deficits_industriels_commerciaux_non_professionnels_normal = [ #'f5nf + f5of + f5pf', # aacc_defn
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nf + f5of + f5pf',
),
],
deficits_industriels_commerciaux_non_professionnels_simplifie = [ #'f5ng + f5og + f5pg', # aacc_gits
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5ng + f5og + f5pg',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_sans_cga = [
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nl + f5ol + f5pl',
),
],
deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga = [
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nm + f5om + f5pm',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5nf + f5of + f5pf',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5nl + f5ol + f5pl',
),
],
# deficits_industriels_commerciaux_non_professionnels_sans_cga = 'f5nl + f5ol + f5pl', # nacc_defn
# TODO: Rentals already subject to social levies, without CGA (actual-profit regime)
# deficits_industriels_commerciaux_non_professionnels_locations = 'f5ny + f5oy + f5py',
# - Détails Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D)
benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'f5hq + f5iq + f5jq', # mbnc_impo
benefices_non_commerciaux_professionnels_declaration_controlee = 'f5qc + f5rc + f5sc', #
benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qi + f5ri + f5si', #
deficits_non_commerciaux_professionnels_declaration_controlee = 'f5qe + f5re + f5se', #
deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qk + f5rk + f5sk', #
# - Détails Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E)
benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = 'f5ku + f5lu + f5mu',
benefices_non_commerciaux_non_professionnels_declaration_controlee = 'f5jg + f5rf + f5sf',
benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sn + f5ns + f5os',
deficits_non_commerciaux_non_professionnels_declaration_controlee = 'f5jj + f5rg + f5sg',
deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sp + f5nu + f5ou',
# - Revenus fonciers
revenus_fonciers_regime_normal = 'f4ba', # f4ba
revenus_fonciers_micro_foncier = 'f4be', # f4be
# Missing financial income (everything is handled box by box in openfisca)
# - Deficits from previous years not yet deducted
# Missing property income (foncier)
# - life annuities: 'f1aw', 'f1bw', 'f1cw', 'f1dw'
# - Deficits: 'f4bb' and following... 'f4bd'
# Missing capital gains (plus-values)
# - Gains from exercising stock options 'f1tv' 'f1uv' 'f1tw' 'f1uw' 'f1tx' 'f1ux'
# Missing revenus_de_remplacement
# - chomeur_longue_duree: 'f1ai', 'f1bi', 'f1ci', 'f1di'
# Missing salarie
# - sal_pen_exo_etr (start = 2013, 1ac, 1bc, 1cc, 1cd)
frais_reels = [
dict(
end = 2014,
start = 2005,
formula = 'f1ak + f1bk + f1ck + f1dk',
),
dict(
end = 2004,
start = 2004,
formula = 'f1ak + f1bk + f1ck + f1dk + f1ek',
),
dict(
start = 2003,
end = 2003,
formula = 'f1ak + f1bk + f1ck + f1dk + f1ek + f1fk',
),
],
# - hsup (f1au, f1bu, f1cu, f1du, f1eu) start 2007
# -
allocations_chomage = [
dict(
start = 2007,
end = 2013,
formula = 'f1ap + f1bp + f1cp + f1dp',
),
dict(
start = 2005,
end = 2006,
formula = 'f1ap + f1bp + f1cp + f1dp + f1ep',
),
dict(
start = 2000,
end = 2004,
formula = 'f1ap + f1bp + f1cp + f1dp + f1ep + f1fp',
),
], # choi
#
pensions_de_retraite = [
dict(
start = 2007,
end = 2013,
formula = 'f1as + f1bs + f1cs + f1ds',
),
dict(
start = 2005,
end = 2006,
formula = 'f1as + f1bs + f1cs + f1ds + f1es',
),
dict(
start = 2000,
end = 2004,
formula = 'f1as + f1bs + f1cs + f1ds + f1es + f1fs',
),
], # rsti
dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés
interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés
assurances_vie_imposees_au_bareme = 'f2ch', # non agrégés
dividendes_imposes_au_prelevement_liberatoire = 'f2da',
interets_imposes_au_prelevement_liberatoire = 'f2ee',
assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh',
plus_values_mobilieres_regime_normal = 'f3vg',
plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ?
plus_values_mobilieres_retraite_dirigeant = 'f3va', # TODO f3vb ?
plus_values_professionnelles_regime_normal = [
dict(
start = 2007, # au moins
end = 2009,
formula = 'f5hz + f5iz + f5jz', # TODO: this is only valid before 2010
),
dict(
start = 2010,
end = 2013, # DONE
formula = 'f5hx + f5ix + f5jx + f5he + f5ie + f5je + f5kq + f5lq + f5ke + f5le + f5me + f5nq + f5oq + f5pq + f5ne + f5oe + f5pe + f5hr + f5ir + f5jr + f5qd + f5rd + f5sd + f5kv + f5lv + f5mv + f5so + f5nt', # + f5mq + f5ot
),
],
plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig',
revenus_distribues_pea_exoneres = [
dict(
start = 2009,
end = 2009,
formula = 'f2gr',
),
],
pensions_alimentaires_percues = 'f1ao + f1bo + f1co + f1do + f1eo + f1fo', # pensions_alimentaires_percues
pensions_alimentaires_verses = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd',
)
level_2_formula_by_variable_name = dict(
salaires = 'salaires_imposables + heures_supplementaires',
revenus_d_activite_non_salariee = 'benefices_agricoles + benefices_industriels_commerciaux + benefices_non_commerciaux', # + revenus_activite_non_salariee_exoneres',
# TODO get parameters form openfisca legislation
benefices_agricoles = 'benefices_agricoles_bruts - 0.5 * deficits_agricoles',
benefices_agricoles_bruts = 'benefices_agricoles_forfait_imposables + benefices_agricoles_reels_imposables + 1.25 * benefices_agricoles_reels_sans_cga_imposables', # TODO get parameters form openfisca legislation
deficits_agricoles = 'benefices_agricoles_reels_deficits + benefices_agricoles_reels_sans_cga_deficits',
# Bénéfices industriels et commerciaux
benefices_industriels_commerciaux = 'benefices_industriels_commerciaux_professionnels + benefices_industriels_commerciaux_non_professionnels',
benefices_industriels_commerciaux_bruts = 'benefices_industriels_commerciaux_professionnels_bruts + benefices_industriels_commerciaux_non_professionnels_bruts',
deficits_industriels_commerciaux = 'deficits_industriels_commerciaux_professionnels + deficits_industriels_commerciaux_non_professionnels',
# - Bénéfices industriels et commerciaux professionnels
benefices_industriels_commerciaux_professionnels = 'benefices_industriels_commerciaux_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_professionnels',
benefices_industriels_commerciaux_professionnels_bruts = 'benefices_industriels_commerciaux_professionnels_micro_entreprise + benefices_industriels_commerciaux_professionnels_reels',
benefices_industriels_commerciaux_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_professionnels_micro_entreprise_services', # TODO check and use legislation parameters
benefices_industriels_commerciaux_professionnels_reels = 'benefices_industriels_commerciaux_professionnels_reels_avec_cga + benefices_industriels_commerciaux_professionnels_reels_sans_cga',
benefices_industriels_commerciaux_professionnels_reels_avec_cga = 'benefices_industriels_commerciaux_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie',
benefices_industriels_commerciaux_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga)', # TODO check and use legislation
deficits_industriels_commerciaux_professionnels = [
dict(
start = 2006,
end = 2009,
formula = 'deficits_industriels_commerciaux_professionnels_normal + deficits_industriels_commerciaux_professionnels_simplifie + deficits_industriels_commerciaux_professionnels_normal_sans_cga + deficits_industriels_commerciaux_professionnels_simplifie_sans_cga',
),
dict(
start = 2010,
end = 2014,
formula = 'deficits_industriels_commerciaux_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga'
),
],
# - Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C)
benefices_industriels_commerciaux_non_professionnels = 'benefices_industriels_commerciaux_non_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_non_professionnels',
benefices_industriels_commerciaux_non_professionnels_bruts = 'benefices_industriels_commerciaux_non_professionnels_micro_entreprise + benefices_industriels_commerciaux_non_professionnels_reels',
benefices_industriels_commerciaux_non_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services', # TODO check and use legislation parameters
benefices_industriels_commerciaux_non_professionnels_reels = 'benefices_industriels_commerciaux_non_professionnels_reels_avec_cga + benefices_industriels_commerciaux_non_professionnels_reels_sans_cga',
benefices_industriels_commerciaux_non_professionnels_reels_avec_cga = [
dict(
start = 2003,
end = 2009,
formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie',
),
dict(
start = 2010,
end = 2014,
formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie',
),
],
benefices_industriels_commerciaux_non_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga)', # TODO check and use legislation
# Bénéfices non commerciaux
benefices_non_commerciaux = 'benefices_non_commerciaux_professionnels + benefices_non_commerciaux_non_professionnels',
benefices_non_commerciaux_bruts = 'benefices_non_commerciaux_professionnels_bruts + benefices_non_commerciaux_non_professionnels_bruts',
deficits_non_commerciaux = 'deficits_non_commerciaux_professionnels + deficits_non_commerciaux_non_professionnels',
deficits_industriels_commerciaux_non_professionnels = [
dict(
start = 2002, # au moins
end = 2009,
formula = 'deficits_industriels_commerciaux_non_professionnels_normal + deficits_industriels_commerciaux_non_professionnels_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_sans_cga + deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga'
),
dict(
start = 2010,
end = 2014, # au moins
formula = 'deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga',
),
],
# - Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D)
benefices_non_commerciaux_professionnels = 'benefices_non_commerciaux_professionnels_bruts - 0.5 * deficits_non_commerciaux_professionnels',
benefices_non_commerciaux_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_professionnels_declaration_controlee + 1.25 * benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga',
deficits_non_commerciaux_professionnels = 'deficits_non_commerciaux_professionnels_declaration_controlee + deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga',
# - Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E)
benefices_non_commerciaux_non_professionnels = 'benefices_non_commerciaux_non_professionnels_bruts - 0.5 * deficits_non_commerciaux_non_professionnels',
benefices_non_commerciaux_non_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_non_professionnels_declaration_controlee + 1.25 * benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga',
deficits_non_commerciaux_non_professionnels = 'deficits_non_commerciaux_non_professionnels_declaration_controlee + deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga',
# Revenus Fonciers
revenus_fonciers = 'revenus_fonciers_regime_normal + revenus_fonciers_micro_foncier',
revenus_de_remplacement = 'pensions_de_retraite + allocations_chomage',
revenus_financiers_hors_plus_values = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire',
revenus_financiers = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire + plus_values',
plus_values = 'plus_values_mobilieres + plus_values_professionnelles',
plus_values_mobilieres = 'plus_values_mobilieres_regime_normal + plus_values_mobilieres_stock_options + plus_values_mobilieres_retraite_dirigeant', # analysis:ignore
plus_values_professionnelles = 'plus_values_professionnelles_regime_normal + plus_values_professionnelles_retraite_dirigeant', # analysis:ignore
revenus_imposes_au_bareme = 'dividendes_imposes_au_bareme + interet_imposes_au_bareme + assurances_vie_imposees_au_bareme', # analysis:ignore
revenus_imposes_au_prelevement_liberatoire = 'dividendes_imposes_au_prelevement_liberatoire + interets_imposes_au_prelevement_liberatoire + assurances_vie_imposees_au_prelevement_liberatoire', #analysis:ignore
)
#
#raw_data = get_denombrements_fiscaux_data_frame(years = [2010], fill_value = 0)
#aggregates = build_aggregates(
# raw_data,
# formula_by_variable_name,
# level_2_formula_by_variable_name = level_2_formula_by_variable_name,
# years = [2010],
# fill_value = numpy.NaN,
# )
def build_irpp_tables(years = None, fill_value = numpy.NaN):
assert years is not None
assert isinstance(years, list)
raw_data = get_denombrements_fiscaux_data_frame(years = years, fill_value = 0)
aggregates = build_aggregates(
raw_data,
formula_by_variable_name,
level_2_formula_by_variable_name = level_2_formula_by_variable_name,
years = years,
fill_value = fill_value,
)
data_frame_by_irpp_table_name = collections.OrderedDict([
# 1. Table IRPP1: Income reported in the income tax returns
('irpp_1', aggregates[[
'salaires',
'salaires_imposables',
'heures_supplementaires',
# TODO
# 'revenus_d_activite_non_salariee'
# 'ba',
# 'bic',
# 'bnc',
# 'revenus_activite_non_salariee_exoneres',
'revenus_de_remplacement',
'pensions_de_retraite',
'allocations_chomage',
'revenus_fonciers',
'revenus_fonciers_regime_normal',
'revenus_fonciers_micro_foncier',
'revenus_financiers',
'frais_reels',
'pensions_alimentaires_percues',
]]),
# 2. Table IRPP2: Breakdown of financial income (interest, dividends, capital gains) reported in the
# income tax returns (taxed at the progressive scale, taxed at the flat-rate withholding (PL), and
# capital gains)
('irpp_2', aggregates[[
'revenus_imposes_au_bareme',
'dividendes_imposes_au_bareme',
'interet_imposes_au_bareme',
'assurances_vie_imposees_au_bareme',
'revenus_imposes_au_prelevement_liberatoire',
'dividendes_imposes_au_prelevement_liberatoire',
'interets_imposes_au_prelevement_liberatoire',
'assurances_vie_imposees_au_prelevement_liberatoire',
'plus_values',
'revenus_financiers',
'revenus_financiers_hors_plus_values'
]]),
# 3. Table IRPP3: Capital gains on securities and professional capital gains
('irpp_3', aggregates[[
'plus_values',
'plus_values_mobilieres',
'plus_values_mobilieres_regime_normal',
'plus_values_mobilieres_stock_options',
'plus_values_mobilieres_retraite_dirigeant',
'plus_values_professionnelles',
'plus_values_professionnelles_regime_normal',
'plus_values_professionnelles_retraite_dirigeant',
]]),
('irpp_4', aggregates[[
'revenus_d_activite_non_salariee',
'benefices_agricoles',
'benefices_agricoles_bruts',
'deficits_agricoles',
'benefices_industriels_commerciaux',
'benefices_industriels_commerciaux_bruts',
'deficits_industriels_commerciaux',
# 'bnc',
# 'revenus_activite_non_salariee_exoneres',
]]),
# ('irpp_5_a', aggregates[[
# 'benefices_agricoles',
# 'benefices_agricoles_forfait_exoneres',
# 'benefices_agricoles_forfait_imposables',
# 'benefices_agricoles_reels_exoneres',
# 'benefices_agricoles_reels_imposables',
# 'benefices_agricoles_reels_deficits',
# 'benefices_agricoles_reels_sans_cga_exoneres',
# 'benefices_agricoles_reels_sans_cga_imposables',
# 'benefices_agricoles_reels_sans_cga_deficits',
# ]])
])
return data_frame_by_irpp_table_name
of_name_by_irpp_table_name = dict(
salaires_imposables = 'salaire_imposable',
heures_supplementaires = 'hsup',
benefices_agricoles_forfait_exoneres = 'frag_exon',
benefices_agricoles_forfait_imposables = 'frag_impo',
benefices_agricoles_reels_exoneres = 'arag_exon',
benefices_agricoles_reels_sans_cga_deficits = 'nrag_defi',
benefices_agricoles_reels_imposables = 'arag_impg',
benefices_agricoles_reels_deficits = 'arag_defi',
benefices_agricoles_reels_sans_cga_exoneres = 'nrag_exon',
benefices_agricoles_reels_sans_cga_imposables = 'arag_defi',
benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'mbic_impv',
benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'mbic_imps',
benefices_industriels_commerciaux_professionnels_reels_exoneres = 'mbic_imps',
benefices_industriels_commerciaux_professionnels_reels_imposables_normal = 'abic_impn',
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = 'abic_imps',
benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'nbic_exon',
benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'nbic_impn',
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'nbic_mvct',
deficits_industriels_commerciaux_professionnels_normal = 'abic_defn',
deficits_industriels_commerciaux_professionnels_simplifie = 'abic_defs',
deficits_industriels_commerciaux_professionnels_sans_cga = 'nbic_defn',
deficits_industriels_commerciaux_professionnels_locations = 'nbic_defs',
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'macc_exon',
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'macc_impv',
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'macc_impS',
benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'aacc_exon',
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'aacc_impn',
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'aacc_imps',
benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'nacc_exon',
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'nacc_impn',
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'nacc_meup',
deficits_industriels_commerciaux_non_professionnels_normal = 'aacc_defn',
deficits_industriels_commerciaux_non_professionnels_simplifie = 'aacc_gits',
deficits_industriels_commerciaux_non_professionnels_sans_cga = 'nacc_defn',
benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'mbnc_impo',
benefices_non_commerciaux_professionnels_declaration_controlee = '',
benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = '',
deficits_non_commerciaux_professionnels_declaration_controlee = '',
deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = '',
benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = '',
benefices_non_commerciaux_non_professionnels_declaration_controlee = '',
benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = '',
revenus_fonciers_regime_normal = 'f4ba', # f4ba
revenus_fonciers_micro_foncier = 'f4be', # f4be
allocations_chomage = 'cho',
pensions_de_retraite = 'rst',
# dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés
# interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés
assurances_vie_imposees_au_bareme = 'f2ch', # non agrégés
dividendes_imposes_au_prelevement_liberatoire = 'f2da',
interets_imposes_au_prelevement_liberatoire = 'f2ee',
assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh',
plus_values_mobilieres_regime_normal = 'f3vg',
# plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ?
plus_values_mobilieres_retraite_dirigeant = 'f3va', # TODO f3vb ?
# plus_values_professionnelles_regime_normal = 'f5hz + f5iz + f5jz', # TODO: this is only valid before 2010
# plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig',
revenus_distribues_pea_exoneres = 'f2gr',
pensions_alimentaires_percues = 'pensions_alimentaires_percues', # pensions_alimentaires_percues
# pensions_alimentaires_versess = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd',
)
if __name__ == '__main__':
data_frame_by_irpp_table_name = build_irpp_tables(years = range(2008, 2013), fill_value = 0)
| gpl-3.0 |
kcompher/BuildingMachineLearningSystemsWithPython | ch08/stacked.py | 4 | 1057 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from sklearn.linear_model import LinearRegression
from load_ml100k import load
import numpy as np
import similar_movie
import usermodel
import corrneighbours
reviews = load()
reg = LinearRegression()
es = np.array([
usermodel.all_estimates(reviews),
corrneighbours.all_estimates(reviews),
    similar_movie.all_estimates(reviews),
])
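# `es` stacks the per-user estimates of the three base recommenders into a
# single array of shape (3, n_users, n_movies); the per-user linear regression
# in the loop below learns how to blend these three estimates (stacking).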
reviews = reviews.toarray()
total_error = 0.0
coefficients = []
for u in xrange(reviews.shape[0]):
es0 = np.delete(es, u, 1)
r0 = np.delete(reviews, u, 0)
X, Y = np.where(r0 > 0)
X = es[:, X, Y]
y = r0[r0 > 0]
reg.fit(X.T, y)
coefficients.append(reg.coef_)
r0 = reviews[u]
X = np.where(r0 > 0)
p0 = reg.predict(es[:, u, X].squeeze().T)
err0 = r0[r0 > 0] - p0
total_error += np.dot(err0, err0)
print(u)
| mit |
jseabold/statsmodels | statsmodels/tsa/tests/test_adfuller_lag.py | 4 | 1833 | # -*- coding: utf-8 -*-
"""Test for autolag of adfuller
Created on Wed May 30 21:39:46 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import statsmodels.tsa.stattools as tsast
from statsmodels.datasets import macrodata
def test_adf_autolag():
#see issue #246
#this is mostly a unit test
d2 = macrodata.load_pandas().data
for k_trend, tr in enumerate(['nc', 'c', 'ct', 'ctt']):
#[None:'nc', 0:'c', 1:'ct', 2:'ctt']
x = np.log(d2['realgdp'].values)
xd = np.diff(x)
#check exog
adf3 = tsast.adfuller(x, maxlag=None, autolag='aic',
regression=tr, store=True, regresults=True)
st2 = adf3[-1]
assert_equal(len(st2.autolag_results), 15 + 1) #+1 for lagged level
for i, res in sorted(st2.autolag_results.items())[:5]:
lag = i - k_trend
#assert correct design matrices in _autolag
assert_equal(res.model.exog[-10:,k_trend], x[-11:-1])
assert_equal(res.model.exog[-1,k_trend+1:], xd[-lag:-1][::-1])
#min-ic lag of dfgls in Stata is also 2, or 9 for maic with notrend
assert_equal(st2.usedlag, 2)
#same result with lag fixed at usedlag of autolag
adf2 = tsast.adfuller(x, maxlag=2, autolag=None, regression=tr)
assert_almost_equal(adf3[:2], adf2[:2], decimal=12)
tr = 'c'
#check maxlag with autolag
adf3 = tsast.adfuller(x, maxlag=5, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 5 + 1)
adf3 = tsast.adfuller(x, maxlag=0, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 0 + 1)
| bsd-3-clause |
tiagofrepereira2012/tensorflow | tensorflow/examples/learn/text_classification.py | 12 | 6651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
  # word_list is a list of MAX_DOCUMENT_LENGTH tensors of shape
  # [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for softmax
# classification over output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
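# Both model functions above consume a `words` feature of shape
# [batch_size, MAX_DOCUMENT_LENGTH] holding integer word ids (produced by the
# VocabularyProcessor in main()) and return an EstimatorSpec via the shared
# softmax-classification helper; main() switches between them with --bow_model.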
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
rrohan/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
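# The four cases compare full-batch KMeans against MiniBatchKMeans, each with
# the 'k-means++' and the purely random init strategy; the extra keyword
# arguments (max_no_improvement, init_size) only apply to the mini-batch runs.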
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
ZhouJiaLinmumu/Grasp-and-lift-EEG-challenge | lvl2/genEns.py | 4 | 3742 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 14:12:12 2015
@author: rc, alex
"""
import os
import sys
if __name__ == '__main__' and __package__ is None:
filePath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(filePath)
import numpy as np
import yaml
from copy import deepcopy
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import LeaveOneLabelOut
from preprocessing.aux import getEventNames
from utils.ensembles import createEnsFunc, loadPredictions, getLvl1ModelList
from ensembling.WeightedMean import WeightedMeanClassifier
from ensembling.NeuralNet import NeuralNet
from ensembling.XGB import XGB
def _from_yaml_to_func(method, params):
"""go from yaml to method.
Needs to be here for accessing local variables.
"""
prm = dict()
if params is not None:
for key, val in params.iteritems():
prm[key] = eval(str(val))
return eval(method)(**prm)
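# Illustrative sketch of the YAML layout this script expects (hedged: only the
# keys mirror the parsing code below; the model name, parameter values and
# ensemble entries are made-up examples):
#
#   Meta:
#     file: lvl2_example          # basename for the saved prediction files
#     subsample: 1                # optional
#   Model:
#     XGB:                        # one of WeightedMeanClassifier / NeuralNet / XGB
#       ensemble: ['model_a', 'model_b']   # lvl1 prediction sets to aggregate
#       # any further keys are passed to the classifier constructor via eval()
#   addSubjectID: True            # optional top-level flag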
# ## here read YAML and build models ###
yml = yaml.load(open(sys.argv[1]))
fileName = yml['Meta']['file']
if 'subsample' in yml['Meta']:
subsample = yml['Meta']['subsample']
else:
subsample = 1
modelName, modelParams = yml['Model'].iteritems().next()
model_base = _from_yaml_to_func(modelName, modelParams)
ensemble = yml['Model'][modelName]['ensemble']
addSubjectID = True if 'addSubjectID' in yml.keys() else False
mode = sys.argv[2]
if mode == 'val':
test = False
elif mode == 'test':
test = True
else:
    raise ValueError('Invalid mode. Please specify either val or test')
print('Running %s in mode %s, predictions will be saved as %s' % (modelName,mode,fileName))
######
cols = getEventNames()
ids = np.load('../infos_test.npy')
subjects_test = ids[:, 1]
series_test = ids[:, 2]
ids = ids[:, 0]
labels = np.load('../infos_val.npy')
subjects = labels[:, -2]
series = labels[:, -1]
labels = labels[:, :-2]
allCols = range(len(cols))
# ## loading predictions ###
files = getLvl1ModelList()
preds_val = OrderedDict()
for f in files:
loadPredictions(preds_val, f[0], f[1])
# validity check
for m in ensemble:
assert(m in preds_val)
# ## train/test ###
aggr = createEnsFunc(ensemble)
dataTrain = aggr(preds_val)
preds_val = None
# optionally adding subjectIDs
if addSubjectID:
dataTrain = np.c_[dataTrain, subjects]
np.random.seed(4234521)
if test:
# train the model
model = deepcopy(model_base)
model.fit(dataTrain[::subsample], labels[::subsample])
dataTrain = None
# load test data
preds_test = OrderedDict()
for f in files:
loadPredictions(preds_test, f[0], f[1], test=True)
dataTest = aggr(preds_test)
preds_test = None
# switch to add subjects
if addSubjectID:
dataTest = np.c_[dataTest, subjects_test]
# get predictions
p = model.predict_proba(dataTest)
np.save('test/test_%s.npy' % fileName, [p])
else:
auc_tot = []
p = np.zeros(labels.shape)
cv = LeaveOneLabelOut(series)
for fold, (train, test) in enumerate(cv):
model = deepcopy(model_base)
if modelName == 'NeuralNet':
# passing also test data to print out test error during training
model.fit(dataTrain[train], labels[train], dataTrain[test],
labels[test])
else:
model.fit(dataTrain[train][::subsample], labels[train][::subsample])
p[test] = model.predict_proba(dataTrain[test])
auc = [roc_auc_score(labels[test][:, col], p[test][:, col])
for col in allCols]
auc_tot.append(np.mean(auc))
print('Fold %d, score: %.5f' % (fold, auc_tot[-1]))
print('AUC: %.5f' % np.mean(auc_tot))
np.save('val/val_%s.npy' % fileName, [p])
| bsd-3-clause |
mjharriso/ConDistAreas | ConDistAreas.py | 1 | 4102 | #Coded by Matthew Harrison, July, 2015.
#Read ESRI shapefiles and calculate district areas
#Using Albers Equal Area Projection for North America
#Including Alaska and Hawaii
from mpl_toolkits.basemap import Basemap
from pyproj import Proj
from shapely.geometry import LineString, Point, shape
import fiona
from fiona import collection
import numpy as np
import pandas
import argparse
#Shapefiles should have been downloaded from
#http://cdmaps.polisci.ucla.edu/
#and unzipped in the current directory.
#for con in np.arange(106,114):
for con in [114]:
fnam='districtShapes/districts'+str(con)+'.shp'
print fnam
districts=fiona.open(fnam)
lat1=districts.bounds[1]
lat2=districts.bounds[3]
m = Proj(proj='aea',lat_1=lat1,lat_2=lat2,lat_0=np.mean((lat1,lat2)))
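    # The Albers equal-area ('aea') projection returns coordinates in metres,
    # so shape(poly).area computed below is in square metres; it is divided by
    # 1e6 when written out, giving district areas in square kilometres.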
Districts=[]
for pol in fiona.open(fnam):
if pol['geometry'] is None:
print 'Bad polygon',pol['properties']
continue
# Polygons
coords=pol['geometry']['coordinates']
if pol['geometry']['type'] == 'Polygon':
lons=[];lats=[]
for c in coords[0]:
lons.append(c[0])
lats.append(c[1])
try:
x,y=m(lons,lats)
except:
print pol['properties']
print pol['geometry']['type']
raise
poly={'type':'Polygon','coordinates':[zip(x,y)]}
center=shape(poly).centroid
ccoords= shape(center).coords[:][0]
xc=ccoords[0];yc=ccoords[1]
lonc,latc=m(xc,yc,inverse=True,radians=False)
Districts.append({'STATENAME':pol['properties']['STATENAME'],
'DISTRICT':pol['properties']['DISTRICT'],
'COUNTY':pol['properties']['COUNTY'],
'ID':pol['properties']['ID'],'area':shape(poly).area,'centroid':[lonc,latc]})
# print shape(poly).centroid
elif pol['geometry']['type'] == 'MultiPolygon':
# Multiple Polygons
for p in coords:
lons=[];lats=[]
for c in p[0]:
lons.append(c[0])
lats.append(c[1])
try:
x,y=m(lons,lats)
except:
print pol['properties']
print pol['geometry']['type']
raise
poly={'type':'Polygon','coordinates':[zip(x,y)]}
center=shape(poly).centroid
ccoords= shape(center).coords[:][0]
xc=ccoords[0];yc=ccoords[1]
lonc,latc=m(xc,yc,inverse=True,radians=False)
Districts.append({'STATENAME':pol['properties']['STATENAME'],
'DISTRICT':pol['properties']['DISTRICT'],
'COUNTY':pol['properties']['COUNTY'],
'ID':pol['properties']['ID'],'area':shape(poly).area,'centroid':[lonc,latc]})
# print shape(poly).centroid.wkt
Districts=sorted(Districts,key=lambda d:(d['STATENAME'],int(d['DISTRICT'])))
    # Write areas and centroids to a text file
filenam='areas'+str(con)+'.txt'
f=open(filenam,'w')
pr=None
for d in Districts:
if pr is not None:
if d['STATENAME'] != pr['STATENAME']:
print d['STATENAME']
if d['DISTRICT']==pr['DISTRICT']:
a=a+d['area']
center.append(d['centroid'])
else:
line=pr['ID'],pr['DISTRICT'],'area='+str(a/1.e6),pr['STATENAME']+'\n'
f.write(','.join(line))
line=pr['ID'],pr['DISTRICT'],'centroid='+str(center)+'\n'
f.write(','.join(line))
a=d['area']
center=[d['centroid']]
pr=d.copy()
else:
pr=d.copy()
a=d['area']
center=[d['centroid']]
line=pr['ID'],pr['DISTRICT'],'area='+str(a/1.e6),pr['STATENAME']+'\n'
f.write(','.join(line))
line=pr['ID'],pr['DISTRICT'],'centroid='+str(center)+'\n'
f.write(','.join(line))
f.close()
| gpl-2.0 |
HKUST-SING/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
  # image width and height, and the final dimension being the number of
  # color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
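# Shape flow through conv_model, for reference: 28x28x1 input -> 5x5 conv (32
# maps) -> 2x2 max-pool -> 14x14x32 -> 5x5 conv (64 maps) -> 2x2 max-pool ->
# 7x7x64 -> flatten -> dense 1024 with dropout -> dense 10 logits.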
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
sarahgrogan/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be able to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a an vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
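# --- Hedged completion sketch (not part of the original skeleton): one possible
# way to fill in the TASK steps above. The names `clf` and `y_predicted` are the
# ones the reporting code below expects; the char n-gram range and the default
# Perceptron settings are illustrative assumptions.
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)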
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
val-iisc/sketch-parse | exp-src/table3.py | 1 | 8623 | import scipy
from scipy import ndimage
import cv2
import numpy as np
import sys
import torch
import resnet_dilated_frozen_r5_D #TODO
import resnet_dilated_frozen_r5_D #TODO
import resnet_dilated_frozen_r5_D_pose #TODO
import resnet_dilated_frozen_r5_D_pose #TODO
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from collections import OrderedDict
import os
from os import walk
import matplotlib.pyplot as plt
import torch.nn as nn
#import quant
#import pdb
#import matlab.engine
#eng = matlab.engine.start_matlab()
def get_iou(pred,gt,class_):
gt = gt.astype(np.float32)
pred = pred.astype(np.float32)
max_label_dict = {'cow':4,'horse':4,'cat':4,'dog':4,'sheep':4,'bus':6,'car':5,'bicycle':4,'motorbike':4, 'bird':8, 'airplane':5}
max_label = max_label_dict[class_]
count = np.zeros((max_label+1,))
for j in range(max_label+1):
x = np.where(pred==j)
p_idx_j = set(zip(x[0].tolist(),x[1].tolist()))
x = np.where(gt==j)
GT_idx_j = set(zip(x[0].tolist(),x[1].tolist()))
#pdb.set_trace()
n_jj = set.intersection(p_idx_j,GT_idx_j)
u_jj = set.union(p_idx_j,GT_idx_j)
if len(GT_idx_j)!=0:
count[j] = float(len(n_jj))/float(len(u_jj))
result_class = count
Aiou = np.sum(result_class[:])/float(len(np.unique(gt)))
return Aiou
def merge_parts(map_, i):
if i == 4:
map_ = change_parts(map_,7,2)
map_ = change_parts(map_,8,5)
return map_
def change_parts(map_,a,b):
temp = np.where(map_==a)
map_[temp[0],temp[1]] = b
return map_
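# For the router branch with selector 4 (the airplane/bird classes),
# merge_parts maps part label 7 to 2 and label 8 to 5, so predictions and
# ground truth are compared over the same reduced label set.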
gpu0 = 0
torch.cuda.set_device(gpu0)
#caffe.set_mode_gpu()
#caffe.set_device(gpu0)
#net = caffe.Net('data/train_d1_contour1by2.prototxt', 'data/train_d1_contour1by2_iter_20000.caffemodel',caffe.TEST)
sketch_root = 'data/sketch-dataset/PNG_untouched/'
model_A = getattr(resnet_dilated_frozen_r5_D,'Res_Deeplab')() #TODO
model_B = getattr(resnet_dilated_frozen_r5_D,'Res_Deeplab')() #TODO
model_C = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_D = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_E = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_A.eval()
model_B.eval()
model_C.eval()
model_D.eval()
model_E.eval()
counter = 0
model_A.cuda()
model_B.cuda()
model_C.cuda()
model_D.cuda()
model_E.cuda()
file_data = open('pred_gt.txt').readlines()
dict_pred = {}
dict_label = {}
for i in file_data:
i_split = i[:-1].split(' ')
dict_pred[i_split[0]] = int(i_split[1])
dict_label[i_split[0]] = int(i_split[2])
prefix_A= 'model_r5_C3_14000.pth' #B_r5
prefix_B= 'model_r5_C3seg2_14000.pth' #BS_r5
prefix_C= 'model_r5_p50x_D5_19000.pth' #BP_r5
prefix_D= 'model_r5_p50x_D1_17000.pth' #BSP_r5
prefix_E= 'model_r5_p50x_D1_17000.pth' #BSP_r5 with 100% router accuracy
for iter in range(1):
saved_state_dict_A = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_A)
saved_state_dict_B = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_B)
saved_state_dict_C = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_C)
saved_state_dict_D = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_D)
saved_state_dict_E = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_E)
#saved_state_dict = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/DeepLab_20k_GB_fix_noCUDNN_bsize1_20k_SegnetLoss_prototype_20000.pth')
if counter==0:
print prefix_A
print prefix_B
print prefix_C
print prefix_D
print prefix_E
counter+=1
#saved_state_dict = torch.load('/data1/ravikiran/pytorch-resnet/MS_DeepLab_resnet_tained_sketches.pth')
model_A.load_state_dict(saved_state_dict_A)
model_B.load_state_dict(saved_state_dict_B)
model_C.load_state_dict(saved_state_dict_C)
model_D.load_state_dict(saved_state_dict_D)
model_E.load_state_dict(saved_state_dict_E)
class_list = ['cow-0', 'horse-0','cat-1','dog-1','sheep-1','bus-2','car-2','bicycle-3','motorbike-3','airplane-4','bird-4'] #TODO
pytorch_list_A = []
pytorch_list_B = []
pytorch_list_C = []
pytorch_list_D = []
pytorch_list_E = []
class_ious_A = []
class_ious_B = []
class_ious_C = []
class_ious_D = []
class_ious_E = []
for class_selector in class_list:
pytorch_per_class_A = []
pytorch_per_class_B = []
pytorch_per_class_C = []
pytorch_per_class_D = []
pytorch_per_class_E = []
class_split = class_selector.split('-')
class_ = class_split[0]
selector = int(class_split[1])
gt_path = 'data/sketch-dataset/test_GT/'+class_
img_list = next(os.walk(gt_path))[2]
path = sketch_root + class_
for i in img_list:
img = cv2.imread(path+'/'+i)
kernel = np.ones((2,2),np.uint8)
# img = cv2.erode(img[:,:,0],kernel,iterations = 1)
img = ndimage.grey_erosion(img[:,:,0].astype(np.uint8), size=(2,2))
img = np.repeat(img[:,:,np.newaxis],3,2)
gt = cv2.imread(gt_path+'/'+i)
selector_pred = dict_pred[i]
output_A = model_A([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_B = model_B([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_C = model_C([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_D = model_D([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_E = model_E([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector])
#for k in range(4):
# output_temp = output[k].cpu().data[0].numpy()
# output_temp = output_temp.transpose(1,2,0)
# output_temp = np.argmax(output_temp,axis = 2)
# plt.imshow(output_temp)
# plt.show()
interp = nn.UpsamplingBilinear2d(size=(321, 321))
output_A = merge_parts(np.argmax(interp(output_A[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_B = merge_parts(np.argmax(interp(output_B[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_C = merge_parts(np.argmax(interp(output_C[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_D = merge_parts(np.argmax(interp(output_D[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
            output_E = merge_parts(np.argmax(interp(output_E[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector)
gt = merge_parts(gt, selector)
iou_pytorch_A = get_iou(output_A,gt,class_)
iou_pytorch_B = get_iou(output_B,gt,class_)
iou_pytorch_C = get_iou(output_C,gt,class_)
iou_pytorch_D = get_iou(output_D,gt,class_)
iou_pytorch_E = get_iou(output_E,gt,class_)
pytorch_list_A.append(iou_pytorch_A)
pytorch_list_B.append(iou_pytorch_B)
pytorch_list_C.append(iou_pytorch_C)
pytorch_list_D.append(iou_pytorch_D)
pytorch_list_E.append(iou_pytorch_E)
pytorch_per_class_A.append(iou_pytorch_A)
pytorch_per_class_B.append(iou_pytorch_B)
pytorch_per_class_C.append(iou_pytorch_C)
pytorch_per_class_D.append(iou_pytorch_D)
pytorch_per_class_E.append(iou_pytorch_E)
class_ious_A.append(np.sum(np.asarray(pytorch_per_class_A))/len(pytorch_per_class_A))
class_ious_B.append(np.sum(np.asarray(pytorch_per_class_B))/len(pytorch_per_class_B))
class_ious_C.append(np.sum(np.asarray(pytorch_per_class_C))/len(pytorch_per_class_C))
class_ious_D.append(np.sum(np.asarray(pytorch_per_class_D))/len(pytorch_per_class_D))
class_ious_E.append(np.sum(np.asarray(pytorch_per_class_E))/len(pytorch_per_class_E))
print 'B r5', np.sum(np.asarray(pytorch_list_A))/len(pytorch_list_A),'per class', class_ious_A
print 'BC r5', np.sum(np.asarray(pytorch_list_B))/len(pytorch_list_B),'per class', class_ious_B
print 'BP r5', np.sum(np.asarray(pytorch_list_C))/len(pytorch_list_C),'per class', class_ious_C
print 'BCP r5', np.sum(np.asarray(pytorch_list_D))/len(pytorch_list_D),'per class', class_ious_D
print 'BCP r5 with 100% classifier ', np.sum(np.asarray(pytorch_list_E))/len(pytorch_list_E),'per class', class_ious_E
| mit |
smukoehler/SDB-control | mpcdriver.py | 1 | 3535 | import urlparse
import datetime
import urllib2
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
from sklearn import linear_model
from smap.archiver.client import RepublishClient
from functools import partial
from mpc import *
class SimpleMPC(SmapDriver):
def setup(self, opts):
self.rate = float( opts.get('rate' , 120 ) )
self.archiver_url = opts.get('archiver')
        self.input_variables = opts.get('input_variables', None).split(',')
self.state_variables = opts.get('state_variables', None).split(',')
self.read_stream_data()
self.setup_model()
'''
Create MPC kernel
'''
def setup_model(self):
self.mpc_model = MPC()
'''
Function that runs periodically to update the model
'''
def start(self):
self._loop = periodicSequentialCall(self.predict)
self._loop.start(self.rate)
for clientlist in self.repubclients.itervalues():
for c in clientlist:
c.connect()
def predict(self):
# Input vector at step t-1
input_vector_t_1 = self.construct_input(-1)
# State vector at step t-1
state_vector_t_1 = self.construct_state(-1)
if input_vector_t_1 == None or state_vector_t_1 == None:
return
# Call mpc kernel to add data
self.mpc_model.add_data( input_vector_t_1 , state_vector_t_1 )
# Input vector at time t
input_vector_t = self.construct_input(0)
# predict by calling at mpc kernel
prediction = self.mpc_model.predict( input_vector_t )
# Get model parameters
params = self.mpc_model.get_model()
        # Post-process the latest prediction (step index 0 = the current step)
        self.post_processing(0, prediction, self.construct_state(0)[0], params)
'''
Reads data to be supplied to build the model
'''
def read_stream_data(self):
self.points = {}
self.repubclients = {}
for name in self.input_variables:
point = name.strip()
self.points[point] = []
self.repubclients[point] = [RepublishClient(self.archiver_url, partial(self.cb, point), restrict="Metadata/Name = '" + str(point) + "'")]
for name in self.state_variables:
point = name.strip()
self.points[point] = []
self.repubclients[point] = [RepublishClient(self.archiver_url, partial(self.cb, point), restrict="Metadata/Name = '" + str(point) + "'")]
def cb(self, point, _, data):
value = data[-1][-1][1]
print 'Received',point,'=',value
self.points[point].append(value)
'''
Constructs an input vector at a particular timestep
'''
def construct_input(self, step):
input_vector = []
try:
for point in self.input_variables:
input_vector.append( self.points[point][ step - 1] )
for point in self.state_variables:
input_vector.append( self.points[point][ step - 2] )
except:
return None
return input_vector
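    # Each regressor input is ARX-style: the control-input readings from one
    # step back are concatenated with the state readings from two steps back
    # (relative to the requested step), matching how predict() queries the model.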
'''
Constructs the state vector at a particular timestep
'''
def construct_state(self, step):
state_vector = []
try:
for point in self.state_variables:
state_vector.append( self.points[point][ step - 1 ])
except:
return None
return state_vector
'''
Do post processing
'''
def post_processing(self, step, prediction, state_t, params ):
# Do post processing
for i in range(len(self.state_variables)):
self.add('/' + self.state_variables[i] + "-predicted" , prediction[i] )
for j in range(len(self.input_variables)):
self.add('/' + self.state_variables[i] + "-mpc-param-effect-of-" + self.input_variables[j], params[j])
for j in range(len(self.state_variables)):
self.add('/' + self.state_variables[i] + "-mpc-param-effect-of-" + self.state_variables[j], params[ len(self.input_variables) + j])
| bsd-2-clause |
gavruskin/microinteractions | data_preprocess_Development.py | 1 | 9649 | import pandas as pd
data = pd.read_csv("DevelopmentData.csv")
n = len(data.columns)
# Add all parameters (Taylor coefficients) as new columns, initialised to 0:
for i in range(data.shape[0]):
for j in range(n+2, n+34):
data.set_value(i, j, 0)
data.rename(columns={n+2: "a", n+3: "a1", n+4: "a2", n+5: "a3", n+6: "a4", n+7: "a5",
n+8: "b12", n+9: "b13", n+10: "b14", n+11: "b15", n+12: "b23", n+13: "b24",
n+14: "b25", n+15: "b34", n+16: "b35", n+17: "b45", n+18: "c123", n+19: "c124",
n+20: "c125", n+21: "c134", n+22: "c135", n+23: "c145", n+24: "c234", n+25: "c235",
n+26: "c245", n+27: "c345", n+28: "d1234", n+29: "d1235", n+30: "d1245",
n+31: "d1345", n+32: "d2345", n+33: "e12345"}, inplace=True)
# Change coefficients corresponding to present effects to 1:
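# Each treatment code ('treat') corresponds to a subset of the 5 species. The
# blocks below set to 1 the indicator columns for the intercept (a), the main
# effects (a1..a5) of the species present, and every pairwise (b..), three-way
# (c..), four-way (d..) and five-way (e12345) interaction among them.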
for index, row in data.iterrows():
combo = row["treat"]
if combo == 1:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
if combo == 2:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
if combo == 3:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
if combo == 4:
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
if combo == 5:
data.set_value(index, "a", 1)
data.set_value(index, "a5", 1)
if combo == 6:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "b12", 1)
if combo == 7:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b13", 1)
if combo == 8:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b14", 1)
if combo == 9:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b15", 1)
if combo == 10:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b23", 1)
if combo == 11:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b24", 1)
if combo == 12:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b25", 1)
if combo == 13:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b34", 1)
if combo == 14:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b35", 1)
if combo == 15:
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b45", 1)
if combo == 16:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "c123", 1)
if combo == 17:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "c124", 1)
if combo == 18:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "c125", 1)
if combo == 22:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c234", 1)
if combo == 25:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c345", 1)
if combo == 19:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c134", 1)
if combo == 20:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c135", 1)
if combo == 21:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c145", 1)
if combo == 24:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c245", 1)
if combo == 23:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c235", 1)
if combo == 26:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "d1234", 1)
if combo == 27:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "d1235", 1)
if combo == 28:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "d1245", 1)
if combo == 29:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1345", 1)
if combo == 30:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d2345", 1)
if combo == 31:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1234", 1)
data.set_value(index, "d1235", 1)
data.set_value(index, "d1245", 1)
data.set_value(index, "d1345", 1)
data.set_value(index, "d2345", 1)
data.set_value(index, "e12345", 1)
if combo == 32:
data.set_value(index, "a", 1)
data.to_csv("DevelopmentData_processed.csv")
| mit |
CylanceSPEAR/IntroductionToMachineLearningForSecurityPros | IDPanel/train_lr_model.py | 1 | 4200 | from idpanel.training.vectorization import load_raw_feature_vectors
from idpanel.training.features import load_raw_features
from idpanel.labels import load_labels
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import warnings
from sklearn.metrics import roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
import pickle
def classify(model, sample):
labels = sorted(model.keys())
proba = []
for label in labels:
proba.append(model[label].predict_proba(sample)[0, 1])
label = None
proba = np.array(proba)
if (proba > 0.5).sum() > 0:
label = labels[proba.argmax()]
return label, labels, proba
if __name__ == "__main__" or True:
from argparse import ArgumentParser
parser = ArgumentParser(
prog=__file__,
description="Train Logistic Regression Model",
)
parser.add_argument("-p", "--penalty", choices=["l1", "l2"], default="l2")
parser.add_argument("-d", "--dual", action='store_true', default=False)
parser.add_argument("-C", type=float, default=1.0)
parser.add_argument("-f", "--fit-intercept", default=True, action='store_true')
parser.add_argument("-i", "--intercept-scaling", type=float, default=1.0)
parser.add_argument("-m", "--max-iter", type=int, default=100)
parser.add_argument("-s", "--solver", choices=["newton-cg", "lbfgs", "liblinear", "sag"], default="liblinear")
parser.add_argument("-t", "--tol", type=float, default=0.0001)
args = parser.parse_args()
warnings.warn = lambda x, y: x
label_indeces = load_labels()
raw_features = load_raw_features()
original_labels, names, vectors = load_raw_feature_vectors()
labels = [1 if l != "not_panel" else 0 for l in original_labels]
vectors = np.array(vectors)
print "Creating training and testing sets"
X_train, X_test, y_train, y_test = train_test_split(vectors, labels, stratify=labels)
print X_train.shape[0], "samples in training set,", len(set(list(y_train))), "labels in training set"
print X_test.shape[0], "samples in training set,", len(set(list(y_test))), "labels in testing set"
lr = LogisticRegression(
n_jobs=-1,
penalty=args.penalty,
dual=args.dual,
C=args.C,
fit_intercept=args.fit_intercept,
intercept_scaling=args.intercept_scaling,
max_iter=args.max_iter,
solver=args.solver,
tol=args.tol
)
lr.fit(X_train, y_train)
#print (lr.feature_importances_ != 0).sum()
pred = lr.predict(X_test)
pred_proba = lr.predict_proba(X_test)
print "Confusion Matrix:"
print confusion_matrix(y_test, pred)
#print np.array(y_test) == 1
pos_hist, pos_bin_edges = np.histogram(pred_proba[np.array(y_test) == 1, 1],
bins=[0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
neg_hist, neg_bin_edges = np.histogram(pred_proba[np.array(y_test) == 0, 1],
bins=[0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
fig, (ax1, ax2) = plt.subplots(2, 1)
#print pos_hist.shape, pos_bin_edges.shape
#print neg_hist.tolist()
ax1.plot(pos_bin_edges[:-1] + 0.05, pos_hist, color='green', linestyle='solid', label="Positives")
ax1.plot(neg_bin_edges[:-1] + 0.05, neg_hist, color='red', linestyle='solid', label="Negatives")
ax1.set_xlim(0.0, 1.0)
ax1.set_ylim(0.0, max(neg_hist.max(), pos_hist.max()))
ax1.set_xlabel('Threshold')
ax1.set_ylabel('Sample Count')
ax1.set_title('Positive Classification Thresholds')
ax1.legend(loc="upper left")
fpr, tpr, _ = roc_curve(y_test, pred_proba[:, 1])
roc_auc = auc(fpr, tpr)
ax2.plot(fpr, tpr, linewidth=4)
ax2.plot([0, 1], [0, 1], 'r--')
#ax2.xlim([0.0, 1.0])
#ax2.ylim([0.0, 1.05])
ax2.set_xlabel('False Positive Rate')
ax2.set_ylabel('True Positive Rate')
ax2.set_title('Logistic Regression ROC Curve')
#ax2.legend(loc="lower right")
plt.show()
with open("bot_model.lrmdl", "w") as f:
pickle.dump({"model": lr, "relevant_features": lr.coef_ != 0}, f)
| gpl-3.0 |
Titan-C/scikit-learn | sklearn/linear_model/omp.py | 8 | 31640 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
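# Illustrative usage sketch (not part of the original module; the data and
# variable names below are hypothetical and only show the call pattern):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 20)
#   X /= np.sqrt(np.sum(X ** 2, axis=0))       # columns are assumed unit-norm
#   w_true = np.zeros(20)
#   w_true[[2, 5, 11]] = [1.0, -2.0, 3.0]
#   y = np.dot(X, w_true)
#   coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
#   # coef should recover the three non-zero entries of w_true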
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent targets will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
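# Illustrative sketch (not part of the original module): with a precomputed
# Gram matrix the same hypothetical problem as in the sketch above can be
# solved through orthogonal_mp_gram, and the result is expected to match
# orthogonal_mp:
#
#   G = np.dot(X.T, X)
#   Xy = np.dot(X.T, y)
#   coef_gram = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)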
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
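# Illustrative estimator usage (hypothetical arrays X of shape
# (n_samples, n_features) and y of shape (n_samples,); not part of the
# original module):
#
#   omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3)
#   omp.fit(X, y)
#   y_pred = omp.predict(X)
#   omp.coef_        # sparse coefficient vector (w in the formula)
#   omp.n_iter_      # number of active features selected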
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
Maximum number of iterations to perform, therefore the maximum number
of features to include. 100 by default.
Returns
-------
residues : array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
Maximum number of iterations to perform, therefore the maximum number
of features to include. 10% of ``n_features`` but at least 5 if available.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
estimator=self)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv.split(X))
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
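# Illustrative cross-validated usage (hypothetical arrays X and y; not part
# of the original module). The sparsity level is picked from the per-fold
# residues computed in fit above:
#
#   omp_cv = OrthogonalMatchingPursuitCV(cv=5)
#   omp_cv.fit(X, y)
#   omp_cv.n_nonzero_coefs_    # sparsity chosen by cross-validation
#   y_pred = omp_cv.predict(X)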
| bsd-3-clause |
tkhirianov/kpk2016 | graphs/input_graph.py | 1 | 2588 | import networkx
import matplotlib.pyplot as plt
def input_edges_list():
"""считывает список рёбер в форме:
в первой строке N - число рёбер,
затем следует N строк из двух слов и одного числа
слова - названия вершин, концы ребра, а число - его вес
return граф в форме словаря рёбер и соответствующих им весов
"""
N = int(input('Введите количество рёбер:'))
G = {}
for i in range(N):
vertex1, vertex2, weight = input().split()
weight = float(weight)
G[(vertex1, vertex2)] = weight
return G
def edges_list_to_adjacency_list(E):
"""E - граф в форме словаря рёбер и соответствующих им весов
return граф в форме словаря словарей смежности с весами
"""
G = {}
for vertex1, vertex2 in E:
weight = E[(vertex1, vertex2)]
# add the edge (vertex1, vertex2)
if vertex1 not in G:
G[vertex1] = {vertex2:weight}
else: # this vertex has been seen before
G[vertex1][vertex2] = weight
# the graph is undirected, so also add the edge (vertex2, vertex1)
if vertex2 not in G:
G[vertex2] = {vertex1:weight}
else: # this vertex has been seen before
G[vertex2][vertex1] = weight
return G
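# A small illustration of the conversion above (hypothetical values):
# edges_list_to_adjacency_list({('A', 'B'): 1.0, ('B', 'C'): 2.0}) returns
# {'A': {'B': 1.0}, 'B': {'A': 1.0, 'C': 2.0}, 'C': {'B': 2.0}}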
def dfs(G, start, called = set(), skelet = set()):
called.add(start)
for neighbour in G[start]:
if neighbour not in called:
dfs(G, neighbour, called, skelet)
skelet.add((start, neighbour))
s = """A B 1
B D 1
B C 2
C A 2
C D 3
D E 5""".split('\n')
E = {}
for line in s:
a, b, weight = line.split()
E[(a, b)] = int(weight)
A = edges_list_to_adjacency_list(E)
called = set()
skelet = set()
dfs(A, 'A', called, skelet)
print(called)
print(skelet)
G = networkx.Graph(A)
position = networkx.spring_layout(G) # positions for all nodes
networkx.draw(G, position)
networkx.draw_networkx_labels(G, position)
networkx.draw_networkx_edge_labels(G, position, edge_labels=E)
# draw the spanning tree:
networkx.draw_networkx_edges(G, position, edgelist=skelet,
width=5, alpha=0.5, edge_color='red')
plt.show() # display
| gpl-3.0 |
alberto-antonietti/nest-simulator | pynest/examples/hh_phaseplane.py | 12 | 5096 | # -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Numerical phase-plane analysis of the Hodgkin-Huxley neuron
----------------------------------------------------------------
hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (``hh_psc_alpha``). Dynamics is investigated in the V-n space (see remark
below). A constant DC can be specified and its influence on the nullclines
can be studied.
Remark
~~~~~~~~
To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, `m` and `h`, to
constant values (`m_eq` and `h_eq`).
"""
import nest
import numpy as np
from matplotlib import pyplot as plt
amplitude = 100. # Set externally applied current amplitude in pA
dt = 0.1 # simulation step length [ms]
v_min = -100. # Min membrane potential
v_max = 42. # Max membrane potential
n_min = 0.1 # Min inactivation variable
n_max = 0.81 # Max inactivation variable
delta_v = 2. # Membrane potential step length
delta_n = 0.01 # Inactivation variable step length
V_vec = np.arange(v_min, v_max, delta_v)
n_vec = np.arange(n_min, n_max, delta_n)
num_v_steps = len(V_vec)
num_n_steps = len(n_vec)
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
nest.SetKernelStatus({'resolution': dt})
neuron = nest.Create('hh_psc_alpha')
# Numerically obtain equilibrium state
nest.Simulate(1000)
m_eq = neuron[0].Act_m
h_eq = neuron[0].Inact_h
neuron.I_e = amplitude # Apply external current
# Scan state space
print('Scanning phase space')
V_matrix = np.zeros([num_n_steps, num_v_steps])
n_matrix = np.zeros([num_n_steps, num_v_steps])
# pp_data will contain the phase-plane data as a vector field
pp_data = np.zeros([num_n_steps * num_v_steps, 4])
count = 0
for i, V in enumerate(V_vec):
for j, n in enumerate(n_vec):
# Set V_m and n
neuron.set(V_m=V, Act_n=n, Act_m=m_eq, Inact_h=h_eq)
# Find state
V_m = neuron[0].V_m
Act_n = neuron[0].Act_n
# Simulate a short while
nest.Simulate(dt)
# Find difference between new state and old state
V_m_new = neuron[0].V_m - V
Act_n_new = neuron[0].Act_n - n
# Store in vector for later analysis
V_matrix[j, i] = abs(V_m_new)
n_matrix[j, i] = abs(Act_n_new)
pp_data[count] = np.array([V_m, Act_n, V_m_new, Act_n_new])
if count % 10 == 0:
# Write updated state next to old state
print('')
print('Vm: \t', V_m)
print('new Vm:\t', V_m_new)
print('Act_n:', Act_n)
print('new Act_n:', Act_n_new)
count += 1
# Set state for AP generation
neuron.set(V_m=-34., Act_n=0.2, Act_m=m_eq, Inact_h=h_eq)
print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = np.zeros([1000, 2])
for i in range(1, 1001):
# Find state
V_m = neuron[0].V_m
Act_n = neuron[0].Act_n
if i % 10 == 0:
# Write new state next to old state
print('Vm: \t', V_m)
print('Act_n:', Act_n)
ap[i - 1] = np.array([V_m, Act_n])
# Simulate again
neuron.set(Act_m=m_eq, Inact_h=h_eq)
nest.Simulate(dt)
# Make analysis
print('')
print('Plot analysis')
nullcline_V = []
nullcline_n = []
print('Searching nullclines')
for i in range(0, len(V_vec)):
index = np.nanargmin(V_matrix[:][i])
if index != 0 and index != len(n_vec):
nullcline_V.append([V_vec[i], n_vec[index]])
index = np.nanargmin(n_matrix[:][i])
if index != 0 and index != len(n_vec):
nullcline_n.append([V_vec[i], n_vec[index]])
print('Plotting vector field')
factor = 0.1
for i in range(0, np.shape(pp_data)[0], 3):
plt.plot([pp_data[i][0], pp_data[i][0] + factor * pp_data[i][2]],
[pp_data[i][1], pp_data[i][1] + factor * pp_data[i][3]],
color=[0.6, 0.6, 0.6])
plt.plot(nullcline_V[:][0], nullcline_V[:][1], linewidth=2.0)
plt.plot(nullcline_n[:][0], nullcline_n[:][1], linewidth=2.0)
plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])
plt.plot(ap[:][0], ap[:][1], color='black', linewidth=1.0)
plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Inactivation variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')
plt.show()
| gpl-2.0 |
jorge2703/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
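# For intuition: with n_population = 10 and, say, n_samples = 3, the loop above
# must observe all C(10, 3) = 120 distinct subsets within the 10000 trials
# before the sampler is accepted.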
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
calico/basenji | bin/sonnet_sat_bed.py | 1 | 9499 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import json
import os
import pdb
import pickle
from queue import Queue
import random
import sys
from threading import Thread
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream
from basenji_sat_bed import satmut_gen, ScoreWorker
'''
sonnet_sat_bed.py
Perform an in silico saturation mutagenesis of sequences in a BED file.
'''
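# Possible invocation (a sketch; file and directory names are hypothetical):
#
#   sonnet_sat_bed.py -f hg38.fa -l 200 --rc --shifts "0,1" \
#       --stats sum,center -o sat_mut_out saved_model_dir regions.bed
#
# where saved_model_dir is a tf.saved_model directory and regions.bed lists
# the sequences to mutate.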
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model> <bed_file>'
parser = OptionParser(usage)
parser.add_option('-d', dest='mut_down',
default=0, type='int',
help='Nucleotides downstream of center sequence to mutate [Default: %default]')
parser.add_option('-f', dest='genome_fasta',
default=None,
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-l', dest='mut_len',
default=0, type='int',
help='Length of center sequence to mutate [Default: %default]')
parser.add_option('-o', dest='out_dir',
default='sat_mut', help='Output directory [Default: %default]')
parser.add_option('--plots', dest='plots',
default=False, action='store_true',
help='Make heatmap plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--species', dest='species',
default='human')
parser.add_option('--stats', dest='sad_stats',
default='sum',
help='Comma-separated list of stats to save (sum/center/scd). [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('-u', dest='mut_up',
default=0, type='int',
help='Nucleotides upstream of center sequence to mutate [Default: %default]')
(options, args) = parser.parse_args()
if len(args) == 2:
# single worker
model_file = args[0]
bed_file = args[1]
elif len(args) == 3:
# master script
options_pkl_file = args[0]
model_file = args[1]
bed_file = args[2]
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
elif len(args) == 4:
# multi worker
options_pkl_file = args[0]
model_file = args[1]
bed_file = args[2]
worker_index = int(args[3])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameter and model files and BED file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
options.sad_stats = [sad_stat.lower() for sad_stat in options.sad_stats.split(',')]
if options.mut_up > 0 or options.mut_down > 0:
options.mut_len = options.mut_up + options.mut_down
else:
assert(options.mut_len > 0)
options.mut_up = options.mut_len // 2
options.mut_down = options.mut_len - options.mut_up
# read targets
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_table(options.targets_file, index_col=0)
target_slice = targets_df.index
#################################################################
# setup model
seqnn_model = tf.saved_model.load(model_file).model
# query num model targets
seq_length = seqnn_model.predict_on_batch.input_signature[0].shape[1]
null_1hot = np.zeros((1,seq_length,4))
null_preds = seqnn_model.predict_on_batch(null_1hot)
null_preds = null_preds[options.species].numpy()
_, preds_length, num_targets = null_preds.shape
#################################################################
# sequence dataset
# read sequences from BED
seqs_dna, seqs_coords = bed.make_bed_seqs(
bed_file, options.genome_fasta, seq_length, stranded=True)
# filter for worker SNPs
if options.processes is not None:
worker_bounds = np.linspace(0, len(seqs_dna), options.processes+1, dtype='int')
seqs_dna = seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
seqs_coords = seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
num_seqs = len(seqs_dna)
# determine mutation region limits
seq_mid = seq_length // 2
mut_start = seq_mid - options.mut_up
mut_end = mut_start + options.mut_len
# make sequence generator
seqs_gen = satmut_gen(seqs_dna, mut_start, mut_end)
#################################################################
# setup output
scores_h5_file = '%s/scores.h5' % options.out_dir
if os.path.isfile(scores_h5_file):
os.remove(scores_h5_file)
scores_h5 = h5py.File(scores_h5_file, 'w')
scores_h5.create_dataset('seqs', dtype='bool',
shape=(num_seqs, options.mut_len, 4))
for sad_stat in options.sad_stats:
scores_h5.create_dataset(sad_stat, dtype='float16',
shape=(num_seqs, options.mut_len, 4, num_targets))
# store mutagenesis sequence coordinates
scores_chr = []
scores_start = []
scores_end = []
scores_strand = []
for seq_chr, seq_start, seq_end, seq_strand in seqs_coords:
scores_chr.append(seq_chr)
scores_strand.append(seq_strand)
if seq_strand == '+':
score_start = seq_start + mut_start
score_end = score_start + options.mut_len
else:
score_end = seq_end - mut_start
score_start = score_end - options.mut_len
scores_start.append(score_start)
scores_end.append(score_end)
scores_h5.create_dataset('chr', data=np.array(scores_chr, dtype='S'))
scores_h5.create_dataset('start', data=np.array(scores_start))
scores_h5.create_dataset('end', data=np.array(scores_end))
scores_h5.create_dataset('strand', data=np.array(scores_strand, dtype='S'))
preds_per_seq = 1 + 3*options.mut_len
score_threads = []
score_queue = Queue()
for i in range(1):
sw = ScoreWorker(score_queue, scores_h5, options.sad_stats,
mut_start, mut_end)
sw.start()
score_threads.append(sw)
#################################################################
# predict scores, write output
# find center
center_start = preds_length // 2
if preds_length % 2 == 0:
center_end = center_start + 2
else:
center_end = center_start + 1
# initialize predictions stream
preds_stream = stream.PredStreamSonnet(seqnn_model, seqs_gen,
rc=options.rc, shifts=options.shifts, species=options.species)
# predictions index
pi = 0
for si in range(num_seqs):
print('Predicting %d' % si, flush=True)
# collect sequence predictions
seq_preds_sum = []
seq_preds_center = []
seq_preds_scd = []
preds_mut0 = preds_stream[pi]
for spi in range(preds_per_seq):
preds_mut = preds_stream[pi]
preds_sum = preds_mut.sum(axis=0)
seq_preds_sum.append(preds_sum)
if 'center' in options.sad_stats:
preds_center = preds_mut[center_start:center_end,:].sum(axis=0)
seq_preds_center.append(preds_center)
if 'scd' in options.sad_stats:
preds_scd = np.sqrt(((preds_mut-preds_mut0)**2).sum(axis=0))
seq_preds_scd.append(preds_scd)
pi += 1
seq_preds_sum = np.array(seq_preds_sum)
seq_preds_center = np.array(seq_preds_center)
seq_preds_scd = np.array(seq_preds_scd)
# wait for previous to finish
score_queue.join()
# queue sequence for scoring
seq_pred_stats = (seq_preds_sum, seq_preds_center, seq_preds_scd)
score_queue.put((seqs_dna[si], seq_pred_stats, si))
# queue sequence for plotting
if options.plots:
plot_queue.put((seqs_dna[si], seq_preds_sum, si))
gc.collect()
# finish queue
print('Waiting for threads to finish.', flush=True)
score_queue.join()
# close output HDF5
scores_h5.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 |
dhimmel/networkx | networkx/convert_matrix.py | 13 | 33243 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired, the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index=nodelist, columns=nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
A valid column name (string or iteger) for the source nodes (for the
directed case).
target : str or int
A valid column name (string or iteger) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices in the triples constructed above are
# actually the row/column indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
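A minimal sketch of the `nodelist` behaviour (illustrative; the graph,
weights, and node labels below are made up for this example):
>>> G = nx.Graph()
>>> G.add_edge(1, 2, weight=7.0)
>>> G.add_edge(2, 3, weight=1.0)
>>> A = nx.to_numpy_recarray(G, nodelist=[1, 2])
>>> A.weight.shape
(2, 2)
>>> float(A.weight[0, 1])
7.0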
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The adjacency matrix is built internally in COO format and converted to the
requested sparse format on return; to obtain a different format, pass the
``format`` keyword.
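A minimal sketch (illustrative; uses ``nx.path_graph`` and the ``format``
attribute of SciPy sparse matrices):
>>> G = nx.path_graph(3)
>>> A = nx.to_scipy_sparse_matrix(G, format='csc')
>>> A.format
'csc'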
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
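A minimal sketch (assumes SciPy is installed; values are cast to plain
ints so the doctest output is stable across NumPy versions):
>>> import scipy.sparse
>>> A = scipy.sparse.csr_matrix([[0, 2], [0, 0]])
>>> [(int(u), int(v), int(w)) for u, v, w in _csr_gen_triples(A)]
[(0, 1, 2)]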
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
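A minimal sketch (assumes SciPy; the 2x2 matrix and its single entry are
made up for illustration):
>>> import scipy.sparse
>>> A = scipy.sparse.dok_matrix((2, 2), dtype=int)
>>> A[0, 1] = 3
>>> sorted((int(u), int(v), int(w)) for u, v, w in _dok_gen_triples(A))
[(0, 1, 3)]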
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
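A minimal sketch of the dispatch (assumes SciPy; a COO matrix falls through
to ``_coo_gen_triples``):
>>> import scipy.sparse
>>> A = scipy.sparse.coo_matrix([[0, 1], [0, 0]])
>>> [(int(u), int(v), int(w)) for u, v, w in _generate_weighted_edges(A)]
[(0, 1, 1)]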
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(
"Adjacency matrix is not square. nx,ny=%s" % (A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generate_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except ImportError:
raise SkipTest("NumPy not available")
try:
import scipy
except ImportError:
raise SkipTest("SciPy not available")
| bsd-3-clause |
wazeerzulfikar/scikit-learn | examples/model_selection/plot_grid_search_digits.py | 56 | 2761 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
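# A possible follow-up (illustrative sketch, not part of the original example):
# ``clf.best_estimator_`` here is the estimator from the last loop iteration,
# refit on the full development set (GridSearchCV refits by default), so it
# can be scored directly on the held-out evaluation data.
best_model = clf.best_estimator_
print("Test-set accuracy of the last tuned model: %0.3f"
% best_model.score(X_test, y_test))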
| bsd-3-clause |