repo_name (string, 6-96 chars) | path (string, 4-180 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 906-722k chars) | license (15 classes)
---|---|---|---|---|---|
yyjiang/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
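# Illustrative sketch, not part of the original test file (helper name is ours):
# shrunk_covariance blends the empirical covariance with a scaled identity,
# (1 - shrinkage) * emp_cov + shrinkage * mu * I, where mu = trace(emp_cov) / n_features.
def _sketch_shrunk_covariance_formula():
    emp_cov = empirical_covariance(X)
    shrinkage = 0.5
    mu = np.trace(emp_cov) / n_features
    expected = (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)
    assert_array_almost_equal(
        shrunk_covariance(emp_cov, shrinkage=shrinkage), expected)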
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
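# Illustrative sketch, not part of the original test file (helper name is ours):
# the Ledoit-Wolf estimate is the shrunk covariance built from the estimated
# shrinkage_, i.e. (1 - shrinkage_) * emp_cov + shrinkage_ * mu * I.
def _sketch_ledoit_wolf_is_shrunk_covariance():
    lw = LedoitWolf().fit(X)
    emp_cov = empirical_covariance(X)
    mu = np.trace(emp_cov) / n_features
    expected = ((1. - lw.shrinkage_) * emp_cov
                + lw.shrinkage_ * mu * np.eye(n_features))
    assert_array_almost_equal(lw.covariance_, expected, 4)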
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
hgrif/ds-utils | dsutils/sklearn.py | 1 | 2913 | import numpy as np
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
def multiclass_roc_auc_score(y_true, y_score, label_binarizer=None, **kwargs):
"""Compute ROC AUC score for multiclass.
:param y_true: true multiclass predictions [n_samples]
:param y_score: multiclass scores [n_samples, n_classes]
:param label_binarizer: Binarizer to use (sklearn.preprocessing.LabelBinarizer())
:param kwargs: Additional keyword arguments for sklearn.metrics.roc_auc_score
:return: Multiclass ROC AUC score
"""
if label_binarizer is None:
label_binarizer = preprocessing.LabelBinarizer()
binarized_true = label_binarizer.fit_transform(y_true)
score = metrics.roc_auc_score(binarized_true, y_score, **kwargs)
return score
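# Illustrative usage sketch, not part of the original module (helper name is
# ours): three classes, four samples, with per-class scores that rank every
# positive above every negative, so the macro-averaged AUC is 1.0.
def _demo_multiclass_roc_auc_score():
    y_true = np.array([0, 1, 2, 1])
    y_score = np.array([[0.8, 0.1, 0.1],
                        [0.2, 0.6, 0.2],
                        [0.1, 0.2, 0.7],
                        [0.3, 0.5, 0.2]])
    # y_true is binarized to a (n_samples, n_classes) indicator matrix before
    # sklearn.metrics.roc_auc_score is applied.
    return multiclass_roc_auc_score(y_true, y_score)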
def split_train_test(y, do_split_stratified=True, **kwargs):
"""Get indexes to split y in train and test sets.
:param y: Labels of samples
:param do_split_stratified: Use StratifiedShuffleSplit (else ShuffleSplit)
:param kwargs: Keyword arguments StratifiedShuffleSplit or ShuffleSplit
:return: (train indexes, test indexes)
"""
if do_split_stratified:
data_splitter = cross_validation.StratifiedShuffleSplit(y, n_iter=1,
**kwargs)
else:
data_splitter = cross_validation.ShuffleSplit(y, n_iter=1, **kwargs)
train_ix, test_ix = next(iter(data_splitter))  # take the single generated split
return train_ix, test_ix
class OrderedLabelEncoder(preprocessing.LabelEncoder):
"""Encode labels with value between 0 and n_classes-1 in specified order.
See also
--------
sklearn.preprocessing.LabelEncoder
"""
def __init__(self, classes):
self.classes_ = np.array(classes, dtype='O')
def fit(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def fit_transform(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.array(np.unique(y), dtype='O')
preprocessing.label._check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
transformed_y = np.zeros_like(y, dtype=int)
for i_class, current_class in enumerate(self.classes_):
transformed_y[np.array(y) == current_class] = i_class
return transformed_y | mit |
ChanChiChoi/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that every leaf subcluster has a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
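# Illustrative sketch, not part of the original module (helper name is ours):
# with mode='distance' the returned CSR matrix stores Euclidean distances to
# the selected neighbors instead of the 0/1 connectivity pattern, and
# include_self=False keeps each sample out of its own neighborhood.
def _demo_kneighbors_graph_distance_mode():
    X = [[0], [3], [1]]
    A = kneighbors_graph(X, 2, mode='distance', include_self=False)
    # A.toarray() is
    # [[ 0., 3., 1.],
    #  [ 3., 0., 2.],
    #  [ 1., 2., 0.]]
    return A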
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
jereze/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
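# Illustrative sketch, not part of the original module (helper name is ours):
# a constant feature has zero standard deviation; _handle_zeros_in_scale resets
# that entry to 1.0 so the later division leaves the centered column at zero
# instead of producing NaNs.
def _example_handle_zeros_in_scale():
    std = np.array([2.5, 0., 1.3])
    return _handle_zeros_in_scale(std)  # -> array([ 2.5, 1. , 1.3])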
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
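# Illustrative usage sketch, not part of the original module (helper name is
# ours): after scale() every column has zero mean and unit standard deviation.
def _example_scale_usage():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    return X_scaled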
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
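# Illustrative usage sketch, not part of the original module (helper name is
# ours): with the default feature_range=(0, 1) the per-column minimum maps to
# 0 and the maximum to 1, following the X_std / X_scaled formulas above.
def _example_minmax_scaler_usage():
    X_train = np.array([[-1., 2.],
                        [-0.5, 6.],
                        [0., 10.]])
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X_train)
    # X_scaled is [[0., 0.], [0.5, 0.5], [1., 1.]]
    assert np.allclose(scaler.inverse_transform(X_scaled), X_train)
    return X_scaled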
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
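# Illustrative usage sketch, not part of the original module (helper name is
# ours): mean_ and std_ learned on the training data are reused to transform
# new samples, and inverse_transform undoes the standardization.
def _example_standard_scaler_usage():
    X_train = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    scaler = StandardScaler().fit(X_train)
    # mean_ == [0.5, 0.5] and std_ == [0.5, 0.5] for this toy data.
    X_new = scaler.transform(np.array([[2., 2.]]))
    # X_new == [[3., 3.]] because (2 - 0.5) / 0.5 == 3.
    assert np.allclose(scaler.inverse_transform(X_new), [[2., 2.]])
    return X_new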
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
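# Illustrative sketch, not part of the original module (helper name is ours):
# MaxAbsScaler only divides by the per-column maximum absolute value, so zero
# entries stay zero and a CSR input keeps its sparsity structure.
def _example_maxabs_scaler_sparse():
    X_csr = sparse.csr_matrix([[1., -2., 0.],
                               [0., 4., 0.],
                               [-3., 0., 5.]])
    X_scaled = MaxAbsScaler().fit_transform(X_csr)
    # Column-wise maxima of |X| are [3, 4, 5], so X_scaled.toarray() is
    # [[ 1/3, -0.5, 0.], [ 0., 1., 0.], [-1., 0., 1.]]
    return X_scaled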
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
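# Illustrative sketch, not part of the original module (helper name is ours):
# the median/IQR statistics are barely affected by a single large outlier,
# unlike the mean/std used by StandardScaler.
def _example_robust_scaler_outlier():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    scaler = RobustScaler().fit(X)
    # center_ is the median (3.) and scale_ the interquartile range (2.),
    # regardless of the 1000. outlier.
    return scaler.transform(np.array([[5.]]))  # -> [[1.]]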
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
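# Illustrative sketch, not part of the original module (helper name is ours):
# powers_ records, for each output column, the exponent applied to every input
# feature; with two inputs and degree=2 the columns are 1, a, b, a^2, a*b, b^2.
def _example_polynomial_powers():
    poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
    # poly.powers_ is
    # [[0 0]   bias term
    #  [1 0]   a
    #  [0 1]   b
    #  [2 0]   a^2
    #  [1 1]   a*b
    #  [0 2]]  b^2
    return poly.powers_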
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
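# Illustrative sketch, not part of the original module (helper name is ours):
# every row is divided by its own norm, so with norm='l1' the absolute values
# of a row sum to 1 and with norm='l2' each row has unit Euclidean length.
def _example_normalize_norms():
    X = np.array([[4., 1., 2., 2.],
                  [1., 3., 9., 3.],
                  [5., 7., 5., 1.]])
    X_l1 = normalize(X, norm='l1')
    X_l2 = normalize(X, norm='l2')
    assert np.allclose(np.abs(X_l1).sum(axis=1), 1.)
    assert np.allclose((X_l2 ** 2).sum(axis=1), 1.)
    return X_l1, X_l2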
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
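# Illustrative sketch, not part of the original module (helper name is ours):
# values strictly greater than the threshold become 1, everything else 0.
def _example_binarize():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    # Result: [[0., 0., 1.], [1., 0., 0.], [0., 0., 0.]]
    return binarize(X, threshold=1.0)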
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
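    Examples
    --------
    A minimal usage sketch (output shown for illustration only):
    >>> from sklearn.preprocessing import Binarizer
    >>> X = [[1., -1., 2.], [0., 0., 0.5]]
    >>> Binarizer(threshold=0.0).fit_transform(X)  # doctest: +SKIP
    array([[ 1.,  0.,  1.],
           [ 0.,  0.,  1.]])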
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
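    Examples
    --------
    A rough sketch of the equivalence with explicit feature centering for a
    linear kernel (illustrative only, hence the skipped doctest):
    >>> import numpy as np
    >>> from sklearn.preprocessing import KernelCenterer
    >>> X = np.random.RandomState(0).rand(5, 3)
    >>> K = X.dot(X.T)                      # linear kernel matrix
    >>> K_centered = KernelCenterer().fit_transform(K)
    >>> X_centered = X - X.mean(axis=0)     # explicit centering in input space
    >>> np.allclose(K_centered, X_centered.dot(X_centered.T))  # doctest: +SKIP
    True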
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
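        # Double-center the kernel: subtract the column means learned at fit
        # time and the row means of the current kernel (both taken against
        # the fit data), then add back the overall mean of the fit kernel.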
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
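    Examples
    --------
    Illustrative only (this is a private helper; output formatting may vary):
    >>> import numpy as np
    >>> from sklearn.preprocessing.data import _transform_selected
    >>> X = np.array([[1., 10.], [2., 20.]])
    >>> _transform_selected(X, lambda Z: Z * 100., selected=[0])  # doctest: +SKIP
    array([[ 100.,   10.],
           [ 200.,   20.]])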
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
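        # Each value v of categorical feature i is mapped to one-hot column
        # feature_indices_[i] + v, so every row of the output gets exactly one
        # active entry per original feature in the COO matrix built below.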
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during
        # fit, i.e. values less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 26241 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although the second arg is then spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the
        # data has been previously L2-normalized.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
xuerenlv/PaperWork | my_study/pandas_study/test1.py | 1 | 1044 | # -*- coding: utf-8 -*-
'''
Created on Oct 16, 2015
@author: nlp
'''
import sys
import traceback
from store_model import Single_weibo_store
import datetime
from datetime import timedelta
import pprint
import jieba
reload(sys)
sys.setdefaultencoding('utf8')
from sklearn import svm
X = [[0, 0], [1, 1]]
y = [0, 1]
clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
clf.fit(X, y)
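# Illustrative only: the toy classifier fitted above is not used again below;
# predictions on new points could be obtained with e.g. clf.predict([[2., 2.]]).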
def is_all_chinese(word):
for uchar in word:
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
pass
else:
return False
return True
def is_all_alpha(word):
for one in word:
if (one >= 'a' and one <= u'z') or (one >= 'A' and one <= u'Z'):
pass
else:
return False
return True
if __name__ == '__main__':
x = '[timedelta'
y = u'薛薛啊结构'
print is_all_chinese(x)
print is_all_chinese(y)
pass
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates the kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features in size
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| mit |
vortex-ape/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 9 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
so that a balance is maintained between the two.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rileymcdowell/genomic-neuralnet | genomic_neuralnet/methods/generic_keras_net.py | 1 | 6998 | from __future__ import print_function
import os
import time
import json
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.optimizers import Nadam as Trainer
#from keras.optimizers import Adam as Trainer
from keras.regularizers import WeightRegularizer
from keras.callbacks import EarlyStopping, Callback, LearningRateScheduler
from sklearn.preprocessing import MinMaxScaler
from genomic_neuralnet.util import get_is_time_stats, get_should_plot
TIMING_EPOCHS = 12000
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_epoch_end(self, epoch, logs={}):
self.losses.append(logs.get('loss'))
class NeuralNetContainer(object):
def __init__(self):
self.model = None
self.learning_rate = None
self.weight_decay = 0.0
self.dropout_prob = 0.0
self.epochs = 25
self.hidden_layers = (10,)
self.verbose = False
self.plot = False
def clone(self):
if not self.model is None:
            raise NotImplementedError('Cannot clone container after building model')
clone = NeuralNetContainer()
clone.learning_rate = self.learning_rate
clone.weight_decay = self.weight_decay
clone.dropout_prob = self.dropout_prob
clone.epochs = self.epochs
clone.hidden_layers = self.hidden_layers
clone.verbose = self.verbose
clone.plot = self.plot
return clone
def _build_nn(net_container, n_features):
model = Sequential()
# Change scale from (-1, 1) to (0, 1)
model.add(Lambda(lambda x: (x + 1) / 2, input_shape=(n_features,), output_shape=(n_features,)))
if net_container.weight_decay > 0.0:
weight_regularizer = WeightRegularizer(net_container.weight_decay)
else:
weight_regularizer = None
last_dim = n_features
for lidx, n_nodes in enumerate(net_container.hidden_layers):
# Layer, activation, and dropout, in that order.
model.add(Dense(output_dim=n_nodes, input_dim=last_dim, W_regularizer=weight_regularizer))
model.add(Activation('sigmoid'))
if net_container.dropout_prob > 0.0:
model.add(Dropout(net_container.dropout_prob))
last_dim = n_nodes
model.add(Dense(output_dim=1, input_dim=last_dim, bias=False))
model.add(Activation('linear'))
if not net_container.learning_rate is None:
optimizer = Trainer(lr=net_container.learning_rate)
else:
#optimizer = Trainer(lr=0.0001)
optimizer = Trainer()
model.compile( optimizer=optimizer
, loss='mean_squared_error'
)
net_container.model = model
def _train_net(container, X, y, override_epochs=None, is_check_train=False):
"""
Given a container, X (inputs), and y (outputs) train the network in the container.
* If override_epochs is an integer, just run that many epochs.
* The is_check_train parameter signifies that this training is a quick check to make
sure that the network is properly initialized and that the output error
is decreasing. The best "check trained" network will be passed in again
for an additional full set of training epochs.
"""
model = container.model
epochs = override_epochs if (not override_epochs is None) else container.epochs
verbose = int(container.verbose)
def rate_func(epoch):
if epochs - epoch == 2000:
# Settle down during last 2000 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
if epochs - epoch == 500:
# Go a bit further in last 500 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
return float(model.optimizer.lr.get_value())
lr_scheduler = LearningRateScheduler(rate_func)
loss_history = LossHistory()
callbacks = [loss_history, lr_scheduler]
model.fit( X,
y,
nb_epoch=epochs,
               batch_size=X.shape[0] // 4,
verbose=verbose,
callbacks=callbacks
)
if (isinstance(override_epochs, int)) and (not is_check_train) and container.plot:
        # Plot the loss curve, but only for runs with overridden epochs that
        # are not quick check-training runs.
import matplotlib.pyplot as plt
plt.plot(range(len(loss_history.losses)), loss_history.losses)
plt.show()
return loss_history.losses[-1]
def _predict(container, X):
model = container.model
return model.predict(X)
_NET_TRIES = 2
def _get_initial_net(container, n_features, X, y):
"""
Create a few networks. Start the training process for a few epochs, then take
the best one to continue training. This eliminates networks that are poorly
initialized and will not converge.
"""
candidates = []
for _ in range(_NET_TRIES):
cont = container.clone()
_build_nn(cont, n_features)
candidates.append(cont)
losses = []
for candidate in candidates:
# Train each candidate for 100 epochs.
loss = _train_net(candidate, X, y, override_epochs=100, is_check_train=True)
losses.append(loss)
best_idx = np.argmin(losses)
return candidates[best_idx]
def get_net_prediction( train_data, train_truth, test_data, test_truth
, hidden=(5,), weight_decay=0.0, dropout_prob=0.0
, learning_rate=None, epochs=25, verbose=False
, iter_id=None
):
container = NeuralNetContainer()
container.learning_rate = learning_rate
container.dropout_prob = dropout_prob
container.weight_decay = weight_decay
container.epochs = epochs
container.hidden_layers = hidden
container.verbose = verbose
container.plot = get_should_plot()
mms = MinMaxScaler(feature_range= (-1, 1)) # Scale output from -1 to 1.
train_y = mms.fit_transform(train_truth[:,np.newaxis])
n_features = train_data.shape[1]
collect_time_stats = get_is_time_stats()
if collect_time_stats:
start = time.time()
# Find and return an effectively initialized network to start.
container = _get_initial_net(container, n_features, train_data, train_y)
# Train the network.
if collect_time_stats:
# Train a specific time, never terminating early.
_train_net(container, train_data, train_y, override_epochs=TIMING_EPOCHS, is_check_train=False)
else:
# Normal training, enable all heuristics.
_train_net(container, train_data, train_y)
if collect_time_stats:
end = time.time()
print('Fitting took {} seconds'.format(end - start))
print(json.dumps({'seconds': end - start, 'hidden': container.hidden_layers}))
# Unsupervised (test) dataset.
predicted = _predict(container, test_data)
predicted = mms.inverse_transform(predicted)
return predicted.ravel()
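# Hypothetical usage sketch of get_net_prediction on synthetic data. The shapes,
# values and the wrapper name below are assumptions for illustration only and
# are not part of the original script.
def _get_net_prediction_example():
    import numpy as np
    rng = np.random.RandomState(0)
    train_X, test_X = rng.rand(80, 6), rng.rand(20, 6)
    train_y, test_y = rng.rand(80), rng.rand(20)
    # Returns de-scaled predictions for the test rows as a flat array.
    return get_net_prediction(train_X, train_y, test_X, test_y,
                              hidden=(5,), epochs=25, verbose=False)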
| mit |
tntnatbry/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
djgagne/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
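# A short, hedged usage sketch (the helper name is illustrative and not part of
# the public API) showing how the estimators exported above are typically fit
# on a synthetic data matrix.
def _usage_sketch():
    import numpy as np
    X = np.random.RandomState(0).randn(60, 5)
    emp = empirical_covariance(X)   # plain maximum-likelihood estimate
    lw = LedoitWolf().fit(X)        # shrunk estimate with data-driven shrinkage
    return emp, lw.covariance_, lw.shrinkage_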
| bsd-3-clause |
shikhardb/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, and so on.
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as suggested by Wilson and Hilferty [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
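# A minimal, self-contained sketch (NumPy only; the helper name is illustrative)
# of the squared Mahalanobis distance defined above:
#     d_{(mu, Sigma)}(x)^2 = (x - mu)' Sigma^{-1} (x - mu)
def _mahalanobis_squared_sketch(x, mu, sigma):
    import numpy as np
    diff = np.asarray(x, dtype=float) - np.asarray(mu, dtype=float)
    # Solve Sigma z = diff instead of explicitly inverting Sigma.
    return float(diff.dot(np.linalg.solve(sigma, diff)))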
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
PrashntS/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
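# A minimal, self-contained sketch (NumPy only; the helper name is illustrative)
# of the reliability computation described above: bin the predicted
# probabilities and compare each bin's mean prediction with the observed
# fraction of positives.
def _reliability_curve_sketch(y_true, y_prob, n_bins=10):
    import numpy as np
    y_true = np.asarray(y_true)
    y_prob = np.asarray(y_prob)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    # Interior edges map each probability to a bin index in 0..n_bins-1.
    bin_ids = np.digitize(y_prob, edges[1:-1])
    mean_pred, frac_pos = [], []
    for b in range(n_bins):
        mask = bin_ids == b
        if mask.any():
            mean_pred.append(y_prob[mask].mean())
            frac_pos.append(y_true[mask].mean())
    return np.array(mean_pred), np.array(frac_pos)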
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
kzky/python-online-machine-learning-library | pa/passive_aggressive_2.py | 1 | 6472 | import numpy as np
import scipy as sp
import logging as logger
import time
import pylab as pl
from collections import defaultdict
from sklearn.metrics import confusion_matrix
class PassiveAggressiveII(object):
"""
Passive Aggressive-II algorithm: squared hinge loss PA.
References:
- http://jmlr.org/papers/volume7/crammer06a/crammer06a.pdf
This model is only applied to binary classification.
"""
def __init__(self, fname, delimiter = " ", C = 1, n_scan = 10):
"""
model initialization.
"""
logger.basicConfig(level=logger.DEBUG)
logger.info("init starts")
        self.n_scan = n_scan
self.data = defaultdict()
self.model = defaultdict()
self.cache = defaultdict()
self._load(fname, delimiter)
self._init_model(C)
logger.info("init finished")
def _load(self, fname, delimiter = " "):
"""
Load data set specified with filename.
data format must be as follows (space-separated file as default),
l_1 x_11 x_12 x_13 ... x_1m
l_2 x_21 x_22 ... x_2m
...
l_n x_n1 x_n2 ... x_nm
l_i must be {1, -1} because of binary classifier.
Arguments:
- `fname`: file name.
- `delimiter`: delimiter of a file.
"""
logger.info("load data starts")
# load data
self.data["data"] = np.loadtxt(fname, delimiter = delimiter)
self.data["n_sample"] = self.data["data"].shape[0]
self.data["f_dim"] = self.data["data"].shape[1] - 1
        # binarize labels
self._binalize(self.data["data"])
# normalize
self.normalize(self.data["data"][:, 1:])
logger.info("load data finished")
def _binalize(self, data):
"""
        Binarize the labels of the data.
Arguments:
- `data`: dataset.
"""
logger.info("init starts")
# binary check
labels = data[:, 0]
classes = np.unique(labels)
if classes.size != 2:
print "label must be a binary value."
exit(1)
# convert binary lables to {1, -1}
for i in xrange(labels.size):
if labels[i] == classes[0]:
labels[i] = 1
else:
labels[i] = -1
# set classes
self.data["classes"] = classes
logger.info("init finished")
def normalize(self, samples):
"""
        normalize each sample so that its L2 norm equals 1, i.e. sqrt(x.x) = 1
Arguments:
- `samples`: dataset without labels.
"""
logger.info("normalize starts")
for i in xrange(0, self.data["n_sample"]):
samples[i, :] = self._normalize(samples[i, :])
logger.info("normalize finished")
def _normalize(self, sample):
norm = np.sqrt(sample.dot(sample))
sample = sample/norm
return sample
def _init_model(self, C):
"""
Initialize model.
"""
logger.info("init model starts")
        self.model["w"] = np.zeros(self.data["f_dim"] + 1)  # model parameter (weights incl. bias)
self.model["C"] = C # aggressive parameter
logger.info("init model finished")
def _learn(self, ):
"""
Learn internally.
"""
def _update(self, label, sample, margin):
"""
Update model parameter internally.
update rule is as follows,
w = w + y (1 - m)/(||x||_2^2 + C) * x
Arguments:
- `label`: label = {1, -1}
- `sample`: sample, or feature vector
"""
# add bias
sample = self._add_bias(sample)
norm = sample.dot(sample)
w = self.model["w"] + label * (1 - margin)/(norm + self.model["C"]) * sample
self.model["w"] = w
def _predict_value(self, sample):
"""
predict value of \w^T * x
Arguments:
- `sample`:
"""
return self.model["w"].dot(self._add_bias(sample))
def _add_bias(self, sample):
return np.hstack((sample, 1))
def learn(self, ):
"""
Learn.
"""
logger.info("learn starts")
data = self.data["data"]
# learn
        for _ in xrange(0, self.n_scan):
for i in xrange(0, self.data["n_sample"]):
sample = data[i, 1:]
label = data[i, 0]
pred_val = self._predict_value(sample)
margin = label * pred_val
if margin < 1:
self._update(label, sample, margin)
logger.info("learn finished")
def predict(self, sample):
"""
predict {1, -1} base on \w^T * x
Arguments:
- `sample`:
"""
pred_val = self._predict_value(sample)
self.cache["pred_val"] = pred_val
if pred_val >=0:
return 1
else:
return -1
def update(self, label, sample):
"""
        update model using the prediction value cached by the last predict() call.
        Arguments:
        - `label`: label = {1, -1}
        - `sample`: sample, or feature vector
"""
        margin = label * self.cache["pred_val"]
        if margin < 1:
            self._update(label, sample, margin)
@classmethod
def examplify(cls, fname, delimiter = " ", C = 1 , n_scan = 3):
"""
Example of how to use
"""
# learn
st = time.time()
model = PassiveAggressiveII(fname, delimiter, C , n_scan)
model.learn()
et = time.time()
print "learning time: %f[s]" % (et - st)
# predict (after learning)
data = np.loadtxt(fname, delimiter = " ")
model._binalize(data)
n_sample = data.shape[0]
y_label = data[:, 0]
y_pred = np.ndarray(n_sample)
for i in xrange(0, n_sample):
sample = data[i, 1:]
y_pred[i] = model.predict(sample)
# show result
cm = confusion_matrix(y_label, y_pred)
print cm
        print "accuracy: %d [%%]" % (np.sum(cm.diagonal()) * 100.0/np.sum(cm))
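# A standalone, hedged sketch (the function name is illustrative and not part of
# the original module) of the PA-II closed-form step implemented by _update above:
#     w <- w + y * (1 - y * w.x) / (||x||_2^2 + C) * x, applied whenever y * w.x < 1.
def _pa2_step_sketch(w, x, y, C=1.0):
    margin = y * np.dot(w, x)
    if margin >= 1:
        # No hinge loss suffered, keep the current weights unchanged.
        return w
    tau = (1.0 - margin) / (np.dot(x, x) + C)
    return w + y * tau * x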
if __name__ == '__main__':
fname = "/home/kzk/datasets/uci_csv/liver.csv"
#fname = "/home/kzk/datasets/uci_csv/ad.csv"
print "dataset is", fname
PassiveAggressiveII.examplify(fname, delimiter = " ", C = 1, n_scan = 100)
| bsd-3-clause |
giorgiop/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
talbarda/kaggle_predict_house_prices | Build Model.py | 1 | 2629 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
from sklearn.model_selection import learning_curve
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
def get_model(estimator, parameters, X_train, y_train, scoring):
model = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)
model.fit(X_train, y_train)
return model.best_estimator_
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5), scoring='accuracy'):
plt.figure(figsize=(10,6))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel(scoring)
train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, scoring=scoring,
n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
for c in train:
train[c] = pd.Categorical(train[c].values).codes
X = train.drop(['SalePrice'], axis=1)
X = train[['OverallQual', 'GarageArea', 'GarageCars', 'TotalBsmtSF', 'TotRmsAbvGrd', 'FullBath', 'GrLivArea']]
y = train.SalePrice
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
scoring = make_scorer(accuracy_score, greater_is_better=True)
from sklearn.linear_model import RidgeCV
clf_ridge = RidgeCV()
clf_ridge.fit(X_train, y_train)
# SalePrice is continuous, so report the regressor's R^2 score rather than
# classification accuracy.
print (clf_ridge.score(X_test, y_test))
print (clf_ridge)
plt = plot_learning_curve(clf_ridge, 'RidgeCV', X, y, cv=4, scoring='r2')
plt.show() | mit |
f3r/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 38 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
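# A small, self-contained sketch (NumPy/SciPy only; the helper name is
# illustrative) of the quality measure used throughout this benchmark: the norm
# of the reconstruction error relative to the norm of the input,
#     || A - U diag(s) V || / || A ||, with ord=2 (spectral) or ord='fro'.
def _relative_norm_discrepancy_sketch(A, U, s, V, ord=2):
    import numpy as np
    from scipy import linalg
    approx = U.dot(np.diag(s)).dot(V)
    return linalg.norm(A - approx, ord=ord) / linalg.norm(A, ord=ord)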
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
jakevdp/megaman | megaman/embedding/tests/test_embeddings.py | 4 | 1798 | """General tests for embeddings"""
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from itertools import product
import numpy as np
from numpy.testing import assert_raises, assert_allclose
from megaman.embedding import (Isomap, LocallyLinearEmbedding,
LTSA, SpectralEmbedding)
from megaman.geometry.geometry import Geometry
EMBEDDINGS = [Isomap, LocallyLinearEmbedding, LTSA, SpectralEmbedding]
# # TODO: make estimator_checks pass!
# def test_estimator_checks():
# from sklearn.utils.estimator_checks import check_estimator
# for Embedding in EMBEDDINGS:
# yield check_estimator, Embedding
def test_embeddings_fit_vs_transform():
rand = np.random.RandomState(42)
X = rand.rand(100, 5)
geom = Geometry(adjacency_kwds = {'radius':1.0},
affinity_kwds = {'radius':1.0})
def check_embedding(Embedding, n_components):
model = Embedding(n_components=n_components,
geom=geom, random_state=rand)
embedding = model.fit_transform(X)
assert model.embedding_.shape == (X.shape[0], n_components)
assert_allclose(embedding, model.embedding_)
for Embedding in EMBEDDINGS:
for n_components in [1, 2, 3]:
yield check_embedding, Embedding, n_components
def test_embeddings_bad_arguments():
rand = np.random.RandomState(32)
X = rand.rand(100, 3)
def check_bad_args(Embedding):
# no radius set
embedding = Embedding()
assert_raises(ValueError, embedding.fit, X)
# unrecognized geometry
embedding = Embedding(radius=2, geom='blah')
assert_raises(ValueError, embedding.fit, X)
for Embedding in EMBEDDINGS:
yield check_bad_args, Embedding
| bsd-2-clause |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
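# A tiny sketch (NumPy only; the helper name is illustrative) of the robustness
# measure described above: the relative standard deviation of the final inertia
# across repeated runs of the same configuration.
def _relative_inertia_std_sketch(inertias):
    import numpy as np
    inertias = np.asarray(inertias, dtype=float)
    return inertias.std() / inertias.mean()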
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
henridwyer/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
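# A minimal sketch (NumPy only; the helper name is illustrative) of the simple
# shrinkage discussed above, blending the empirical covariance with a scaled
# identity matrix:
#     Sigma_shrunk = (1 - a) * Sigma_emp + a * (trace(Sigma_emp) / p) * I
def _shrunk_covariance_sketch(emp_cov, shrinkage=0.1):
    import numpy as np
    p = emp_cov.shape[0]
    mu = np.trace(emp_cov) / p
    return (1.0 - shrinkage) * emp_cov + shrinkage * mu * np.eye(p)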
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
lucidfrontier45/scikit-learn | examples/covariance/plot_covariance_estimation.py | 2 | 4991 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
The usual estimator for covariance is the maximum likelihood estimator,
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
| bsd-3-clause |
eonum/medword | model_validation.py | 1 | 11972 | import numpy as np
import preprocess as pp
import os
from random import randint
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import csv
def validate_model(embedding, emb_model_dir, emb_model_fn):
print("Start validation. Loading model. \n")
# load config
config = embedding.config
# load model
embedding.load_model(emb_model_dir, emb_model_fn)
# directories and filenames
val_dir = config.config['val_data_dir']
doesntfit_fn = config.config['doesntfit_file']
doesntfit_src = os.path.join(val_dir, doesntfit_fn)
synonyms_fn = config.config['synonyms_file']
syn_file_src = os.path.join(val_dir, synonyms_fn)
# test with doesn't fit questions
test_doesntfit(embedding, doesntfit_src)
# test with synonyms
# TODO get better syn file (slow, contains many non-significant instances)
# test_synonyms(embedding, syn_file_src)
# test with human similarity TODO remove hardcoding
human_sim_file_src = 'data/validation_data/human_similarity.csv'
test_human_similarity(embedding, human_sim_file_src)
#### Doesn't Fit Validation ####
def doesntfit(embedding, word_list):
"""
- compares each word-vector to mean of all word-vectors of word_list using the vector dot-product
    - the vector with the lowest dot-product to the mean vector is regarded as the one that doesn't fit
"""
used_words = [word for word in word_list if embedding.may_construct_word_vec(word)]
n_used_words = len(used_words)
n_words = len(word_list)
if n_used_words != n_words:
ignored_words = set(word_list) - set(used_words)
        print("vectors for words %s are not present in the model, ignoring these words" % (ignored_words,))
if not used_words:
print("cannot select a word from an empty list.")
    vectors = np.vstack([embedding.word_vec(word) for word in used_words])
mean = np.mean(vectors, axis=0)
dists = np.dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
def test_doesntfit(embedding, file_src):
"""
- tests all doesntfit-questions (lines) of file
- a doesnt-fit question is of the format "word_1 word_2 ... word_N word_NotFitting"
where word_1 to word_n are members of a category but word_NotFitting isn't
eg. "Auto Motorrad Fahrrad Ampel"
"""
# load config
config = embedding.config
print("Validating 'doesntfit' with file", file_src)
num_lines = sum(1 for line in open(file_src))
num_questions = 0
num_right = 0
tokenizer = pp.get_tokenizer(config)
# get questions
with open(file_src) as f:
questions = f.read().splitlines()
tk_questions = [tokenizer.tokenize(q) for q in questions]
    # TODO: check whether the tokenizer has split one word into multiple words and handle it.
    # So far no word in the doesnt_fit test file should be split
# vocab used to speed checking if word is in vocabulary
# (also checked by embedding.may_construct_word_vec(word))
vocab = embedding.get_vocab()
# test each question
for question in tk_questions:
# check if all words exist in vocabulary
if all(((word in vocab) or (embedding.may_construct_word_vec(word))) for word in question):
num_questions += 1
if doesntfit(embedding, question) == question[-1]:
num_right += 1
# calculate result
    correct_matches = np.round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0
    coverage = np.round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print("\n*** Doesn't fit ***")
print('Doesn\'t fit correct: {0}% ({1}/{2})'.format(str(correct_matches), str(num_right), str(num_questions)))
print('Doesn\'t fit coverage: {0}% ({1}/{2}) \n'.format(str(coverage), str(num_questions), str(num_lines)))
#### Synonyms Validation ####
def test_synonyms(embedding, file_src):
"""
- tests all synonym-questions (lines) of file
- a synonym-question is of the format "word_1 word_2"
where word_1 and word_2 are synonyms
eg. "Blutgerinnsel Thrombus"
- for word_1 check if it appears in the n closest words of word_2 using "model.cosine(word, n)"
and vice-versa
- for each synonym-pair TWO CHECKS are made therefore (non-symmetric problem)
"""
print("Validating 'synonyms' with file", file_src)
config = embedding.config
num_lines = sum(1 for line in open(file_src))
num_questions = 0
cos_sim_sum_synonyms = 0
tokenizer = pp.get_tokenizer(config)
    # get questions which are still of length 2 after tokenization
    # TODO: improve for compound words (aaa-bbb) which are split by the tokenizer
tk_questions = []
with open(file_src, 'r') as f:
questions = f.read().splitlines()
for q in questions:
# synonyms = q.split(';')#tokenizer.tokenize(q)
# synonyms = [" ".join(tokenizer.tokenize(synonym)) for synonym in
# synonyms]
synonyms = tokenizer.tokenize(q)
if len(synonyms) == 2:
tk_questions.append(synonyms)
vocab = embedding.get_vocab()
# test each question
for tk_quest in tk_questions:
# check if all words exist in vocabulary
if all(((word in vocab) or embedding.may_construct_word_vec(word)) for word in tk_quest):
num_questions += 1
w1 = tk_quest[0]
w2 = tk_quest[1]
cos_sim_sum_synonyms += embedding.similarity(w1, w2)
# compute avg cosine similarity for random vectors to relate to avg_cosine_similarity of synonyms
vocab_size = len(vocab)
n_vals = 1000
similarity_sum_rand_vec = 0
vals1 = [randint(0, vocab_size -1) for i in range(n_vals)]
vals2 = [randint(0, vocab_size -1) for i in range(n_vals)]
for v1, v2 in zip(vals1, vals2):
similarity_sum_rand_vec += embedding.similarity(vocab[v1], vocab[v2])
    avg_cosine_similarity_rand_vec = similarity_sum_rand_vec / float(n_vals)
# calculate result
avg_cosine_similarity_synonyms = (cos_sim_sum_synonyms / num_questions) if num_questions>0 else 0.0
    coverage = np.round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print("\n*** Cosine-Similarity ***")
print("Synonyms avg-cos-similarity (SACS):", avg_cosine_similarity_synonyms, "\nRandom avg-cos-similarity (RACS):", avg_cosine_similarity_rand_vec,
"\nRatio SACS/RACS:", avg_cosine_similarity_synonyms/float(avg_cosine_similarity_rand_vec))
print("\n*** Word Coverage ***")
print("Synonyms: {0} pairs in input. {1} pairs after tokenization. {2} pairs could be constructed from model-vocabulary.".format(str(num_lines), str(len(tk_questions)), str(num_questions)))
print("Synonyms coverage: {0}% ({1}/{2})\n".format(str(coverage), str(2*num_questions), str(2*num_lines), ))
def get_human_rating_deviation(embedding, word1, word2, human_similarity):
# compute deviation of human similarity from cosine similarity
# cosine similarity
cosine_similarity = embedding.similarity(word1, word2)
return np.abs(cosine_similarity - human_similarity)
def test_human_similarity(embedding, file_src):
"""
Compare cosine similarity of 2 word-vectors against a similarity value
based on human ratings.
Each line in the file contains two words and the similarity value,
separated by ':'.
The datasets were obtained by asking human subjects to assign a similarity
or relatedness judgment to a number of German word pairs.
https://www.ukp.tu-darmstadt.de/data/semantic-relatedness/german-relatedness-datasets/
"""
config = embedding.config
tokenizer = pp.get_tokenizer(config)
vocab = embedding.get_vocab()
vocab_size = len(vocab)
# accumulate error and count test instances
summed_error = 0.0
n_test_instances = 0
n_skipped_instances = 0
summed_random_error = 0.0
# load file to lines
with open(file_src, 'r') as csvfile:
filereader = csv.reader(csvfile, delimiter=':',)
next(filereader)
# process line by line
for line in filereader:
n_test_instances += 1
# split lines to instances
word1 = tokenizer.tokenize(line[0])[0]
word2 = tokenizer.tokenize(line[1])[0]
human_similarity = np.float32(line[2])
# check if both words are in vocab
if (word1 in embedding.get_vocab()
and word2 in embedding.get_vocab()):
# add current deviation to error
deviation = get_human_rating_deviation(embedding, word1, word2,
human_similarity)
summed_error += deviation
# get a random error for comparison
rand_word1 = vocab[randint(0, vocab_size -1)]
rand_word2 = vocab[randint(0, vocab_size -1)]
random_dev = get_human_rating_deviation(embedding, rand_word1,
rand_word2,
human_similarity)
summed_random_error += random_dev
else:
n_skipped_instances += 1
# print results
print("\n*** Human-Similarity ***")
print("Number of instances: {0}, skipped: {1}"
.format(str(n_test_instances), str(n_skipped_instances)))
# check whether we found any valid test instance
n_processed_instances = n_test_instances - n_skipped_instances
if (n_processed_instances == 0):
print("Error: No instance could be computed with this model.")
else:
mean_error = summed_error / n_processed_instances
random_error = summed_random_error / n_processed_instances
print("random error: {0}, mean error: {1}"
.format(str(random_error), str(mean_error)))
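# --- illustrative addendum (not part of the original module) ---
# A minimal sketch of the ':'-separated format expected by test_human_similarity():
# a header line followed by "word1:word2:score" lines. The in-memory file content
# below is invented for illustration only.
def _human_similarity_format_sketch():
    import io
    demo = io.StringIO("word1:word2:similarity\nAuto:Wagen:4.0\nAuto:Apfel:0.5\n")
    reader = csv.reader(demo, delimiter=':')
    next(reader)                     # skip the header, as test_human_similarity() does
    return [(w1, w2, float(score)) for w1, w2, score in reader]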
#### Visualization ####
def visualize_words(embedding, word_list, n_nearest_neighbours):
# get indexes and words that you want to visualize
words_to_visualize = []
# word_indexes_to_visualize = []
# get all words and neighbors that you want to visualize
for word in word_list:
if not embedding.may_construct_word_vec(word):
continue
words_to_visualize.append(word)
# word_indexes_to_visualize.append(model.ix(word))
# get neighbours of word
neighbours = [n for (n, m) in embedding.most_similar_n(word, n_nearest_neighbours)]
words_to_visualize.extend(neighbours)
#word_indexes_to_visualize.extend(indexes)
# get vectors from indexes to visualize
    if not words_to_visualize:
print("No word found to show.")
return
emb_vectors = np.vstack([embedding.word_vec(word) for word in words_to_visualize])
# project down to 2D
pca = PCA(n_components=2)
emb_vec_2D = pca.fit_transform(emb_vectors)
n_inputs = len(word_list)
for i in range(n_inputs):
        # group each word and its neighbours together (results in a different color per group in the plot)
lower = i*n_nearest_neighbours + i
upper = (i+1)*n_nearest_neighbours + (i+1)
# plot 2D
plt.scatter(emb_vec_2D[lower:upper, 0], emb_vec_2D[lower:upper, 1])
for label, x, y in zip(words_to_visualize, emb_vec_2D[:, 0], emb_vec_2D[:, 1]):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
# find nice axes for plot
lower_x = min(emb_vec_2D[:, 0])
upper_x = max(emb_vec_2D[:, 0])
lower_y = min(emb_vec_2D[:, 1])
upper_y = max(emb_vec_2D[:, 1])
# 10% of padding on all sides
pad_x = 0.1 * abs(upper_x - lower_x)
pad_y = 0.1 * abs(upper_y - lower_y)
plt.xlim([lower_x - pad_x, upper_x + pad_x])
plt.ylim([lower_y - pad_y, upper_y + pad_y])
plt.show()
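# --- illustrative addendum (not part of the original module) ---
# A minimal sketch of the projection step used in visualize_words(): stack some
# high-dimensional vectors and reduce them to 2-D with PCA before plotting.
# The toy vectors are random and serve illustration only.
def _pca_projection_sketch():
    rs = np.random.RandomState(0)
    toy_vectors = rs.normal(size=(10, 50))     # 10 "word vectors" of dimension 50
    coords_2d = PCA(n_components=2).fit_transform(toy_vectors)
    return coords_2d.shape                     # -> (10, 2)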
| mit |
phdowling/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
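# --- illustrative addendum (not part of the original test file) ---
# As the XXX note above suggests, the same assignment problems can also be solved
# through a public API; a minimal sketch with scipy.optimize.linear_sum_assignment
# (available in SciPy >= 0.17) is shown here only as a point of comparison.
def _scipy_assignment_sketch():
    from scipy.optimize import linear_sum_assignment
    cost = np.array([[400, 150, 400],
                     [400, 450, 600],
                     [300, 225, 300]])
    row_ind, col_ind = linear_sum_assignment(cost)   # optimal row/column pairing
    return cost[row_ind, col_ind].sum()              # -> 850, matching the test above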
| bsd-3-clause |
LeeYiFang/Carkinos | src/probes/views.py | 1 | 122212 | from django.shortcuts import render,render_to_response
from django.http import HttpResponse, Http404,JsonResponse
from django.views.decorators.http import require_GET
from .models import Dataset, CellLine, ProbeID, Sample, Platform, Clinical_Dataset,Clinical_sample,Gene
from django.template import RequestContext
from django.utils.html import mark_safe
import json
import pandas as pd
import numpy as np
from pathlib import Path
import sklearn
from sklearn.decomposition import PCA
from scipy import stats
import os
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
import uuid
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
r=ro.r
#lumi= importr('lumi')
from rpy2.robjects import pandas2ri
pandas2ri.activate()
import csv
#import logging
#logger = logging.getLogger("__name__")
show_row=4000 # if the output exceeds this many rows, switch to downloadable-file mode
def generate_samples():
d=Dataset.objects.all()
cell_d_name=list(d.values_list('name',flat=True))
same_name=[]
    cell_datasets=[] #[[dataset_name, alias, [samples], [[primary_site, [cell line names]]]]]
for i in cell_d_name:
if i=="Sanger Cell Line Project":
alias='sanger'
same_name.append('sanger')
elif i=="NCI60":
alias='nci'
same_name.append('nci')
elif i=="GSE36133":
alias='ccle'
same_name.append('ccle')
else:
alias=i
same_name.append(i)
sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id')
cell_datasets.append([i,alias,list(sample),[]])
sites=list(sample.values_list('cell_line_id__primary_site',flat=True))
hists=list(sample.values_list('cell_line_id__name',flat=True))
dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
cell_datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
d=Clinical_Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]]
primarys=[] #[[primary_site,[primary_hist]]]
primh_filter=[] #[[filter_type,[filter_choice]]]
f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
for i in d_name:
same_name.append(i)
datasets.append([i,[],[]])
sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
for f in f_type:
temp=list(set(sample.values_list(f,flat=True)))
datasets[-1][2].append([f,temp])
sample=Clinical_sample.objects.all().order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
s=Clinical_sample.objects.all()
for f in f_type:
temp=list(set(s.values_list(f,flat=True)))
primh_filter.append([f,temp])
all_full_name=cell_d_name+d_name
return {
'all_full_name':mark_safe(json.dumps(all_full_name)), #full name of all datasets
'same_name':mark_safe(json.dumps(same_name)), #short name for all datasets
'cell_d_name':mark_safe(json.dumps(cell_d_name)), #cell line dataset name(full)
'cell_datasets':cell_datasets,
'd_name': mark_safe(json.dumps(d_name)), #clinical dataset name
'datasets': datasets,
'primarys': primarys,
'primh_filter':primh_filter,
}
def sample_microarray(request):
d=Clinical_Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]]
primarys=[] #[[primary_site,[primary_hist]]]
primh_filter=[] #[[filter_type,[filter_choice]]]
f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
for i in d_name:
datasets.append([i,[],[]])
sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
for f in f_type:
temp=list(set(sample.values_list(f,flat=True)))
datasets[-1][2].append([f,temp])
sample=Clinical_sample.objects.all().order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
s=Clinical_sample.objects.all()
for f in f_type:
temp=list(set(s.values_list(f,flat=True)))
primh_filter.append([f,temp])
return render(request, 'sample_microarray.html', {
'd_name': mark_safe(json.dumps(d_name)),
'datasets': datasets,
'primarys': primarys,
'primh_filter':primh_filter,
})
def user_pca(request):
#load the ranking file and the table of probe first,open files
pform=request.POST['data_platform']
uni=[] #to store valid probe offset for getting the correct data
uni_probe=[]
gene_flag=0
if(pform=="others"): #gene level
gene_flag=1
if(request.POST['ngs']=="ngs_u133a"):
pform="U133A"
else:
pform="PLUS2"
elif (pform=="U133A"):
quantile=list(np.load('ranking_u133a.npy'))
probe_path=Path('../').resolve().joinpath('src','Affy_U133A_probe_info.csv')
probe_list = pd.read_csv(probe_path.as_posix())
uni_probe=pd.unique(probe_list['PROBEID'])
else:
quantile=np.load('ranking_u133plus2.npy')
probe_path=Path('../').resolve().joinpath('src','Affy_U133plus2_probe_info.csv')
probe_list = pd.read_csv(probe_path.as_posix())
uni_probe=pd.unique(probe_list['PROBEID'])
propotion=0
table_propotion=0
show=request.POST['show_type'] #get the pca show type
nci_size=Sample.objects.filter(dataset_id__name__in=["NCI60"]).count()
gse_size=Sample.objects.filter(dataset_id__name__in=["GSE36133"]).count()
group_counter=1
user_out_group=[]
s_group_dict={} #store sample
offset_group_dict={} #store offset
cell_line_dict={}
#this part is for selecting cell lines base on dataset
#count how many group
group_counter=1
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
s_group_dict={} #store sample
group_name=[]
offset_group_dict={} #store offset
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
all_exist_dataset=[]
for i in range(1,group_counter+1):
dname='dataset_g'+str(i)
all_exist_dataset=all_exist_dataset+request.POST.getlist(dname)
all_exist_dataset=list(set(all_exist_dataset))
all_base=[0]
for i in range(0,len(all_exist_dataset)-1):
if all_exist_dataset[i] in clline:
all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
else:
all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
all_c=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
offset_group_dict['g'+str(i)]=[]
cell_line_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
group_name.append('g'+str(i))
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
temp=list(set(request.POST.getlist(c)))
if 'd_sample' in show:
if all_c==[]:
all_c=all_c+temp
uni=temp
else:
uni=list(set(temp)-set(all_c))
all_c=all_c+uni
else:
uni=list(temp) #do not filter duplicate input only when select+centroid
s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id'
).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name')
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True))
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
else: #dealing with clinical sample datasets
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
#print(com_hists)
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
#print(len(prims))
#print(len(hists))
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True))
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
#return render_to_response('welcome.html',locals())
all_sample=[]
all_cellline=[]
cell_object=[]
all_offset=[]
sample_counter={}
group_cell=[]
g_s_counter=[0]
for i in range(1,group_counter+1):
all_sample=all_sample+s_group_dict['g'+str(i)] #will not exist duplicate sample if d_sample
all_offset=all_offset+offset_group_dict['g'+str(i)]
all_cellline=all_cellline+cell_line_dict['g'+str(i)]
g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)]))
for i in all_sample:
sample_counter[i.name]=1
if str(type(i))=="<class 'probes.models.Sample'>":
##print("i am sample!!")
cell_object.append(i.cell_line_id)
else:
##print("i am clinical!!")
cell_object.append(i)
#read the user file
text=request.FILES.getlist('user_file')
user_counter=len(text)
if(gene_flag==1):
user_counter=1
ugroup=[]
for i in range(user_counter):
ugroup.append(request.POST['ugroup_name'+str(i+1)])
if(ugroup[-1]==''):
ugroup[-1]='User_Group'+str(i+1)
dgroup=[]
for i in range(1,group_counter+1):
dgroup.append(request.POST['group_name'+str(i)])
if(dgroup[-1]==''):
dgroup[-1]='Dataset_Group'+str(i)
user_dict={} #{user group number:user 2d array}
samples=0
nans=[] #to store the probe name that has nan
for x in range(1,user_counter+1):
#check the file format and content here first
filetype=str(text[x-1]).split('.')
if(filetype[-1]!="csv"):
error_reason='You have the wrong file type. Please upload .csv files'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(text[x-1].size>=80000000): #bytes
error_reason='The file size is too big. Please upload .csv file with size less than 80MB.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
temp_data = pd.read_csv(text[x-1])
col=list(temp_data.columns.values)
samples=samples+len(col)-1
if(samples==0):
error_reason='The file does not have any samples.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(gene_flag==0): #probe level check
check_probe=[str(x) for x in list(temp_data.iloc[:,0]) if not str(x).lower().startswith('affx')]
#print(len(check_probe))
if(len(check_probe)!=len(uni_probe)):
error_reason='The probe number does not match with the platform you selected.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(set(check_probe)!=set(uni_probe)):
error_reason='The probe number or probe name in your file does not match the platform you selected.'
#error_reason+='</br>The probes that are not in the platform: '+str(set(check_probe)-set(uni_probe))[1:-1]
#error_reason+='</br>The probes that are lacking: '+str(set(uni_probe)-set(check_probe))[1:-1]
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
col=list(temp_data.columns.values)
n=pd.isnull(temp_data).any(1).nonzero()[0]
nans += list(temp_data[col[0]][n])
user_dict[x]=temp_data
if 'd_sample' in show:
if((len(all_sample)+samples)<4):
            error_reason='You should have at least 4 samples for PCA. There are not enough samples.<br />'\
'The total number of samples in your uploaded file is '+str(samples)+'.<br />'\
'The number of samples you selected is '+str(len(all_sample))+'.<br />'\
'Total is '+str(len(all_sample)+samples)+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
else:
s_count=0
sample_list=[]
a_sample=np.array(all_sample)
for i in range(1,group_counter+1):
dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]]))
a_cell_object=np.array(cell_object)
for c in dis_cellline:
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
s_count=s_count+1
if(s_count>=4): #check this part
break
if(s_count>=4): #check this part
break
if((s_count+samples)<4):
error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The total number is not enough.<br />'\
            'The total number of dots in your uploaded file is '+str(samples)+'.<br />'\
'The number of centroid dots you selected is '+str(s_count)+'.<br />'\
'Total is '+str(s_count+samples)+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
new_name=[]
origin_name=[]
com_gene=[] #for ngs select same gene
nans=list(set(nans))
for x in range(1,user_counter+1):
#temp_data = pd.read_csv(text[x-1])
temp_data=user_dict[x]
col=list(temp_data.columns.values)
col[0]='probe'
temp_data.columns=col
temp_data.index = temp_data['probe']
temp_data.index.name = None
temp_data=temp_data.iloc[:, 1:]
#add "use_" to user's sample names
col_name=list(temp_data.columns.values) #have user's sample name list here
origin_name=origin_name+list(temp_data.columns.values)
col_name=[ "user_"+str(index)+"_"+s for index,s in enumerate(col_name)]
temp_data.columns=col_name
new_name=new_name+col_name
if(gene_flag==0):
try:
temp_data=temp_data.reindex(uni_probe)
except ValueError:
                return HttpResponse('The file has probes with duplicate names; please make them unique.')
#remove probe that has nan
temp_data=temp_data.drop(nans)
temp_data=temp_data.rank(method='dense')
#this is for quantile
for i in col_name:
for j in range(0,len(temp_data[i])):
#if(not(np.isnan(temp_data[i][j]))):
temp_data[i][j]=quantile[int(temp_data[i][j]-1)]
if x==1:
data=temp_data
else:
data=np.concatenate((data,temp_data), axis=1)
user_dict[x]=np.array(temp_data)
else:
temp_data=temp_data.drop(nans) #drop nan
temp_data=temp_data.loc[~(temp_data==0).all(axis=1)] #drop all rows with 0 here
temp_data=temp_data.groupby(temp_data.index).first() #drop the duplicate gene row
user_dict[x]=temp_data
#print(temp_data)
    #drop NaN probes, combine the user data with the selected datasets, and transpose the matrix
for x in range(0,len(all_exist_dataset)):
if(gene_flag==0):
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
if x==0:
val=np.load(pth.as_posix())
else:
val=np.hstack((val, np.load(pth.as_posix())))#combine together
#database dataset remove nan probes
if(gene_flag==0):
uni=[]
p_offset=list(ProbeID.objects.filter(platform__name__in=[pform],Probe_id__in=nans).values_list('offset',flat=True))
for n in range(0,len(uni_probe)):
if(n not in p_offset):
uni.append(n)
else:
#deal with ngs uploaded data here
probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt')
#probe_list = pd.read_csv(probe_path.as_posix())
#notice duplicate
#get the match gene first, notice the size issue
info=pd.read_csv(probe_path.as_posix(),sep='\t')
col=list(info.columns.values)
col[0]='symbol'
info.columns=col
info.index = info['symbol']
info.index.name = None
info=info.iloc[:, 1:]
data=user_dict[1]
#data=data.groupby(data.index).first() #drop the duplicate gene row
com_gene=list(data.index)
temp_data=data
rloop=divmod(len(com_gene),990)
if(rloop[1]==0):
rloop=(rloop[0]-1,0)
gg=[]
for x in range(0,rloop[0]+1):
gg+=list(Gene.objects.filter(platform__name__in=[pform],symbol__in=com_gene[x*990:(x+1)*990]).order_by('offset'))
exist_gene=[]
uni=[]
for i in gg:
exist_gene.append(i.symbol)
uni.append(i.offset)
info=info.drop(exist_gene,errors='ignore')
new_data=temp_data.loc[data.index.isin(exist_gene)].reindex(exist_gene)
##print(exist_gene)
##print(new_data.index)
#search remain symbol's alias and symbol
search_alias=list(set(com_gene)-set(exist_gene))
for i in search_alias:
re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first
if(len(re_symbol)!=0):
re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not
repeat=len(re_match)
if(repeat!=0): #match gene symbol in database
##print(re_match)
for x in re_match:
to_copy=data.loc[i]
to_copy.name=x.symbol
new_data=new_data.append(to_copy)
uni.append(x.offset)
info=info.drop(x.symbol,errors='ignore')
user_dict[1]=np.array(new_data)
##print("length of new data:"+str(len(new_data)))
##print("data:")
##print(data)
##print("new_data:")
##print(new_data)
data=new_data
if 'd_sample' in show:
val=val[np.ix_(uni,all_offset)]
#print(len(val))
user_offset=len(val[0])
if(gene_flag==1):
#do the rank invariant here
#print("sample with ngs data do rank invariant here")
ref_path=Path('../').resolve().joinpath('src','cv_result.txt')
ref=pd.read_csv(ref_path.as_posix())
col=list(ref.columns.values)
col[0]='symbol'
ref.columns=col
ref.index = ref['symbol']
ref.index.name = None
ref=ref.iloc[:, 1:]
ref=ref.iloc[:5000,:] #rank invariant need 5000 genes
same_gene=list(ref.index.intersection(data.index))
#to lowess
rref=pandas2ri.py2ri(ref.loc[same_gene])
rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1))
rall=pandas2ri.py2ri(data)
ro.globalenv['x'] = rngs
ro.globalenv['y'] = rref
ro.globalenv['newx'] = rall
r('x<-as.vector(as.matrix(x))')
r('y<-as.vector(as.matrix(y))')
r('newx<-as.matrix(newx)')
try:
if(request.POST['data_type']=='raw'):
r('y.loess<-loess(2**y~x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
elif(request.POST['data_type']=='log2'):
r('y.loess<-loess(2**y~2**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))')
else:
r('y.loess<-loess(2**y~10**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))')
except:
                error_reason='Too few genes matched. Check your gene symbols again. We use NCBI standard gene symbols.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
#r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
data=r('newx')
#print(data[:10])
#print(type(data))
val=np.hstack((np.array(val), np.array(data)))
val=val[~np.isnan(val).any(axis=1)]
val=np.transpose(val)
else:
#val=np.array(val)
val=val[np.ix_(uni)]
user_offset=len(val[0])
if(gene_flag==1):
#print("sample with ngs data do rank invariant here")
ref_path=Path('../').resolve().joinpath('src','cv_result.txt')
ref=pd.read_csv(ref_path.as_posix())
col=list(ref.columns.values)
col[0]='symbol'
ref.columns=col
ref.index = ref['symbol']
ref.index.name = None
ref=ref.iloc[:, 1:]
ref=ref.iloc[:5000,:] #rank invariant need 5000 genes
same_gene=list(ref.index.intersection(data.index))
#to lowess
rref=pandas2ri.py2ri(ref.loc[same_gene])
rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1))
rall=pandas2ri.py2ri(data)
ro.globalenv['x'] = rngs
ro.globalenv['y'] = rref
ro.globalenv['newx'] = rall
r('x<-as.vector(as.matrix(x))')
r('y<-as.vector(as.matrix(y))')
r('newx<-as.matrix(newx)')
try:
if(request.POST['data_type']=='raw'):
r('y.loess<-loess(2**y~x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
elif(request.POST['data_type']=='log2'):
r('y.loess<-loess(2**y~2**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))')
else:
r('y.loess<-loess(2**y~10**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))')
except:
                error_reason='Too few genes matched. Check your gene symbols again. We use NCBI standard gene symbols.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
#r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
data=r('newx')
#print(data[:10])
#print(type(data))
val=np.hstack((np.array(val), np.array(data)))
val=val[~np.isnan(val).any(axis=1)]
pca_index=[]
dis_offset=[]
#PREMISE:same dataset same cell line will have only one type of primary site and primary histology
name1=[]
name2=[]
name3=[]
name4=[]
name5=[]
X1=[]
Y1=[]
Z1=[]
X2=[]
Y2=[]
Z2=[]
X3=[]
Y3=[]
Z3=[]
X4=[]
Y4=[]
Z4=[]
X5=[]
Y5=[]
Z5=[]
n=4 #need to fix to the best one
if 'd_sample' in show:
#count the pca first
pca= PCA(n_components=n)
#combine user sample's offset to all_offset in another variable
Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[1:n])
table_propotion=sum(ratio_temp[0:n])
user_new_offset=len(all_offset)
##print(Xval)
max=0
min=10000000000
out_group=[]
exist_cell={}#cell line object:counter
for g in range(1,group_counter+1):
output_cell={}
check={}
for s in range(g_s_counter[g-1],g_s_counter[g]):
if str(type(all_sample[s]))=="<class 'probes.models.Sample'>":
cell=all_sample[s].cell_line_id
else:
cell=all_sample[s]
try:
counter=exist_cell[cell]
exist_cell[cell]=counter+1
except KeyError:
exist_cell[cell]=1
try:
t=output_cell[cell]
except KeyError:
output_cell[cell]=[cell,[]]
check[all_sample[s].name]=[]
sample_counter[all_sample[s].name]=exist_cell[cell]
for i in range(0,len(all_sample)):
if i!=s:
try:
if(all_sample[s].name not in check[all_sample[i].name]):
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site
,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance])
check[all_sample[s].name].append(all_sample[i].name)
except KeyError:
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site
,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance])
check[all_sample[s].name].append(all_sample[i].name)
g_count=1
u_count=len(user_dict[g_count][0]) #sample number in first user file
for i in range(user_new_offset,user_new_offset+len(origin_name)): #remember to prevent empty file uploaded
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name," ",origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance])
if ((i-user_new_offset+1)==u_count):
g_count+=1
try:
u_count+=len(user_dict[g_count][0])
except KeyError:
u_count+=0
if(g==1):
name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X3.append(round(Xval[s][n-3],5))
Y3.append(round(Xval[s][n-2],5))
Z3.append(round(Xval[s][n-1],5))
elif(g==2):
name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X4.append(round(Xval[s][n-3],5))
Y4.append(round(Xval[s][n-2],5))
Z4.append(round(Xval[s][n-1],5))
elif(g==3):
name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X5.append(round(Xval[s][n-3],5))
Y5.append(round(Xval[s][n-2],5))
Z5.append(round(Xval[s][n-1],5))
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
out_group.append(["Dataset Group"+str(g),output_cell])
if g==group_counter:
output_cell={}
g_count=1
output_cell[g_count]=[" ",[]]
u_count=len(user_dict[g_count][0])
temp_count=u_count
temp_g=1
before=0
for i in range(user_new_offset,user_new_offset+len(origin_name)):
for x in range(0,len(all_sample)):
distance=np.linalg.norm(Xval[x][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count),all_cellline[x]
,all_sample[x].name,cell_object[x].primary_site,cell_object[x].primary_hist,
all_sample[x].dataset_id.name,distance])
temp_g=1
temp_count=len(user_dict[temp_g][0])
for j in range(user_new_offset,user_new_offset+before):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
temp_g=g_count
temp_count=len(user_dict[g_count][0])
for j in range(i+1,user_new_offset+len(origin_name)):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
if g_count==1:
name1.append(origin_name[i-user_new_offset])
X1.append(round(Xval[i][n-3],5))
Y1.append(round(Xval[i][n-2],5))
Z1.append(round(Xval[i][n-1],5))
else:
name2.append(origin_name[i-user_new_offset])
X2.append(round(Xval[i][n-3],5))
Y2.append(round(Xval[i][n-2],5))
Z2.append(round(Xval[i][n-1],5))
if ((i-user_new_offset+1)==u_count):
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
user_out_group.append(["User Group"+str(g_count),output_cell])
g_count+=1
before=u_count+before
#print("I am here!!")
try:
u_count+=len(user_dict[g_count][0])
output_cell={}
output_cell[g_count]=[" ",[]]
except KeyError:
u_count+=0
#[g,[group_cell_1 object,[[outputs paired1,......,],[paired2],[paired3]]],[group_cell_2 object,[[pair1],[pair2]]]]
#for xx in origin_name:
#sample_counter[xx]=1
##print(out_group)
element_counter=0
for i in out_group:
for temp_list in i[1]:
element_counter=element_counter+len(temp_list[1])
for temp in temp_list[1]:
if(temp[5]!=" "):
temp[5]=temp[5]+'('+str(sample_counter[temp[6]])+')'
for i in user_out_group:
for temp_list in i[1]:
for temp in temp_list[1]:
if(temp[2]!=" "):
temp[2]=temp[2]+'('+str(sample_counter[temp[3]])+')'
return_html='user_pca.html'
else:
#This part is for centroid display
return_html='user_pca_center.html'
#This part is for select cell line base on dataset,count centroid base on the dataset
        # compute the centroid per cell line within each group
location_dict={} #{group number:[[cell object,dataset,new location]]}
combined=[]
sample_list=[]
pca_index=np.array(pca_index)
X_val=[]
val_a=np.array(val)
a_all_offset=np.array(all_offset)
a_sample=np.array(all_sample)
for i in range(1,group_counter+1):
dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) #cell object may have duplicate cell line since:NCI A + CCLE A===>[A,A]
location_dict['g'+str(i)]=[]
dataset_dict={}
a_cell_object=np.array(cell_object)
for c in dis_cellline: #dis_cellline may not have the same order as cell_object
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_val=val_a[:,a_all_offset[total_offset]]
selected_val=np.transpose(selected_val)
new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0]
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
d_temp=[]
for s in selected_sample:
d_temp.append(s.dataset_id.name)
dataset_dict[c]="/".join(list(set(d_temp)))
X_val.append(new_loca)
location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val
combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order
#run the pca
user_new_offset=len(X_val)
temp_val=np.transpose(val[:,user_offset:])
for x in range(0,len(temp_val)):
X_val.append(list(temp_val[x]))
if(len(X_val)<4):
error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The total number is not enough.<br />'\
            'The total number of dots in your uploaded file is '+str(len(temp_val))+'.<br />'\
'The number of centroid dots you selected is '+str(len(X_val)-len(temp_val))+'.<br />'\
'Total is '+str(len(X_val))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
X_val=np.matrix(X_val)
pca= PCA(n_components=n)
new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[1:n])
table_propotion=sum(ratio_temp[0:n])
#print(new_val)
out_group=[]
min=10000000000
max=0
element_counter=0
for g in range(1,group_counter+1):
output_cell=[]
exist_cell={}
for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one
cell=group_c[0]
key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1]
exist_cell[key_string]=[]
output_cell.append([cell,[]])
#count the distance
for temp_list in combined:
c=temp_list[0]
temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1]
try:
if(key_string not in exist_cell[temp_string]):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist
,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance])
element_counter=element_counter+1
except KeyError:
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist
,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance])
element_counter=element_counter+1
exist_cell[key_string].append(temp_string)
g_count=1
u_count=len(user_dict[g_count][0]) #sample number in first user file
for i in range(user_new_offset,user_new_offset+len(origin_name)):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[i][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist,group_c[1]
,origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance])
element_counter=element_counter+1
if ((i-user_new_offset+1)==u_count):
g_count+=1
try:
u_count+=len(user_dict[g_count][0])
except KeyError:
u_count+=0
if(g==1):
name3.append(cell.name+'<br>'+group_c[1])
X3.append(round(new_val[group_c[2]][n-3],5))
Y3.append(round(new_val[group_c[2]][n-2],5))
Z3.append(round(new_val[group_c[2]][n-1],5))
elif(g==2):
name4.append(cell.name+'<br>'+group_c[1])
X4.append(round(new_val[group_c[2]][n-3],5))
Y4.append(round(new_val[group_c[2]][n-2],5))
Z4.append(round(new_val[group_c[2]][n-1],5))
elif(g==3):
name5.append(cell.name+'<br>'+group_c[1])
X5.append(round(new_val[group_c[2]][n-3],5))
Y5.append(round(new_val[group_c[2]][n-2],5))
Z5.append(round(new_val[group_c[2]][n-1],5))
out_group.append(["Dataset Group"+str(g),output_cell])
if g==group_counter:
output_cell=[]
g_count=1
output_cell.append([" ",[]])
u_count=len(user_dict[g_count][0])
temp_count=u_count
temp_g=1
before=0
for i in range(user_new_offset,user_new_offset+len(origin_name)):
for temp_list in combined:
c=temp_list[0]
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,c.name,c.primary_site,c.primary_hist,temp_list[1],distance])
temp_g=1
temp_count=len(user_dict[temp_g][0])
for j in range(user_new_offset,user_new_offset+before):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[j][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
temp_g=g_count
temp_count=len(user_dict[g_count][0])
for x in range(i+1,user_new_offset+len(origin_name)):
if ((x-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[x][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,origin_name[x-user_new_offset]," "," ","User Group"+str(temp_g),distance])
if g_count==1:
name1.append(origin_name[i-user_new_offset])
X1.append(round(new_val[i][n-3],5))
Y1.append(round(new_val[i][n-2],5))
Z1.append(round(new_val[i][n-1],5))
else:
name2.append(origin_name[i-user_new_offset])
X2.append(round(new_val[i][n-3],5))
Y2.append(round(new_val[i][n-2],5))
Z2.append(round(new_val[i][n-1],5))
if ((i-user_new_offset+1)==u_count):
user_out_group.append(["User Group"+str(g_count),output_cell])
g_count+=1
before=u_count+before
#print("I am here!!")
try:
u_count+=len(user_dict[g_count][0])
output_cell=[]
output_cell.append([" ",[]])
except KeyError:
u_count+=0
#print(element_counter)
#print(show_row)
if(element_counter>show_row):
big_flag=1
sid=str(uuid.uuid1())+".csv"
if(return_html=='user_pca.html'):
dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
else:
dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
P=Path('../').resolve().joinpath('src','static','csv',"dataset_"+sid)
userP=Path('../').resolve().joinpath('src','static','csv',"user_"+sid)
assP=Path('../').resolve().joinpath('src','assets','csv',"dataset_"+sid)
assuserP=Path('../').resolve().joinpath('src','assets','csv',"user_"+sid)
#print("start writing files")
with open(str(assP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in out_group:
writer.writerows([[dgroup[int(index[-1])-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
writer.writerows(b)
#print("end writing first file")
'''
with open(str(assP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in out_group:
writer.writerows([[index]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
writer.writerows(b)
'''
#print("end writing 2 file")
with open(str(assuserP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in user_out_group:
writer.writerows([[ugroup[int(index[-1])-1]]])
writer.writerows([user_header])
for cell_line,b in output_cell:
writer.writerows(b)
#print("end writing 3 file")
'''
with open(str(userP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in user_out_group:
writer.writerows([[index]])
writer.writerows([user_header])
for cell_line,b in output_cell:
writer.writerows(b)
'''
#print("end writing 4 file")
data_file_name="dataset_"+sid
user_file_name="user_"+sid
else:
big_flag=0
data_file_name=0
user_file_name=0
return render_to_response(return_html,RequestContext(request,
{
'ugroup':ugroup,
'dgroup':dgroup,
'min':min,'max':max,
'big_flag':big_flag,
'out_group':out_group,'user_out_group':user_out_group,
'propotion':propotion,
'table_propotion':table_propotion,
'data_file_name':data_file_name,
'user_file_name':user_file_name,
'X1':X1,'name1':mark_safe(json.dumps(name1)),
'Y1':Y1,'name2':mark_safe(json.dumps(name2)),
'Z1':Z1,'name3':mark_safe(json.dumps(name3)),
'X2':X2,'name4':mark_safe(json.dumps(name4)),
'Y2':Y2,'name5':mark_safe(json.dumps(name5)),
'Z2':Z2,
'X3':X3,
'Y3':Y3,
'Z3':Z3,
'X4':X4,
'Y4':Y4,
'Z4':Z4,
'X5':X5,
'Y5':Y5,
'Z5':Z5,
}))
#notice that we need to return a user_pca_center.html, too!!
#return render_to_response('welcome.html',locals())
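# --- illustrative addendum (not part of the original views module) ---
# A minimal sketch of the rank-based quantile mapping applied to uploaded samples
# in user_pca(): each column is dense-ranked and every rank is replaced by the
# corresponding value of a stored reference quantile vector. The reference values
# and the toy expression matrix below are invented; reference_quantiles stands in
# for the vector loaded from ranking_u133a.npy / ranking_u133plus2.npy.
def _quantile_mapping_sketch():
    reference_quantiles = np.linspace(2.0, 14.0, 5)        # stand-in reference values
    toy = pd.DataFrame({'s1': [5.0, 1.0, 3.0, 2.0, 4.0],
                        's2': [0.1, 0.9, 0.5, 0.7, 0.3]})
    ranks = toy.rank(method='dense').astype(int)           # 1-based dense ranks per column
    mapped = toy.copy()
    for col in toy.columns:
        mapped[col] = reference_quantiles[ranks[col].values - 1]
    return mapped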
def express_profiling(request):
return render(request, 'express_profiling.html', generate_samples())
def welcome(request):
return render_to_response('welcome.html',locals())
def help(request):
example_name="CellExpress_Examples.pptx"
tutorial_name="CellExpress_Tutorial.pptx"
return render_to_response('help.html',locals())
def help_similar_assessment(request):
return render_to_response('help_similar_assessment.html',RequestContext(request))
def similar_assessment(request):
return render(request, 'similar_assessment.html', generate_samples())
def gene_signature(request):
return render(request, 'gene_signature.html', generate_samples())
def heatmap(request):
group1=[]
group2=[]
group_count=0
presult={} #{probe object:p value}
expression=[]
probe_out=[]
sample_out=[]
not_found=[]
quantile_flag=0
ratio_flag=0
indata=[]
#get probe from different platform
pform=request.POST.get('data_platform','U133A')
if(pform=="mix_quantile"):
pform="U133A"
quantile_flag=1
if(pform=="mix_ratio"):
pform="U133A"
ratio_flag=1
stop_end=601
return_page_flag=0
user_probe_flag=0
if(request.POST['user_type']=="all"):
all_probe=ProbeID.objects.filter(platform__name=pform).order_by('offset')
probe_offset=list(all_probe.values_list('offset',flat=True))
pro_number=float(request.POST['probe_number']) #significant 0.05 or 0.01
all_probe=list(all_probe)
elif(request.POST['user_type']=="genes"): #for all genes
return_page_flag=1
if(pform=="U133A"):
probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt')
gene_list = pd.read_csv(probe_path.as_posix())
all_probe=list(gene_list['SYMBOL'])
else:
probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt')
gene_list = pd.read_csv(probe_path.as_posix())
all_probe=list(gene_list['SYMBOL'])
pro_number=float(request.POST['probe_number_gene'])
probe_offset=[]
for i in range(0,len(all_probe)):
probe_offset.append(i)
else:
indata=request.POST['keyword']
indata = list(set(indata.split()))
if(request.POST['gtype']=="probeid"):
user_probe_flag=1
all_probe=ProbeID.objects.filter(platform__name=pform,Probe_id__in=indata).order_by('offset')
probe_offset=list(all_probe.values_list('offset',flat=True))
pro_number=float('+inf')
not_found=list(set(set(indata) - set(all_probe.values_list('Probe_id',flat=True))))
all_probe=list(all_probe)
else:
probe_offset=[]
return_page_flag=1
pro_number=float('+inf')
if(pform=="U133A"):
probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt')
gene_list = pd.read_csv(probe_path.as_posix())
gene=list(gene_list['SYMBOL'])
else:
probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt')
gene_list = pd.read_csv(probe_path.as_posix())
gene=list(gene_list['SYMBOL'])
probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt')
info=pd.read_csv(probe_path.as_posix(),sep='\t')
col=list(info.columns.values)
col[0]='symbol'
info.columns=col
info.index = info['symbol']
info.index.name = None
info=info.iloc[:, 1:]
all_probe=[]
for i in indata:
try:
probe_offset.append(gene.index(i))
all_probe.append(i)
info=info.drop(i,errors='ignore')
except ValueError:
re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first
if(len(re_symbol)!=0):
re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not
repeat=len(re_match)
if(repeat!=0): #match gene symbol in database
##print(re_match)
for x in re_match:
info=info.drop(x.symbol,errors='ignore')
probe_offset.append(x.offset)
all_probe.append(i+"("+x.symbol+")")
else:
not_found.append(i)
else:
not_found.append(i)
#count the number of group
group_counter=1
check_set=[]
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
#get binary data
s_group_dict={} #store sample
val=[] #store value get from binary data
group_name=[]
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
#print(clline)
opened_name=[]
opened_val=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
temp_name='g'+str(i)
group_name.append(temp_name)
a_data=np.array([])
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
ACELL=request.POST.getlist(c)
s=Sample.objects.filter(dataset_id__name__in=[dn],cell_line_id__name__in=ACELL).order_by('dataset_id').select_related('cell_line_id__name','dataset_id')
s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)]
goffset=list(s.values_list('offset',flat=True))
#print(goffset)
if dn not in opened_name: #check if the file is opened
#print("opend file!!")
opened_name.append(dn)
if(return_page_flag==1):
pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path)
gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset]
else:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path)
gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True))
raw_val=np.load(pth.as_posix(),mmap_mode='r')
if(ratio_flag==1):
norm=raw_val[np.ix_(gap)]
raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True))
opened_val.append(raw_val)
temp=raw_val[np.ix_(probe_offset,list(goffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=raw_val[np.ix_(probe_offset,list(goffset))]
else:
temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))]
elif dn in clinic:
#print("I am in clinical part")
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
cgoffset=[]
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)]
cgoffset+=list(s.values_list('offset',flat=True))
if dn not in opened_name: #check if the file is opened
#print("opend file!!")
opened_name.append(dn)
if(return_page_flag==1):
pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset]
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path)
gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True))
raw_val=np.load(pth.as_posix(),mmap_mode='r')
if(ratio_flag==1):
norm=raw_val[np.ix_(gap)]
raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True))
opened_val.append(raw_val)
temp=raw_val[np.ix_(probe_offset,list(cgoffset))]
#print(temp)
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=raw_val[np.ix_(probe_offset,list(cgoffset))]
else:
temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))]
val.append(a_data.tolist())
#print(len(val))
#print(len(val[0]))
##print(val)
#run the one way ANOVA test or ttest for every probe base on the platform selected
express={}
#logger.info('run ttest or anova')
if group_counter<=2:
for i in range(0,len(all_probe)): #need to fix if try to run on laptop
presult[all_probe[i]]=stats.ttest_ind(list(val[0][i]),list(val[1][i]),equal_var=False,nan_policy='omit')[1]
express[all_probe[i]]=np.append(val[0][i],val[1][i]).tolist()
else:
for i in range(0,len(all_probe)): #need to fix if try to run on laptop
to_anova=[]
for n in range(0,group_counter):
#val[n]=sum(val[n],[])
to_anova.append(val[n][i])
presult[all_probe[i]]=stats.f_oneway(*to_anova)[1]
express[all_probe[i]]=sum(to_anova,[])
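    # Hedged sketch of what the branch above produces (toy shapes only): with two
    # groups, stats.ttest_ind(group1_row, group2_row, equal_var=False,
    # nan_policy='omit')[1] yields one Welch t-test p-value per probe; with three
    # or more groups, stats.f_oneway(row1, row2, row3, ...)[1] yields a one-way
    # ANOVA p-value instead.  Either way presult maps probe -> p-value and
    # express maps probe -> the pooled expression values used for the heatmap.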
#print("test done")
#sort the dictionary with p-value and need to get the expression data again (top20)
#presult[all_probe[0]]=float('nan')
#presult[all_probe[11]]=float('nan')
#how to deal with all "nan"?
tempf=pd.DataFrame(list(presult.items()), columns=['probe', 'pvalue'])
tempf=tempf.replace(to_replace=float('nan'),value=float('+inf'))
presult=dict(zip(tempf.probe, tempf.pvalue))
sortkey=sorted(presult,key=presult.get) #can optimize here
counter=1
cell_probe_val=[]
for w in sortkey:
#print(presult[w],":",w)
if (presult[w]<pro_number):
cell_probe_val.append([w,presult[w]])
print(cell_probe_val)
express_mean=np.mean(np.array(express[w]))
expression.append(list((np.array(express[w]))-express_mean))
if(return_page_flag==1):
probe_out.append(w)
else:
probe_out.append(w.Probe_id+"("+w.Gene_symbol+")")
counter+=1
else:
break
if counter>=stop_end:
break
n_counter=1
for n in group_name:
sample_counter=1
for s in s_group_dict[n]:
dataset_n=s.dataset_id.name
if dataset_n=="Sanger Cell Line Project":
sample_out.append(s.cell_line_id.name+"(SCLP)(group"+str(n_counter)+"-"+str(sample_counter)+")")
elif dataset_n in clline:
#print(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_out.append(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
else: #what to output for clinical part?
#print(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_out.append(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_counter+=1
n_counter+=1
#logger.info('finish combine output samples')
sns.set(font="monospace")
test=pd.DataFrame(data=expression,index=probe_out,columns=sample_out)
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
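    # Note on the segment data above: matplotlib reads each channel as triples
    # (x, value_below_x, value_above_x) over x in [0, 1].  Blue stays at 0, green
    # falls from 1.0 to 0 and red rises to 1.0 around x = 0.5, i.e. the usual
    # green -> black -> red expression colormap.  A roughly equivalent hedged
    # sketch with the list helper (not what this code uses) would be:
    #   LinearSegmentedColormap.from_list('my_colormap', ['green', 'black', 'red'], N=256)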
#test.to_csv('heatmap_text.csv')
try:
#g = sns.clustermap(test,cmap=my_cmap)
if(len(probe_out)<=300):
g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(19.20,48.60))
else:
g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(30.00,100.00))
except:
if((return_page_flag==1) and (probe_out==[])):
probe_out=indata #probe_out is the rows of heatmap
return render_to_response('noprobe.html',RequestContext(request,
{
'user_probe_flag':user_probe_flag,
'return_page_flag':return_page_flag,
'probe_out':probe_out,
'not_found':not_found
}))
'''
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0)
if counter>=stop_end:
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=4)
else:
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=7)
plt.setp(g.ax_heatmap.get_xticklabels(), rotation=270,ha='center')
'''
sid=str(uuid.uuid1())+".png"
#print(sid)
P=Path('../').resolve().joinpath('src','static','image',sid)
assP=Path('../').resolve().joinpath('src','assets','image',sid)
#g.savefig(str(P))
#plt.figure(figsize=(1920/my_dpi, 2160/my_dpi), dpi=100)
#plt.savefig(str(assP), dpi=my_dpi*10)
g.savefig(str(assP),bbox_inches='tight')
file_name=sid
return render_to_response('heatmap.html',RequestContext(request,
{
'user_probe_flag':user_probe_flag,
'return_page_flag':return_page_flag,
'cell_probe_val':cell_probe_val,
'file_name':file_name,
'pro_number':pro_number,
'not_found':not_found
}))
def pca(request):
propotion=0
table_propotion=0
pform=request.POST['data_platform'] #get the platform
show=request.POST['show_type'] #get the pca show type
group_counter=1
cell_line_dict={}
#count how many group
group_counter=1
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
udgroup=[]
s_group_dict={} #store sample
group_name=[]
offset_group_dict={} #store offset
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
all_exist_dataset=[]
for i in range(1,group_counter+1):
udgroup.append(request.POST['group_name'+str(i)])
#print(udgroup)
if(udgroup[-1]==''):
udgroup[-1]='Group'+str(i)
dname='dataset_g'+str(i)
all_exist_dataset=all_exist_dataset+request.POST.getlist(dname)
all_exist_dataset=list(set(all_exist_dataset))
all_base=[0]
for i in range(0,len(all_exist_dataset)-1):
if all_exist_dataset[i] in clline:
all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
else:
all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
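    # Hedged illustration (hypothetical sizes): if the selected datasets hold 50,
    # 60 and 30 samples, all_base becomes [0, 50, 110]; a sample whose per-dataset
    # offset is 7 in the third dataset therefore maps to column 110 + 7 of the
    # matrix that np.hstack assembles from these datasets further down.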
all_c=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
offset_group_dict['g'+str(i)]=[]
cell_line_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
group_name.append('g'+str(i))
goffset_nci=[]
goffset_gse=[]
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
temp=list(set(request.POST.getlist(c)))
if 'd_sample' in show:
if all_c==[]:
all_c=all_c+temp
uni=temp
else:
uni=list(set(temp)-set(all_c))
all_c=all_c+uni
else:
uni=list(temp) #do not filter duplicate input only when select+centroid
s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id'
).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name')
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True))
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
else: #dealing with clinical sample datasets
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True))
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
all_sample=[]
all_cellline=[]
cell_object=[]
all_offset=[]
sample_counter={}
group_cell=[]
g_s_counter=[0]
for i in range(1,group_counter+1):
all_sample=all_sample+s_group_dict['g'+str(i)] #will not exist duplicate sample if d_sample
all_offset=all_offset+offset_group_dict['g'+str(i)]
all_cellline=all_cellline+cell_line_dict['g'+str(i)]
g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)]))
if 'd_sample' in show:
if((len(all_sample))<4):
error_reason='You should have at least 4 samples for PCA. The samples are not enough.<br />'\
'The number of samples you selected is '+str(len(all_sample))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
for i in all_sample:
sample_counter[i.name]=1
if str(type(i))=="<class 'probes.models.Sample'>":
##print("i am sample!!")
cell_object.append(i.cell_line_id)
else:
##print("i am clinical!!")
cell_object.append(i)
#delete nan, transpose matrix
##open file
for x in range(0,len(all_exist_dataset)):
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
if x==0:
val=np.load(pth.as_posix())
else:
val=np.hstack((val, np.load(pth.as_posix())))#combine together
if 'd_sample' in show:
val=val[:,all_offset]
#val=val[~np.isnan(val).any(axis=1)]
val=np.transpose(val)
pca_index=[]
dis_offset=[]
#PREMISE:same dataset same cell line will have only one type of primary site and primary histology
name1=[]
name2=[]
name3=[]
name4=[]
name5=[]
X1=[]
Y1=[]
Z1=[]
X2=[]
Y2=[]
Z2=[]
X3=[]
Y3=[]
Z3=[]
X4=[]
Y4=[]
Z4=[]
X5=[]
Y5=[]
Z5=[]
if(len(all_exist_dataset)==1):
n=3 #need to fix to the best one #need to fix proportion
else:
n=4
#logger.info('pca show')
if 'd_sample' in show:
#count the pca first
pca= PCA(n_components=n)
Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original all_offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[n-3:n])
table_propotion=sum(ratio_temp[0:n])
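        # Hedged example (ratios are illustrative only): with n = 4 components,
        # explained_variance_ratio_ might be [0.45, 0.20, 0.10, 0.05]; the 3-D plot
        # uses the last three components, so propotion = 0.35, while
        # table_propotion = 0.80 covers all four components listed in the table.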
##print(Xval)
##print(all_cellline)
##print(all_sample)
max=0
min=10000000000
out_group=[]
exist_cell={}#cell line object:counter
for g in range(1,group_counter+1):
output_cell={}
check={}
for s in range(g_s_counter[g-1],g_s_counter[g]):
if str(type(all_sample[s]))=="<class 'probes.models.Sample'>":
cell=all_sample[s].cell_line_id
else:
cell=all_sample[s]
try:
counter=exist_cell[cell]
exist_cell[cell]=counter+1
except KeyError:
exist_cell[cell]=1
try:
t=output_cell[cell]
except KeyError:
output_cell[cell]=[cell,[]]
check[all_sample[s].name]=[]
sample_counter[all_sample[s].name]=exist_cell[cell]
for i in range(0,len(all_sample)):
if i!=s:
try:
if(all_sample[s].name not in check[all_sample[i].name]):
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]])
check[all_sample[s].name].append(all_sample[i].name)
except KeyError:
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]])
check[all_sample[s].name].append(all_sample[i].name)
if(g==1):
name1.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X1.append(round(Xval[s][n-3],5))
Y1.append(round(Xval[s][n-2],5))
Z1.append(round(Xval[s][n-1],5))
elif(g==2):
name2.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X2.append(round(Xval[s][n-3],5))
Y2.append(round(Xval[s][n-2],5))
Z2.append(round(Xval[s][n-1],5))
elif(g==3):
name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X3.append(round(Xval[s][n-3],5))
Y3.append(round(Xval[s][n-2],5))
Z3.append(round(Xval[s][n-1],5))
elif(g==4):
name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X4.append(round(Xval[s][n-3],5))
Y4.append(round(Xval[s][n-2],5))
Z4.append(round(Xval[s][n-1],5))
elif(g==5):
name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X5.append(round(Xval[s][n-3],5))
Y5.append(round(Xval[s][n-2],5))
Z5.append(round(Xval[s][n-1],5))
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
out_group.append([g,output_cell])
element_counter=0
#[g,[[group_cell_line,[paired_cellline,......,]],[],[]]]
for i in out_group:
for temp_list in i[1]:
element_counter+=len(temp_list[1])
for temp in temp_list[1]:
##print(temp)
temp[3]=temp[3]+'('+str(sample_counter[temp[4]])+')'
return_html='pca.html'
else:
#This part is for centroid display
return_html='pca_center.html'
element_counter=0
#val=val[~np.isnan(val).any(axis=1)] #bottle neck???
        #This part selects cell lines based on the chosen datasets and computes centroids
        #using each group's cell lines as the unit (one centroid per cell line per group)
#logger.info('pca show centroid with selection')
location_dict={} #{group number:[[cell object,dataset,new location]]}
combined=[]
sample_list=[]
pca_index=np.array(pca_index)
X_val=[]
a_all_offset=np.array(all_offset)
for i in range(1,group_counter+1):
dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) #cell object may have duplicate cell line since:NCI A + CCLE A===>[A,A]
location_dict['g'+str(i)]=[]
dataset_dict={}
a_cell_object=np.array(cell_object)
for c in dis_cellline: #dis_cellline may not have the same order as cell_object
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_val=val[:,a_all_offset[total_offset]]
selected_val=np.transpose(selected_val)
new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0]
a_sample=np.array(all_sample)
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
##print(selected_sample)
d_temp=[]
for s in selected_sample:
d_temp.append(s.dataset_id.name)
dataset_dict[c]="/".join(list(set(d_temp)))
##print(dataset_dict[c])
X_val.append(new_loca)
location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val
combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order
#run the pca
##print(len(X_val))
if((len(X_val))<4):
error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The dots are not enough.<br />'\
'The number of centroid dots you selected is '+str(len(X_val))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
X_val=np.matrix(X_val)
pca= PCA(n_components=n)
new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[n-3:n])
table_propotion=sum(ratio_temp[0:n])
##print(new_val)
out_group=[]
min=10000000000
max=0
for g in range(1,group_counter+1):
output_cell=[]
exist_cell={}
for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one
cell=group_c[0]
key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1]
exist_cell[key_string]=[]
output_cell.append([cell,[]])
#count the distance
for temp_list in combined:
c=temp_list[0]
temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1]
try:
if(key_string not in exist_cell[temp_string]):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance])
element_counter+=1
exist_cell[key_string].append(temp_string)
except KeyError:
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance])
element_counter+=1
exist_cell[key_string].append(temp_string)
if(g==1):
name1.append(cell.name+'<br>'+group_c[1])
X1.append(round(new_val[group_c[2]][n-3],5))
Y1.append(round(new_val[group_c[2]][n-2],5))
Z1.append(round(new_val[group_c[2]][n-1],5))
elif(g==2):
name2.append(cell.name+'<br>'+group_c[1])
X2.append(round(new_val[group_c[2]][n-3],5))
Y2.append(round(new_val[group_c[2]][n-2],5))
Z2.append(round(new_val[group_c[2]][n-1],5))
elif(g==3):
name3.append(cell.name+'<br>'+group_c[1])
X3.append(round(new_val[group_c[2]][n-3],5))
Y3.append(round(new_val[group_c[2]][n-2],5))
Z3.append(round(new_val[group_c[2]][n-1],5))
elif(g==4):
name4.append(cell.name+'<br>'+group_c[1])
X4.append(round(new_val[group_c[2]][n-3],5))
Y4.append(round(new_val[group_c[2]][n-2],5))
Z4.append(round(new_val[group_c[2]][n-1],5))
elif(g==5):
name5.append(cell.name+'<br>'+group_c[1])
X5.append(round(new_val[group_c[2]][n-3],5))
Y5.append(round(new_val[group_c[2]][n-2],5))
Z5.append(round(new_val[group_c[2]][n-1],5))
out_group.append([g,output_cell])
#logger.info('end pca')
if(element_counter>show_row):
big_flag=1
sid=str(uuid.uuid1())+".csv"
if(return_html=='pca.html'):
dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
else:
dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
P=Path('../').resolve().joinpath('src','static','csv',sid)
assP=Path('../').resolve().joinpath('src','assets','csv',sid)
with open(str(assP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in out_group:
writer.writerows([[udgroup[index-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
temp_b=[]
if(return_html=='pca.html'):
for group_cell,sn,dset,cname,sname,setname,dis,cell_object in b:
temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname
,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis])
else:
for group_cell,group_dataset,paired_cell,paired_dataset,dis in b:
temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset
,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis])
writer.writerows(temp_b)
#print('write first file done')
'''
with open(str(assP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in out_group:
writer.writerows([[udgroup[index-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
temp_b=[]
if(return_html=='pca.html'):
for group_cell,sn,dset,cname,sname,setname,dis,cell_object in b:
temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname
,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis])
else:
for group_cell,group_dataset,paired_cell,paired_dataset,dis in b:
temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset
,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis])
writer.writerows(temp_b)
'''
#print('write second file done')
data_file_name=sid
else:
big_flag=0
data_file_name=0
return render_to_response(return_html,RequestContext(request,
{
'udgroup':udgroup,
'min':min,'max':max,
'out_group':out_group,
'propotion':propotion,
'big_flag':big_flag,
'data_file_name':data_file_name,
'table_propotion':table_propotion,
'X1':X1,'name1':mark_safe(json.dumps(name1)),
'Y1':Y1,'name2':mark_safe(json.dumps(name2)),
'Z1':Z1,'name3':mark_safe(json.dumps(name3)),
'X2':X2,'name4':mark_safe(json.dumps(name4)),
'Y2':Y2,'name5':mark_safe(json.dumps(name5)),
'Z2':Z2,
'X3':X3,
'Y3':Y3,
'Z3':Z3,
'X4':X4,
'Y4':Y4,
'Z4':Z4,
'X5':X5,
'Y5':Y5,
'Z5':Z5,
}))
def cellline_microarray(request):
# Pre-fetch the cell line field for all samples.
    # Reduce N queries to 1, where N = number of samples.
d=Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[cell line]]]]
an=[]
for i in d_name:
if i=="Sanger Cell Line Project":
alias='sanger'
elif i=="NCI60":
alias='nci'
elif i=="GSE36133":
alias='gse'
else:
alias=i
an.append(alias)
sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id')
datasets.append([i,alias,list(sample),[]])
sites=list(sample.values_list('cell_line_id__primary_site',flat=True))
hists=list(sample.values_list('cell_line_id__name',flat=True))
dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
return render(request, 'cellline_microarray.html', {
'an':mark_safe(json.dumps(an)),
'd_name':d_name,
'datasets':datasets,
})
def cell_lines(request):
#samples = Sample.objects.all().select_related('cell_line_id','dataset_id')
#lines=CellLine.objects.all().distinct()
#val_pairs = (
# (l, l.fcell_line_id.prefetch_related('dataset_id__name').values_list('dataset_id__name',flat=True).distinct())
# for l in lines
# )
#context['val_pairs']=val_pairs
cell_line_dict={}
context={}
nr_samples=[]
samples=Sample.objects.all().select_related('cell_line_id','dataset_id').order_by('id')
for ss in samples:
name=ss.cell_line_id.name
primary_site=ss.cell_line_id.primary_site
primary_hist=ss.cell_line_id.primary_hist
comb=name+"/"+primary_site+"/"+primary_hist
dataset=ss.dataset_id.name
try:
sets=cell_line_dict[comb]
if (dataset not in sets):
cell_line_dict[comb]=dataset+"/"+sets
except KeyError:
cell_line_dict[comb]=dataset
nr_samples.append(ss)
val_pairs = (
(ss,cell_line_dict[ss.cell_line_id.name+"/"+ss.cell_line_id.primary_site+"/"+ss.cell_line_id.primary_hist])
for ss in nr_samples
)
context['val_pairs']=val_pairs
return render_to_response('cell_line.html', RequestContext(request, context))
def clinical_search(request):
norm_name=[request.POST['normalize']] #get the normalize gene name
#f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
#get the probe/gene/id keywords
if 'keyword' in request.POST and request.POST['keyword'] != '':
words = request.POST['keyword']
words = list(set(words.split()))
else:
return HttpResponse("<p>where is your keyword?</p>")
plus2_rank=np.load('ranking_u133plus2.npy') #open only plus2 platform rank
sample_probe_val_pairs=[] #for output
if 'gtype' in request.POST and request.POST['gtype'] == 'probeid':
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Probe_id__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
##print(gene)
elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol':
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
else:
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Entrez_id__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
if request.POST['clinical_method'] == 'prim_dataset':
if 'dataset' in request.POST and request.POST['dataset'] != '':
datas=request.POST.getlist('dataset')
else:
d=Clinical_Dataset.objects.all()
datas=d.values_list('name',flat=True)
com_hists=list(set(request.POST.getlist('primhist')))
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_primh')
for i in temp:
if 'stage/' in i:
stage.append(i[6:])
elif 'gender/' in i:
gender.append(i[7:])
elif 'ethnic/' in i:
ethnic.append(i[7:])
elif 'grade/' in i:
grade.append(i[6:])
elif 'stageT/' in i:
T.append(i[7:])
elif 'stageN/' in i:
N.append(i[7:])
elif 'stageM/' in i:
M.append(i[7:])
elif 'metastatic/' in i:
metas.append(i[11:])
'''
if i[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(i[4:])
for sets in datas:
samples=[]
offset=[]
if request.POST['clinical_method'] == 'prim_dataset':
com_hists=list(set(request.POST.getlist('primd_'+sets))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+sets)
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for i in temp:
if 'stage/' in i:
stage.append(i[6:])
elif 'gender/' in i:
gender.append(i[7:])
elif 'ethnic/' in i:
ethnic.append(i[7:])
elif 'grade/' in i:
grade.append(i[6:])
elif 'stageT/' in i:
T.append(i[7:])
elif 'stageN/' in i:
N.append(i[7:])
elif 'stageM/' in i:
M.append(i[7:])
elif 'metastatic/' in i:
metas.append(i[11:])
'''
if i[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(i[4:])
for i in range(0,len(prims)):
#metas=[bool(x) for x in metas]
s=Clinical_sample.objects.filter(dataset_id__name=sets,primary_site=prims[i],
primary_hist=hists[i],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas
).select_related('dataset_id').order_by('id')
samples+=list(s)
offset+=list(s.values_list('offset',flat=True))
##print(s)
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=sets).data_path)
val=np.load(pth.as_posix(),mmap_mode='r')
norm_probe=ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=norm_name).order_by('id')
probe_offset=list(norm_probe.values_list('offset',flat=True))
temp=val[np.ix_(probe_offset,offset)]
norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
        # Build the list of (sample, probe, raw value, rank, normalized value) tuples
if(len(gene)!=0 and len(samples)!=0):
raw_test=val[np.ix_(probe,offset)]
normalize=np.subtract(raw_test,norm)#dimension different!!!!
sample_probe_val_pairs += [
(c, p, raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(samples)
]
return render(request, 'clinical_search.html', {
'sample_probe_val_pairs': sample_probe_val_pairs,
})
def data(request):
SANGER=[]
sanger_flag=0
NCI=[]
nci_flag=0
GSE=[]
gse_flag=0
cell=[]
ncicell=[]
CCcell=[]
ps_id='0'
pn_id='0'
if request.POST.get('cell_line_method','text') == 'text':
if request.POST['cellline'] =='':
return HttpResponse("<p>please make sure to enter cell line name in Step3.</p>" )
c = request.POST['cellline']
c = list(set(c.split()))
sanger_flag=1
samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id')
cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=c).order_by('id')
offset=list(cell.values_list('offset',flat=True))
ps_id='1'
nci_flag=1
ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id')
ncicell=ncisamples.filter(cell_line_id__name__in=c).order_by('id')
ncioffset=list(ncicell.values_list('offset',flat=True))
pn_id='3'
gse_flag=1
CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id')
CCcell=CCsamples.filter(cell_line_id__name__in=c).order_by('id')
CCoffset=list(CCcell.values_list('offset',flat=True))
pn_id='3'
else:
if 'dataset' in request.POST and request.POST['dataset'] != '':
datas=request.POST.getlist('dataset')
if 'Sanger Cell Line Project' in datas:
sanger_flag=1
SANGER=list(set(request.POST.getlist('select_sanger')))
samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id')
cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=SANGER).order_by('id')
offset=list(cell.values_list('offset',flat=True))
ps_id=str(Platform.objects.filter(name__in=["U133A"])[0].id)
if 'NCI60' in datas:
nci_flag=1
NCI=list(set(request.POST.getlist('select_nci')))
ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id')
ncicell=ncisamples.filter(cell_line_id__name__in=NCI).order_by('id')
ncioffset=list(ncicell.values_list('offset',flat=True))
pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id)
if 'GSE36133' in datas:
gse_flag=1
GSE=list(set(request.POST.getlist('select_gse')))
CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id')
CCcell=CCsamples.filter(cell_line_id__name__in=GSE).order_by('id')
CCoffset=list(CCcell.values_list('offset',flat=True))
pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id)
if len(SANGER)==0 and len(NCI)==0 and len(GSE)==0:
return HttpResponse("<p>please select primary sites.</p>" )
else:
return HttpResponse("<p>please check Step3 again.</p>" )
if 'keyword' in request.POST and request.POST['keyword'] != '':
words = request.POST['keyword']
words = list(set(words.split()))
else:
return HttpResponse("<p>where is your keyword?</p>")
#open files
sanger_val_pth=Path('../').resolve().joinpath('src','sanger_cell_line_proj.npy')
nci_val_pth=Path('../').resolve().joinpath('src','nci60.npy')
gse_val_pth=Path('../').resolve().joinpath('src','GSE36133.npy')
sanger_val=np.load(sanger_val_pth.as_posix(),mmap_mode='r')
nci_val=np.load(nci_val_pth.as_posix(),mmap_mode='r')
gse_val=np.load(gse_val_pth.as_posix(),mmap_mode='r')
u133a_rank=np.load('ranking_u133a.npy')
plus2_rank=np.load('ranking_u133plus2.npy')
gene = []
ncigene = []
CCgene = []
context={}
norm_name=[request.POST['normalize']]
if sanger_flag==1:
#if request.POST['normalize']!='NTRK3-AS1':
sanger_g=ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=norm_name).order_by('id')
sanger_probe_offset=list(sanger_g.values_list('offset',flat=True))
temp=sanger_val[np.ix_(sanger_probe_offset,offset)]
norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
#else:
# norm=0.0
else:
norm=0.0 #if / should = 1
if nci_flag==1:
nci_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id')
nci_probe_offset=list(nci_g.values_list('offset',flat=True))
temp=nci_val[np.ix_(nci_probe_offset,ncioffset)]
nci_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
##print(nci_norm)
else:
nci_norm=0.0 #if / should = 1
if gse_flag==1:
CC_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id')
CC_probe_offset=list(CC_g.values_list('offset',flat=True))
temp=gse_val[np.ix_(CC_probe_offset,CCoffset)]
CC_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
##print(CC_norm)
else:
CC_norm=0.0 #if / should = 1
#dealing with probes
if 'gtype' in request.POST and request.POST['gtype'] == 'probeid':
gene = ProbeID.objects.filter(platform__in=ps_id).filter(Probe_id__in=words).order_by('id')
probe_offset=list(gene.values_list('offset',flat=True))
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Probe_id__in=words).order_by('id')
nciprobe_offset=list(ncigene.values_list('offset',flat=True))
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)#dimension different!!!!
#normalize=np.around(normalize, decimals=1)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
(c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol':
gene = ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=words).order_by('id')
probe_offset=gene.values_list('offset',flat=True)
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=words).order_by('id')
nciprobe_offset=ncigene.values_list('offset',flat=True)
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
                (c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
elif 'gtype' in request.POST and request.POST['gtype'] == 'entrez':
                gene = ProbeID.objects.filter(platform__in=ps_id).filter(Entrez_id__in=words).order_by('id')
probe_offset=gene.values_list('offset',flat=True)
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Entrez_id__in=words).order_by('id')
nciprobe_offset=ncigene.values_list('offset',flat=True)
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
(c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
else:
return HttpResponse(
"<p>keyword type not match with your keyword input</p>"
)
| mit |
xulesc/algos | knn/scripts/exp3.py | 1 | 6922 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 23 12:12:31 2014
@author: anuj
"""
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import linear_model, datasets
from sklearn.decomposition import PCA
klas_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.klass'
data_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.num'
klasses = np.genfromtxt(klas_file, skip_header=1)
n_data = np.genfromtxt(data_file, usecols=range(2, 11), skip_header=1,delimiter=',', missing_values='?')
np.random.seed(42)
data = normalize(n_data[~np.isnan(n_data).any(axis=1)], axis = 0)
n_samples, n_features = [len(data), len(data[0])]
n_digits = len(np.unique(klasses))
labels = klasses[~np.isnan(n_data).any(axis=1)]
sample_size = int(10.0 * n_samples / 100)
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
      '    time  inertia    homo   compl  v-meas     ARI     AMI')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
#reduced_data = PCA(n_components=2).fit_transform(data)
#kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
#kmeans.fit(reduced_data)
#
## Step size of the mesh. Decrease to increase the quality of the VQ.
#h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
#
## Plot the decision boundary. For that, we will assign a color to each
#x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
#y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
#xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
#
## Obtain labels for each point in mesh. Use last trained model.
#Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
#
## Put the result into a color plot
#Z = Z.reshape(xx.shape)
#pl.figure(1)
#pl.clf()
#pl.imshow(Z, interpolation='nearest',
# extent=(xx.min(), xx.max(), yy.min(), yy.max()),
# cmap=pl.cm.Paired,
# aspect='auto', origin='lower')
#
#pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
## Plot the centroids as a white X
#centroids = kmeans.cluster_centers_
#pl.scatter(centroids[:, 0], centroids[:, 1],
# marker='x', s=169, linewidths=3,
# color='w', zorder=10)
#pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
# 'Centroids are marked with white cross')
#pl.xlim(x_min, x_max)
#pl.ylim(y_min, y_max)
#pl.xticks(())
#pl.yticks(())
#pl.show()
###############################################################################
data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=0.20, random_state=42)
neigh = KNeighborsClassifier(n_neighbors=5)
neigh.fit(data_train, labels_train)
#print neigh.score(data_test, labels_test)
pred = neigh.predict(data_test)
cm = confusion_matrix(labels_test, pred)
print(cm)
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.ylabel('True label')
pl.xlabel('Predicted label')
pl.show()
###############################################################################
klas_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.klass'
data_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.non_num'
data_file2 = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.num'
klasses = np.genfromtxt(klas_file, skip_header=1)
n_data = np.genfromtxt(data_file, skip_header=1,delimiter=',',dtype='|S5')
n_data_num = np.genfromtxt(data_file2, usecols=range(2, 11), skip_header=1,delimiter=',', missing_values='?')
#n_data = n_data[~np.isnan(n_data).any(axis=1)]
exc = np.isnan(n_data_num).any(axis=1)
n_data_num_n = normalize(n_data_num[~exc], axis = 0)
labels = klasses[~exc]
n_data2 = n_data[~exc]
n_data2 = [x[:len(x) - 1] for x in n_data2]
n_data2 = np.transpose(n_data2)
le = preprocessing.LabelEncoder()
n_data3 = [le.fit(d).transform(d) for d in n_data2]
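# Hedged illustration of the per-column encoding above (toy strings, not the real
# diabetes columns): LabelEncoder assigns consecutive integers to the sorted
# distinct values, so every categorical column becomes an integer column.
_demo_codes = preprocessing.LabelEncoder().fit(['No', 'Yes', 'No']).transform(['No', 'Yes', 'No'])
# _demo_codes == array([0, 1, 0]); '_demo_codes' is only an illustration variable.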
##############
#f_data = np.transpose(n_data3)
f_data = n_data_num_n
#for x in np.transpose(n_data_num_n):
# f_data.append(x)
#f_data = np.transpose(f_data)
##############
data_train, data_test, labels_train, labels_test = train_test_split(f_data, labels, test_size=0.20, random_state=42)
neigh = KNeighborsClassifier(n_neighbors=1)
#neigh = MultinomialNB()
print('%d:%d\n' %(sum(labels_train),len(labels_train)))
neigh.fit(data_train, labels_train)
#print neigh.score(data_test, labels_test)
pred = neigh.predict(data_test)
print('%d:%d:%d:%d\n' %(sum(labels_test),len(labels_test),sum(pred),len(pred)))
cm = confusion_matrix(labels_test, pred)
print(cm)
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.ylabel('True label')
pl.xlabel('Predicted label')
pl.show()
##############
###############################################################################
f = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv'
d1 = np.genfromtxt(f, delimiter = ',', names=True)
names = d1.dtype.names
d = np.genfromtxt(f, delimiter = ',', dtype='|S5', skip_header=1)
dc = np.transpose(d)
for x, name in zip(dc, names):
    print('%s: %d' % (name, len(np.unique(x))))
##
| gpl-3.0 |
RenqinCai/python_dataset | LR/shuffle_v8.py | 2 | 13109 | ###new function:shuffling the reviewing time and debug the program
###R is set to a constant value
import simplejson as json
import datetime
import time
import numpy as np
import math
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from dateutil.relativedelta import *
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
def string_toDatetime(string):
return datetime.datetime.strptime(string, "%Y-%m-%d")
def string_toYear(string):
return datetime.datetime.strptime(string[0:4], "%Y").date()
def string_toYearMonth(string):
return datetime.datetime.strptime(string[0:7], "%Y-%m").date()
def monthDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2.year)*12 + (timeDate1.month-timeDate2.month)
def yearDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2)
def betweenTime(timeDate, downTime, upTime):
if ((monthDiff(timeDate, downTime) < 0)or(monthDiff(upTime, timeDate) < 0)):
return False
else:
return True
#####
###userInfo is a dict keyed by user id, each value holding that user's profile
###userInfo {user:{"sinceTime":sinceTime, "reviewTime":reviewTime, "active":0 or 1, "friends":[]}}
###reviewTime is the first month the user reviewed the business being processed.
###reviewTime and active are recomputed for each business.
###timeUserData {time:[user]}
##########
def loadUser():
userInfo = {}
timeUserData = {}
defaultReviewTime = string_toYearMonth('2015-01')
defaultActive = 0
userSet = set()
userSum = 0
userFile = "../../dataset/user.json"
with open(userFile) as f:
for line in f:
userJson = json.loads(line)
user=userJson["user"]
friend = userJson["friends"]
sinceTime = string_toYearMonth(userJson["sinceTime"])
userInfo.setdefault(user, {})
userInfo[user]["sinceTime"] = sinceTime
userInfo[user]["reviewTime"] = defaultReviewTime
userInfo[user]["active"] = defaultActive
userInfo[user]["friends"] = []
timeUserData.setdefault(sinceTime, [])
timeUserData[sinceTime].append(user)
if friend:
for f in friend:
userInfo[user]["friends"].append(f)
userSum += 1
userSet.add(user)
userList = list(userSet)
print "load Friend"
print "total userSum %d"%userSum
return (userInfo, timeUserData, userSum, userList)
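###Hedged sketch of the structures returned above (toy values, not real data);
###kept commented out so the real pipeline is unchanged:
#_example_userInfo = {"userA": {"sinceTime": datetime.date(2012, 3, 1),
#                               "reviewTime": string_toYearMonth('2015-01'),
#                               "active": 0,
#                               "friends": ["userB", "userC"]}}
#_example_timeUserData = {datetime.date(2012, 3, 1): ["userA"]}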
####load reviewData as format:{business:{reviewTime:[user]}}
####store reviewSum for a business as format: {business:businessSum}
###store timeReviewerDict_allBiz {time:[user]}
def loadReview():
reviewData = {}
reviewSum = {}
timeReviewerDict_allBiz = {}
reviewFile = "../../dataset/review.json"
with open(reviewFile) as f:
for line in f:
reviewJson = json.loads(line)
business = reviewJson["business"]
user = reviewJson["user"]
reviewTime = string_toYearMonth(reviewJson["date"])
reviewData.setdefault(business, {})
reviewData[business].setdefault(reviewTime, [])
reviewData[business][reviewTime].append(user)
timeReviewerDict_allBiz.setdefault(reviewTime, [])
timeReviewerDict_allBiz[reviewTime].append(user)
reviewSum.setdefault(business, 0)
reviewSum[business] += 1
return (reviewData, reviewSum, timeReviewerDict_allBiz)
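####Hedged sketch of the review structures above (toy values only, commented out):
#_example_reviewData = {"bizX": {datetime.date(2013, 5, 1): ["userA", "userB"]}}
#_example_reviewSum = {"bizX": 2}
#_example_timeReviewerDict_allBiz = {datetime.date(2013, 5, 1): ["userA", "userB"]}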
###filter businesses that have more than 50 reviews in the period (bNum > 50 below)
####reviewList contains these businesses
def filterReviewData(reviewData, reviewSum):
print "review process"
reviewSet = set()
for business in reviewSum.keys():
bNum = reviewSum[business]
if bNum > 50:
reviewSet.add(business)
reviewList = list(reviewSet)
# finalBusinessList = list(finalBusinessSet)
print "end process"
return (reviewList)
####selectBusiness is a list containing the randomly selected index positions
def randomBusiness(totalNum, randomNum):
business = [i for i in range(totalNum)]
selectBusiness = []
for i in range(randomNum):
k = np.random.randint(0, totalNum-i)+i
temp = business[i]
business[i] = business[k]
business[k] = temp
selectBusiness.append(business[i])
return selectBusiness
#####map the selected index positions in selectBusiness back to business ids
###and store those business ids in selectBusinessList.
def randomSelectBusiness(reviewList, selectBusinessNum):
businessSet = set(reviewList)
businessLen = len(businessSet)
if businessLen < selectBusinessNum:
selectBusinessList = reviewList
else:
selectBusiness = randomBusiness(businessLen, selectBusinessNum)
selectBusinessList = [reviewList[i] for i in selectBusiness]
return selectBusinessList
def increMonth(baseMonth):
return baseMonth+relativedelta(months=+1)
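###example: increMonth rolls over year boundaries (illustrative call only)
_example_next_month = increMonth(string_toYearMonth('2014-12')) #datetime.date(2015, 1, 1)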
###restrict the review dict to an (at most) 18-month window from the earliest review month and sort it by time
def SortDict_Time(timeValDict, userInfo):
sortedTimeValDict = {}
timeList = sorted(timeValDict)
timeSet = set(timeList)
timeUserDict_oneBiz = {}##{time:[user]}
periodList = []
timeUserLenDict = {}
WList_oneBiz = [] ##w in the paper
tempWList_oneBiz = []
WSet_oneBiz = set()
monthRange = 18
if(monthRange > len(timeList)):
monthRange = len(timeList)
monthTime = timeList[0]
for i in range(monthRange):
periodList.append(monthTime)
if monthTime in timeSet:
sortedTimeValDict.setdefault(monthTime, [])
timeUserLenDict.setdefault(monthTime, 0)
reviewUserList = timeValDict[monthTime]
reviewUserSet = set(reviewUserList)
reviewUserSet = reviewUserSet.difference(WSet_oneBiz)
reviewUserList = list(reviewUserSet)
sortedTimeValDict[monthTime] = reviewUserList
timeUserLenDict[monthTime] = len(reviewUserList)
WSet_oneBiz = WSet_oneBiz.union(reviewUserSet)
monthTime = increMonth(monthTime)
WList_oneBiz = list(WSet_oneBiz)
tempWList_oneBiz = list(WSet_oneBiz)
for t in periodList:
for user in tempWList_oneBiz:
uSinceTime = userInfo[user]["sinceTime"]
if (monthDiff(uSinceTime, t)<=0):
timeUserDict_oneBiz.setdefault(t, [])
timeUserDict_oneBiz[t].append(user)
tempWList_oneBiz.remove(user)
return (sortedTimeValDict, periodList, WList_oneBiz, timeUserDict_oneBiz, timeUserLenDict)
###update the userInfo: "reviewTime", "active" for a business
def UpdateUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz, selectBusiness):
repeatReviewUserSet = set()
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewUserSet = set(reviewUserList)
for u in reviewUserSet:
preActive = userInfo[u]["active"]
if(preActive == 1):
continue
else:
userInfo[u]["active"] = 1
userInfo[u]["reviewTime"] = t
##timeReviewerDict_oneBiz {time:[reviewer]}
def ResetUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz):
defaultReviewTime = string_toYearMonth('2015-01')
defaultActive = 0
for t in timeReviewerDict_oneBiz.keys():
reviewUserSet = set(timeReviewerDict_oneBiz[t])
for u in reviewUserSet:
userInfo[u]["reviewTime"] = defaultReviewTime
userInfo[u]["active"] = defaultActive
def UpdateTimeReviewer_allBiz(reviewData, selectBusiness, timeReviewerDict_oneBiz):
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewData[selectBusiness][t] = reviewUserList
def ResetTimeReviewer_allBiz(reviewData, selectBusiness, timeReviewerDict_oneBiz):
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewData[selectBusiness][t] = reviewUserList
def compute_oneBiz(userInfo, selectBusiness, reviewData):
timereviewerDict_allBiz = dict(reviewData)
reviewDict_oneBiz = timereviewerDict_allBiz[selectBusiness]
(timeReviewerDict_oneBiz, periodList, WList_oneBiz, timeUserDict_oneBiz, timeUserLenDict) = SortDict_Time(reviewDict_oneBiz, userInfo)
###before permute
UpdateUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz, selectBusiness)
(LR_coef, LR_intercept) = LR_oneBiz(periodList, userInfo, timereviewerDict_allBiz)
ResetUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz)
###permuteTime
permute_timeReviewerDict_oneBiz = permuteTime(timeReviewerDict_oneBiz, timeUserDict_oneBiz, periodList, timeUserLenDict)
UpdateUserInfo_oneBiz(userInfo, permute_timeReviewerDict_oneBiz, selectBusiness)
UpdateTimeReviewer_allBiz(timereviewerDict_allBiz, selectBusiness, permute_timeReviewerDict_oneBiz)
(LR_coef2, LR_intercept2) = LR_oneBiz(periodList, userInfo, timereviewerDict_allBiz)
ResetUserInfo_oneBiz(userInfo, permute_timeReviewerDict_oneBiz)
ResetTimeReviewer_allBiz(timereviewerDict_allBiz, selectBusiness, timeReviewerDict_oneBiz)
return (LR_coef, LR_coef2)
def LR_oneBiz(periodList, userInfo, reviewData):
R = 10
Y = [0 for i in range(R+2)]
N = [0 for i in range(R+2)]
feature = []
output = []
activeZeroSum = 0
unactiveZeroSum = 0
positive = 0
negative = 0
totalReviewUserSet = set()
for t in periodList:
#print t
activeUserSet = set()
reviewUserSet = set()
raw_reviewUserSet = set()
###fix bugs: the reviewUserList_oneBiz does not change
for b in reviewData.keys():
if(reviewData[b].has_key(t)):
raw_reviewUserSet = raw_reviewUserSet.union(set(reviewData[b][t]))
reviewUserSet = raw_reviewUserSet
totalReviewUserSet=totalReviewUserSet.union(reviewUserSet)
for u in totalReviewUserSet:
uReviewTime = userInfo[u]["reviewTime"]
uActive = userInfo[u]["active"]
if(uActive == 1):
if (uReviewTime == t):
uActiveFriendSum = activeFriend_Sum(u, userInfo, t)
output.append(uActive)
positive += 1
if(uActiveFriendSum == 0):
activeZeroSum += 1
if uActiveFriendSum > R:
feature.append(R+1)
Y[R+1] += 1
else:
feature.append(uActiveFriendSum)
Y[uActiveFriendSum] += 1
activeUserSet.add(u)
else:
negative += 1
uActiveFriendSum = activeFriend_Sum(u, userInfo, t)
output.append(uActive)
if(uActiveFriendSum == 0):
unactiveZeroSum += 1
if uActiveFriendSum > R:
feature.append(R+1)
N[R+1] += 1
else:
feature.append(uActiveFriendSum)
N[uActiveFriendSum] += 1
totalReviewUserSet = totalReviewUserSet.difference(activeUserSet)
#print "positive %d negative %d"%(positive, negative)
(LR_coef, LR_intercept) = LR_result(feature, output)
return (LR_coef, LR_intercept)
def LR_result(x, y):
#print x
model = LogisticRegression()
x_feature = [[math.log(i+1)] for i in x]
model = model.fit(x_feature, y)
print model.score(x_feature, y)
return (model.coef_, model.intercept_)
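###Hedged usage sketch for LR_result (synthetic counts/labels, kept commented so
###the real pipeline output is unchanged): it fits P(active) on log(1 + count of
###already-active friends) and returns (model.coef_, model.intercept_); a positive
###coefficient would mean more active friends -> higher predicted adoption odds.
#LR_result([0, 1, 3, 0, 5, 2], [0, 0, 1, 0, 1, 1])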
def activeFriend_Sum(user, userInfo, uReviewTime):
friendList = userInfo[user]["friends"]
friendSet = set(friendList)
activeFriendSum = 0
friendLen = len(friendSet)
for f in friendSet:
fActive = userInfo[f]["active"]
if (fActive == 0):
continue
fReviewTime = userInfo[f]["reviewTime"]
if(monthDiff(fReviewTime, uReviewTime)<0):
activeFriendSum += 1
#print "active%d"%activeFriendSum
return activeFriendSum
def compute_oneBiz_helper(args):
return compute_oneBiz(*args)
def permuteTime(timeReviewerDict_oneBiz, timeUserDict_oneBiz, periodList, timeUserLenDict):
permute_timeReviewerDict_oneBiz = {}
totalSinceUserSet = set()
for t in periodList:
##todo
selectReviewerSum = 0
if timeUserLenDict.has_key(t):
selectReviewerSum = timeUserLenDict[t]
sinceUserSet = set()
if timeUserDict_oneBiz.has_key(t):
sinceUserList = timeUserDict_oneBiz[t]
sinceUserSet = set(sinceUserList)
totalSinceUserSet = totalSinceUserSet.union(sinceUserSet)
selectUserList = randomSelectBusiness(list(totalSinceUserSet), selectReviewerSum)
selectUserSet = set(selectUserList)
permute_timeReviewerDict_oneBiz.setdefault(t, [])
permute_timeReviewerDict_oneBiz[t] = selectUserList
totalSinceUserSet = totalSinceUserSet.difference(selectUserSet)
return permute_timeReviewerDict_oneBiz
def mainFunction():
f1_result = open("coef1_result.txt", "w")
f2_result = open("coef2_result.txt", "w")
(userInfo, timeUserData, userSum, userList) = loadUser()
(reviewData, reviewSum, timeReviewUser) = loadReview()
(reviewList) = filterReviewData(reviewData, reviewSum)
selectBusinessNum = 1
selectBusinessList = randomSelectBusiness(reviewList, selectBusinessNum)
selectBusinessSet = set(selectBusinessList)
beginTime = datetime.datetime.now()
positiveCoef = 0
negativeCoef = 0
results=[]
pool_args = [(userInfo, i, reviewData) for i in selectBusinessSet]
pool = ThreadPool(8)
results = pool.map(compute_oneBiz_helper, pool_args)
# results = []
# for i in range(selectBusinessNum):
# selectBusiness = selectBusinessList[i]
# reviewData_allBiz = dict(reviewData)
# (LR_coef, LR_coef2) = compute_oneBiz(userInfo, selectBusiness, reviewData_allBiz)
# results.append((LR_coef, LR_coef2))
for (LR_coef, LR_coef2) in results:
f1_result.write("%s\n"%LR_coef)
f2_result.write("%s\n"%LR_coef2)
endTime = datetime.datetime.now()
timeIntervals = (endTime-beginTime).seconds
print "time interval %s"%timeIntervals
f1_result.write("time interval %s"%timeIntervals)
f2_result.write("time interval %s"%timeIntervals)
f1_result.close()
f2_result.close()
mainFunction()
| gpl-2.0 |
herilalaina/scikit-learn | sklearn/learning_curve.py | 27 | 15421 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.learning_curve` instead.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
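# --- Usage sketch (not part of the original module) ---
# Minimal call pattern, assuming scikit-learn's Ridge estimator and the load_diabetes
# loader are importable at call time. Scores come back with one row per training-set
# size and one column per CV fold.
def _demo_learning_curve():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge

    data = load_diabetes()
    sizes, train_scores, test_scores = learning_curve(
        Ridge(alpha=1.0), data.data, data.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    # Average over folds to obtain one train/test curve per training-set size.
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)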
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.validation_curve` instead.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
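# --- Usage sketch (not part of the original module) ---
# Sweeps LogisticRegression's regularization parameter C over a log-spaced grid, assuming
# the iris loader and LogisticRegression are importable at call time. Each row of the
# returned score arrays corresponds to one value in param_range, each column to one fold.
def _demo_validation_curve():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    iris = load_iris()
    param_range = np.logspace(-3, 3, 7)
    train_scores, test_scores = validation_curve(
        LogisticRegression(), iris.data, iris.target,
        param_name="C", param_range=param_range, cv=5)
    return param_range, train_scores.mean(axis=1), test_scores.mean(axis=1)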
| bsd-3-clause |
aetilley/scikit-learn | sklearn/datasets/samples_generator.py | 45 | 56433 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
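# --- Usage sketch (not part of the original module) ---
# Draws a small 3-class problem and checks the advertised output shapes; the argument
# values are illustrative only.
def _demo_make_classification():
    X, y = make_classification(n_samples=60, n_features=8, n_informative=4,
                               n_redundant=2, n_classes=3, n_clusters_per_class=1,
                               class_sep=2.0, random_state=0)
    assert X.shape == (60, 8) and y.shape == (60,)
    return X, y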
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
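# --- Usage sketch (not part of the original module) ---
# Requests the binary indicator format so Y has one column per class; this also avoids
# the deprecation warning raised for the older sequence-of-sequences output.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=15, n_classes=4,
                                          n_labels=2, return_indicator=True,
                                          random_state=0)
    assert X.shape == (50, 15) and Y.shape == (50, 4)
    return X, Y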
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
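# --- Usage sketch (not part of the original module) ---
# With coef=True the ground-truth linear model is returned as well, so with noise=0 the
# targets can be reconstructed exactly from X and the coefficients.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=80, n_features=10, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    assert np.allclose(y, X.dot(w))   # bias defaults to 0.0
    return X, y, w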
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
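# --- Usage sketch (not part of the original module) ---
# Both 2-D toy generators return points with binary labels and are typically used to
# visualise non-linear decision boundaries.
def _demo_make_circles_and_moons():
    X_c, y_c = make_circles(n_samples=100, factor=0.5, noise=0.05, random_state=0)
    X_m, y_m = make_moons(n_samples=100, noise=0.1, random_state=0)
    assert X_c.shape == X_m.shape == (100, 2)
    assert np.array_equal(np.unique(y_c), [0, 1])
    return (X_c, y_c), (X_m, y_m)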
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
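# --- Usage sketch (not part of the original module) ---
# The singular values of the generated matrix follow the bell-shaped-plus-fat-tail
# profile described above, so the spectrum decays quickly past `effective_rank`.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = linalg.svd(X, compute_uv=False)   # singular values, sorted descending
    assert s[0] > s[20]
    return s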
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
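# --- Usage sketch (not part of the original module) ---
# The returned triple satisfies Y = D X, with exactly n_nonzero_coefs active atoms in
# each column of the code X.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8, n_features=6,
                                       n_nonzero_coefs=3, random_state=0)
    assert Y.shape == (6, 5) and D.shape == (6, 8) and X.shape == (8, 5)
    assert np.allclose(Y, np.dot(D, X))
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X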
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
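# --- Usage sketch (not part of the original module) ---
# The result is symmetric with strictly positive eigenvalues, i.e. positive definite.
def _demo_make_spd_matrix():
    M = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(M, M.T)
    assert np.all(linalg.eigvalsh(M) > 0)
    return M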
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
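# --- Usage sketch (not part of the original module) ---
# Classes are concentric shells around `mean`, with n_samples // n_classes points per
# class (any remainder is assigned to the outermost class).
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.all(np.bincount(y) == 30)
    return X, y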
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
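    Examples
    --------
    A minimal usage sketch, assuming the usual ``sklearn.datasets`` import
    path; only the deterministic shapes are shown:
    >>> from sklearn.datasets import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
    ...                                 random_state=0)
    >>> X.shape
    (30, 20)
    >>> rows.shape
    (3, 30)
    >>> cols.shape
    (3, 20)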
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
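    Examples
    --------
    A minimal usage sketch, assuming the usual ``sklearn.datasets`` import
    path; with 4 row clusters and 3 column clusters the indicator arrays
    have 4 * 3 = 12 rows:
    >>> from sklearn.datasets import make_checkerboard
    >>> X, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=(4, 3),
    ...                                   random_state=0)
    >>> X.shape
    (30, 20)
    >>> rows.shape
    (12, 30)
    >>> cols.shape
    (12, 20)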
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
jmetzen/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# Build a toy dataset: a 1-d feature with Gaussian noise and a binary
# target that depends on the sign of the feature
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
abhishekkrthakur/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
r-mart/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
hmendozap/master-arbeit-projects | autosk_dev_test/component/RegDeepNet.py | 1 | 19049 | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class RegDeepNet(AutoSklearnRegressionAlgorithm):
def __init__(self, number_epochs, batch_size, num_layers,
dropout_output, learning_rate, solver,
lambda2, random_state=None,
**kwargs):
self.number_epochs = number_epochs
self.batch_size = batch_size
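        # num_layers arrives encoded as a letter ('c' -> 2, 'd' -> 3, ...);
        # decode it back to an integer layer count (hidden layers + output).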
self.num_layers = ord(num_layers) - ord('a')
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lambda2 = lambda2
self.solver = solver
        # The following are also taken from **kwargs, because the explicitly
        # assigned arguments above are the minimum set of parameters needed
        # to run the iterative net.
self.lr_policy = kwargs.get("lr_policy", "fixed")
self.momentum = kwargs.get("momentum", 0.99)
self.beta1 = 1 - kwargs.get("beta1", 0.1)
self.beta2 = 1 - kwargs.get("beta2", 0.01)
self.rho = kwargs.get("rho", 0.95)
self.gamma = kwargs.get("gamma", 0.01)
self.power = kwargs.get("power", 1.0)
self.epoch_step = kwargs.get("epoch_step", 1)
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isbinary = False
self.m_ismultilabel = False
self.m_isregression = True
# TODO: Should one add a try-except here?
self.num_units_per_layer = []
self.dropout_per_layer = []
self.activation_per_layer = []
self.weight_init_layer = []
self.std_per_layer = []
self.leakiness_per_layer = []
self.tanh_alpha_per_layer = []
self.tanh_beta_per_layer = []
for i in range(1, self.num_layers):
self.num_units_per_layer.append(int(kwargs.get("num_units_layer_" + str(i), 128)))
self.dropout_per_layer.append(float(kwargs.get("dropout_layer_" + str(i), 0.5)))
self.activation_per_layer.append(kwargs.get("activation_layer_" + str(i), 'relu'))
self.weight_init_layer.append(kwargs.get("weight_init_" + str(i), 'he_normal'))
self.std_per_layer.append(float(kwargs.get("std_layer_" + str(i), 0.005)))
self.leakiness_per_layer.append(float(kwargs.get("leakiness_layer_" + str(i), 1. / 3.)))
self.tanh_alpha_per_layer.append(float(kwargs.get("tanh_alpha_layer_" + str(i), 2. / 3.)))
self.tanh_beta_per_layer.append(float(kwargs.get("tanh_beta_layer_" + str(i), 1.7159)))
self.estimator = None
self.random_state = random_state
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
assert len(self.num_units_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
assert len(self.dropout_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
from implementation import FeedForwardNet
self.estimator = FeedForwardNet.FeedForwardNet(batch_size=self.batch_size,
input_shape=self.input_shape,
num_layers=self.num_layers,
num_units_per_layer=self.num_units_per_layer,
dropout_per_layer=self.dropout_per_layer,
activation_per_layer=self.activation_per_layer,
weight_init_per_layer=self.weight_init_layer,
std_per_layer=self.std_per_layer,
leakiness_per_layer=self.leakiness_per_layer,
tanh_alpha_per_layer=self.tanh_alpha_per_layer,
tanh_beta_per_layer=self.tanh_beta_per_layer,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=self.number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression,
random_state=self.random_state)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'reg_feed_nn',
'name': 'Regression Feed Forward Neural Network',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
max_num_layers = 7 # Maximum number of layers coded
# Hacky way to condition layers params based on the number of layers
        # 'c'=1, 'd'=2, 'e'=3, 'f'=4, 'g'=5, 'h'=6 hidden layers + output layer
layer_choices = [chr(i) for i in range(ord('c'), ord('b') + max_num_layers)]
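        # With max_num_layers = 7 this evaluates to ['c', 'd', 'e', 'f', 'g', 'h']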
batch_size = UniformIntegerHyperparameter("batch_size",
32, 4096,
log=True,
default=32)
number_epochs = UniformIntegerHyperparameter("number_epochs",
2, 80,
default=5)
num_layers = CategoricalHyperparameter("num_layers",
choices=layer_choices,
default='c')
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 1.0,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-7, 1e-2,
log=True,
default=1e-4)
dropout_output = UniformFloatHyperparameter("dropout_output",
0.0, 0.99,
default=0.5)
# Define basic hyperparameters and define the config space
# basic means that are independent from the number of layers
cs = ConfigurationSpace()
cs.add_hyperparameter(number_epochs)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(num_layers)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(dropout_output)
# Define parameters with different child parameters and conditions
solver_choices = ["adam", "adadelta", "adagrad",
"sgd", "momentum", "nesterov",
"smorm3s"]
solver = CategoricalHyperparameter(name="solver",
choices=solver_choices,
default="smorm3s")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
rho = UniformFloatHyperparameter("rho", 0.05, 0.99,
log=True,
default=0.95)
momentum = UniformFloatHyperparameter("momentum", 0.3, 0.999,
default=0.9)
# TODO: Add policy based on this sklearn sgd
policy_choices = ['fixed', 'inv', 'exp', 'step']
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(momentum)
cs.add_hyperparameter(rho)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
# Define parameters that are needed it for each layer
output_activation_choices = ['softmax', 'sigmoid', 'softplus', 'tanh']
activations_choices = ['sigmoid', 'tanh', 'scaledTanh', 'elu', 'relu', 'leaky', 'linear']
weight_choices = ['constant', 'normal', 'uniform',
'glorot_normal', 'glorot_uniform',
'he_normal', 'he_uniform',
'ortogonal', 'sparse']
# Iterate over parameters that are used in each layer
for i in range(1, max_num_layers):
layer_units = UniformIntegerHyperparameter("num_units_layer_" + str(i),
64, 4096,
log=True,
default=128)
cs.add_hyperparameter(layer_units)
layer_dropout = UniformFloatHyperparameter("dropout_layer_" + str(i),
0.0, 0.99,
default=0.5)
cs.add_hyperparameter(layer_dropout)
weight_initialization = CategoricalHyperparameter('weight_init_' + str(i),
choices=weight_choices,
default='he_normal')
cs.add_hyperparameter(weight_initialization)
layer_std = UniformFloatHyperparameter("std_layer_" + str(i),
1e-6, 0.1,
log=True,
default=0.005)
cs.add_hyperparameter(layer_std)
layer_activation = CategoricalHyperparameter("activation_layer_" + str(i),
choices=activations_choices,
default="relu")
cs.add_hyperparameter(layer_activation)
layer_leakiness = UniformFloatHyperparameter('leakiness_layer_' + str(i),
0.01, 0.99,
default=0.3)
cs.add_hyperparameter(layer_leakiness)
layer_tanh_alpha = UniformFloatHyperparameter('tanh_alpha_layer_' + str(i),
0.5, 1.0,
default=2. / 3.)
cs.add_hyperparameter(layer_tanh_alpha)
layer_tanh_beta = UniformFloatHyperparameter('tanh_beta_layer_' + str(i),
1.1, 3.0,
log=True,
default=1.7159)
cs.add_hyperparameter(layer_tanh_beta)
# TODO: Could be in a function in a new module
for i in range(2, max_num_layers):
# Condition layers parameter on layer choice
layer_unit_param = cs.get_hyperparameter("num_units_layer_" + str(i))
layer_cond = InCondition(child=layer_unit_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition dropout parameter on layer choice
layer_dropout_param = cs.get_hyperparameter("dropout_layer_" + str(i))
layer_cond = InCondition(child=layer_dropout_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition weight initialization on layer choice
layer_weight_param = cs.get_hyperparameter("weight_init_" + str(i))
layer_cond = InCondition(child=layer_weight_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition std parameter on weight layer initialization choice
layer_std_param = cs.get_hyperparameter("std_layer_" + str(i))
weight_cond = EqualsCondition(child=layer_std_param,
parent=layer_weight_param,
value='normal')
cs.add_condition(weight_cond)
# Condition activation parameter on layer choice
layer_activation_param = cs.get_hyperparameter("activation_layer_" + str(i))
layer_cond = InCondition(child=layer_activation_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition leakiness on activation choice
layer_leakiness_param = cs.get_hyperparameter("leakiness_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_leakiness_param,
parent=layer_activation_param,
value='leaky')
cs.add_condition(activation_cond)
# Condition tanh on activation choice
layer_tanh_alpha_param = cs.get_hyperparameter("tanh_alpha_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_alpha_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
layer_tanh_beta_param = cs.get_hyperparameter("tanh_beta_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_beta_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
# Conditioning on solver
momentum_depends_on_solver = InCondition(momentum, solver,
values=["momentum", "nesterov"])
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
rho_depends_on_solver = EqualsCondition(rho, solver, "adadelta")
cs.add_condition(momentum_depends_on_solver)
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(rho_depends_on_solver)
# Conditioning on learning rate policy
lr_policy_depends_on_solver = InCondition(lr_policy, solver,
["adadelta", "adagrad", "sgd",
"momentum", "nesterov"])
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=["inv", "exp", "step"])
power_depends_on_policy = EqualsCondition(power, lr_policy, "inv")
epoch_step_depends_on_policy = EqualsCondition(epoch_step, lr_policy, "step")
cs.add_condition(lr_policy_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
| mit |
murali-munna/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
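    Examples
    --------
    A minimal, illustrative sketch; ``Ridge`` is used purely as an example
    estimator and the data is random, so only deterministic outputs are shown:
    >>> import numpy as np
    >>> from sklearn.learning_curve import learning_curve
    >>> from sklearn.linear_model import Ridge
    >>> X = np.random.RandomState(0).rand(30, 3)
    >>> y = X.sum(axis=1)
    >>> sizes, train_scores, test_scores = learning_curve(
    ...     Ridge(), X, y, train_sizes=[10, 15, 20], cv=3)
    >>> sizes.tolist()
    [10, 15, 20]
    >>> train_scores.shape
    (3, 3)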
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
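    Examples
    --------
    A minimal, illustrative sketch; ``Ridge`` and its ``alpha`` parameter
    are used purely as an example, and the data is random:
    >>> import numpy as np
    >>> from sklearn.learning_curve import validation_curve
    >>> from sklearn.linear_model import Ridge
    >>> X = np.random.RandomState(0).rand(30, 3)
    >>> y = X.sum(axis=1)
    >>> train_scores, test_scores = validation_curve(
    ...     Ridge(), X, y, "alpha", [0.1, 1.0, 10.0], cv=3)
    >>> train_scores.shape
    (3, 3)
    >>> test_scores.shape
    (3, 3)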
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
spallavolu/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers all support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
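    Examples
    --------
    A minimal sketch of the expected shapes, assuming the usual
    ``sklearn.linear_model`` import path (the coefficient values themselves
    depend on the random data and are not shown):
    >>> import numpy as np
    >>> from sklearn.linear_model import ridge_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 4)
    >>> y = rng.randn(20)
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> coef.shape
    (4,)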
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers all support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
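    Examples
    --------
    A minimal usage sketch on a tiny, purely illustrative toy problem:
    >>> from sklearn.linear_model import RidgeClassifier
    >>> X = [[0., 0.], [1., 1.], [2., 0.], [3., 1.]]
    >>> y = [0, 0, 1, 1]
    >>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
    >>> clf.predict([[0.5, 0.], [2.5, 0.5]]).tolist()
    [0, 1]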
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
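    The identity can be checked numerically. The snippet below is only an
    illustrative sketch in plain NumPy (it ignores the centering and
    sample-weight handling performed by this class)::
        import numpy as np
        rng = np.random.RandomState(0)
        X, y, alpha = rng.randn(20, 5), rng.randn(20), 1.0
        G = np.linalg.inv(np.dot(X, X.T) + alpha * np.eye(20))
        c = np.dot(G, y)                   # dual coefficients
        looe = c / np.diag(G)              # closed-form LOO errors
        brute = [y[i] - np.dot(X[i], np.linalg.solve(
                     np.dot(np.delete(X, i, 0).T, np.delete(X, i, 0))
                     + alpha * np.eye(5),
                     np.dot(np.delete(X, i, 0).T, np.delete(y, i))))
                 for i in range(20)]       # brute-force LOO errors
        # np.allclose(looe, brute) holds up to numerical precision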
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
        'auto' : use 'svd' if n_samples >= n_features and X is not a sparse
            matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
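    Examples
    --------
    A minimal usage sketch (illustrative only; the tiny regression data
    below is made up for this docstring)::
        from sklearn.linear_model import RidgeCV
        X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.]]
        y = [0., 0.1, 1.9, 3.2]
        reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
        reg.alpha_  # regularization strength chosen by generalized CV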
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
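    Examples
    --------
    A minimal usage sketch (illustrative only; the tiny data set below is
    made up for this docstring)::
        from sklearn.linear_model import RidgeClassifierCV
        X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.]]
        y = [0, 0, 1, 1]
        clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0),
                                class_weight='balanced').fit(X, y)
        clf.alpha_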
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
zooniverse/aggregation | experimental/penguins/newCluster.py | 2 | 11987 | #!/usr/bin/env python
__author__ = 'greg'
from sklearn.cluster import DBSCAN
from sklearn.cluster import AffinityPropagation
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import cPickle as pickle
import shutil
import urllib
import math
def dist(c1,c2):
return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
def adaptiveDBSCAN(XYpts,user_ids):
if XYpts == []:
return []
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
#increase the epsilon until we don't have any nearby clusters corresponding to non-overlapping
#sets of users
X = np.array(XYpts)
for epsilon in [5,10,15,20,25,30]:
db = DBSCAN(eps=epsilon, min_samples=2).fit(X)
labels = db.labels_
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
cluster_centers.append((np.mean(xSet),np.mean(ySet)))
pts_in_each_cluster.append(pts_in_cluster[:])
users_in_each_cluster.append([u for u,l in zip(user_ids,labels) if l == k])
#do we have any adjacent clusters with non-overlapping sets of users
#if so, we should merge them by increasing the epsilon value
cluster_compare = []
for cluster_index, (c1,users) in enumerate(zip(cluster_centers,users_in_each_cluster)):
            for c2,users2 in zip(cluster_centers[cluster_index+1:],users_in_each_cluster[cluster_index+1:]):
overlappingUsers = [u for u in users if u in users2]
cluster_compare.append((dist(c1,c2),overlappingUsers))
cluster_compare.sort(key = lambda x:x[0])
needToMerge = [] in [c[1] for c in cluster_compare[:10]]
if not(needToMerge):
break
print epsilon
print [c[1] for c in cluster_compare[:10]]
centers_to_return = []
#do we need to split any clusters?
for cluster_index in range(len(cluster_centers)):
print "splitting"
needToSplit = (sorted(users_in_each_cluster[cluster_index]) != sorted(list(set(users_in_each_cluster[cluster_index]))))
if needToSplit:
subcluster_centers = []
X = np.array(pts_in_each_cluster[cluster_index])
for epsilon in [30,25,20,15,10,5,1,0.1,0.01]:
db = DBSCAN(eps=epsilon, min_samples=2).fit(X)
labels = db.labels_
subcluster_centers = []
needToSplit = False
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
users_in_subcluster = [u for u,l in zip(users_in_each_cluster[cluster_index],labels) if l == k]
needToSplit = (sorted(users_in_subcluster) != sorted(list(set(users_in_subcluster))))
if needToSplit:
break
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
subcluster_centers.append((np.mean(xSet),np.mean(ySet)))
if not(needToSplit):
break
assert not(needToSplit)
centers_to_return.extend(subcluster_centers)
#if needToSplit:
# print pts_in_each_cluster[cluster_index]
# print users_in_each_cluster[cluster_index]
#else:
else:
centers_to_return.append(cluster_centers[cluster_index])
return centers_to_return
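#illustrative usage sketch (hypothetical values, not executed by this script):
#each entry of XYpts is one (x,y) marking and user_ids holds who placed it, e.g.
# XYpts = [(10.0,12.0),(11.0,12.5),(10.5,11.8),(50.0,60.0),(49.5,61.0)]
# user_ids = ["u1","u2","u3","u1","u2"]
# adaptiveDBSCAN(XYpts,user_ids) would return roughly [(10.5,12.1),(49.75,60.5)] -
# one aggregated cluster center per group of markings placed by distinct users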
# def cluster(XYpts,user_ids):
# if XYpts == []:
# return []
#
# #find out which points are noise - don't care about the actual clusters
# needToSplit = False
# X = np.array(XYpts)
#
#
# #X = np.array([XYpts[i] for i in signal_pts])
# #user_ids = [user_ids[i] for i in signal_pts]
# oldCenters = None
#
# needToMerge = False
# needToSplit = False
#
# cluster_list = []
# usersInCluster = []
# centers = []
#
# for pref in [0,-100,-200,-400,-800,-1200,-2000,-2200,-2400,-2700,-3000,-3500,-4000,-5000,-6000,-10000]:
# #now run affinity propagation to find the actual clusters
# af = AffinityPropagation(preference=pref).fit(X)
# #cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
#
#
#
# unique_labels = set(labels)
#
# usersInCluster = []
# centers = []
# cluster_list = []
# for k in sorted(unique_labels):
# assert(k != -1)
# #print k
# usersInCluster.append([u for u,l in zip(user_ids,labels) if l == k])
# #print XYpts
# #print user_ids
#
# class_member_mask = (labels == k)
# pts_in_cluster = list(X[class_member_mask])
# xSet,ySet = zip(*pts_in_cluster)
# centers.append((np.mean(xSet),np.mean(ySet)))
# cluster_list.append(pts_in_cluster[:])
#
# compare = []
# for cluster_index, (c1,users) in enumerate(zip(centers,usersInCluster)):
# for cluster_index, (c2,users2) in enumerate(zip(centers[cluster_index+1:],usersInCluster[cluster_index+1:])):
# overlappingUsers = [u for u in users if u in users2]
# compare.append((dist(c1,c2),overlappingUsers))
#
# #needToSplit = False
# #for users in usersInCluster:
# # needToSplit = (sorted(users) != sorted(list(set(users))))
# # if needToSplit:
# # break
#
# compare.sort(key = lambda x:x[0])
#
# needToMerge = ([] in [c[1] for c in compare[:3]]) and (compare[-1][0] <= 200)
#
# #if needToSplit:
# # assert(oldCenters != None)
# # return oldCenters
# if not(needToMerge):
# break
#
# oldCenters = centers[:]
#
# if needToMerge:
# print compare[0:3]
# assert not(needToMerge)
#
# centers_to_return = []
# for cluster_index in range(len(cluster_list)):
# if len(list(set(usersInCluster[cluster_index]))) == 1:
# continue
# #split any individual cluster
# needToSplit = (sorted(usersInCluster[cluster_index]) != sorted(list(set(usersInCluster[cluster_index]))))
# if needToSplit:
# #print cluster_list[cluster_index]
# X = np.array(cluster_list[cluster_index])
# sub_center_list = []
# for pref in [-2400,-2200,-2000,-1200,-800,-400,-200,-100,-75,-50,-30,0]:
# af = AffinityPropagation(preference=pref).fit(X)
# #cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
# try:
# unique_labels = set(labels)
# except TypeError:
# print pref
# print X
# print usersInCluster[cluster_index]
# print labels
# raise
# #get the new "sub"clusters and check to see if we need to split even more
# for k in sorted(unique_labels):
# users = [u for u,l in zip(usersInCluster[cluster_index],labels) if l == k]
# needToSplit = (sorted(users) != sorted(list(set(users))))
#
# if needToSplit:
# break
#
# #add this new sub-cluster onto the list
# class_member_mask = (labels == k)
# pts_in_cluster = list(X[class_member_mask])
# xSet,ySet = zip(*pts_in_cluster)
# sub_center_list.append((np.mean(xSet),np.mean(ySet)))
#
# if not(needToSplit):
# break
#
# #if pref == 0:
# # print sub_center_list
# assert not(needToSplit)
# #print pref
# centers_to_return.extend([c for c in sub_center_list if len(c) > 1])
#
#
#
# else:
# centers_to_return.append(centers[cluster_index])
#
# assert not(needToSplit)
# return centers
client = pymongo.MongoClient()
db = client['penguin_2014-09-19']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
images = {}
pts = {}
ids = {}
userCount = {}
errorCount = 0
total = 0
at_5 = {}
at_10 = {}
center_5 = {}
center_10 = {}
step_1 = 5
step_2 = 8
toSkip = ["APZ0002uw3","APZ0001v9f","APZ00010ww","APZ0000p99","APZ0002jc3","APZ00014t4","APZ0000v0n","APZ0000ifx","APZ0002pch","APZ0003kls","APZ0001iv3","APZ0003auc","APZ0002ezn"]
mainSubject = "APZ0003fgt" #APZ0001jre
toPlot = None
numClassifications = []
for r in collection.find():
subject_id = r["subjects"][0]["zooniverse_id"]
total += 1
if subject_id != "APZ0003kls":# in toSkip:
continue
if not(subject_id in pts):
pts[subject_id] = []
userCount[subject_id] = 0
ids[subject_id] = []
userCount[subject_id] += 1
animalsPresent = r["annotations"][0]["value"] == "yes"
#print animalsPresent
if animalsPresent:
c = 0
for marking_index in r["annotations"][1]["value"]:
try:
marking = r["annotations"][1]["value"][marking_index]
if True: # marking["value"] == "adult":
x = float(marking["x"])
y = float(marking["y"])
ip = r["user_ip"]
alreadyInList = False
try:
index = pts[subject_id].index((x,y))
if ids[subject_id][index] == ip:
alreadyInList = True
except ValueError:
pass
if not(alreadyInList):
pts[subject_id].append((x,y))
ids[subject_id].append(ip)
c += 1
except TypeError:
errorCount += 1
userCount[subject_id] += -1
break
except ValueError:
errorCount += 1
continue
numClassifications.append(c)
if userCount[subject_id] in [step_2]:
cluster_center = adaptiveDBSCAN(pts[subject_id],ids[subject_id])
mainSubject = subject_id
if cluster_center != []:
break
if userCount[subject_id] == step_1:
pass
#at_5[subject_id] = len(cluster_center)
else:
at_10[subject_id] = len(cluster_center)
# inBoth = [subject_id for subject_id in at_10 if (subject_id in at_5)]
# # print len(inBoth)
# x = [at_5[subject_id] for subject_id in inBoth]
# y = [at_10[subject_id] for subject_id in inBoth]
# print zip(inBoth,zip(x,y))
# plt.plot((0,100),(0,100),'--')
# # #print x
# # #print y
# plt.plot(x,y,'.')
# plt.show()
# print userCount
# print numClassifications
#
#
print mainSubject
r2 = collection2.find_one({"zooniverse_id":mainSubject})
url = r2["location"]["standard"]
if not(os.path.isfile("/home/greg/Databases/penguins/images/"+mainSubject+".JPG")):
urllib.urlretrieve (url, "/home/greg/Databases/penguins/images/"+mainSubject+".JPG")
image_file = cbook.get_sample_data("/home/greg/Databases/penguins/images/"+mainSubject+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
#plt.show()
#
if cluster_center != []:
x,y = zip(*cluster_center)
plt.plot(x,y,'.',color='blue')
#
# x,y = zip(*center_5[mainSubject])
# plt.plot(x,y,'.',color='red')
# x,y = zip(*center_10[mainSubject])
# plt.plot(x,y,'.',color='green')
plt.show() | apache-2.0 |
xuewei4d/scikit-learn | asv_benchmarks/benchmarks/decomposition.py | 12 | 2754 | from sklearn.decomposition import (PCA, DictionaryLearning,
MiniBatchDictionaryLearning)
from .common import Benchmark, Estimator, Transformer
from .datasets import _olivetti_faces_dataset, _mnist_dataset
from .utils import make_pca_scorers, make_dict_learning_scorers
class PCABenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for PCA.
"""
param_names = ['svd_solver']
params = (['full', 'arpack', 'randomized'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _mnist_dataset()
def make_estimator(self, params):
svd_solver, = params
estimator = PCA(n_components=32,
svd_solver=svd_solver,
random_state=0)
return estimator
def make_scorers(self):
make_pca_scorers(self)
class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for DictionaryLearning.
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = DictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
max_iter=20,
tol=1e-16,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
class MiniBatchDictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchDictionaryLearning
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = MiniBatchDictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
batch_size=3,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/tests/test_build.py | 17 | 1175 | import os
import pytest
import textwrap
from sklearn import __version__
from sklearn.utils._openmp_helpers import _openmp_parallelism_enabled
def test_openmp_parallelism_enabled():
# Check that sklearn is built with OpenMP-based parallelism enabled.
# This test can be skipped by setting the environment variable
# ``SKLEARN_SKIP_OPENMP_TEST``.
if os.getenv("SKLEARN_SKIP_OPENMP_TEST"):
pytest.skip("test explicitly skipped (SKLEARN_SKIP_OPENMP_TEST)")
base_url = "dev" if __version__.endswith(".dev0") else "stable"
err_msg = textwrap.dedent(
"""
This test fails because scikit-learn has been built without OpenMP.
This is not recommended since some estimators will run in sequential
mode instead of leveraging thread-based parallelism.
You can find instructions to build scikit-learn with OpenMP at this
address:
https://scikit-learn.org/{}/developers/advanced_installation.html
You can skip this test by setting the environment variable
SKLEARN_SKIP_OPENMP_TEST to any value.
""").format(base_url)
assert _openmp_parallelism_enabled(), err_msg
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (values need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
Microsoft/hummingbird | tests/test_sklearn_decomposition.py | 1 | 5758 | """
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
class TestSklearnMatrixDecomposition(unittest.TestCase):
def _fit_model_pca(self, model, precompute=False):
data = load_digits()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.2, random_state=42)
X_test = X_test.astype("float32")
if precompute:
# For precompute we use a linear kernel
model.fit(np.dot(X_train, X_train.T))
X_test = np.dot(X_test, X_train.T)
else:
model.fit(X_train)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.transform(X_test), torch_model.transform(X_test), rtol=1e-6, atol=2 * 1e-5)
# PCA n_components none
def test_pca_converter_none(self):
self._fit_model_pca(PCA(n_components=None))
    # PCA n_components two
def test_pca_converter_two(self):
self._fit_model_pca(PCA(n_components=2))
    # PCA n_components mle and whiten true
@unittest.skipIf(
LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
)
def test_pca_converter_mle_whiten(self):
self._fit_model_pca(PCA(n_components="mle", whiten=True))
    # PCA n_components mle and solver full
@unittest.skipIf(
LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
)
def test_pca_converter_mle_full(self):
self._fit_model_pca(PCA(n_components="mle", svd_solver="full"))
    # PCA n_components none and solver arpack
def test_pca_converter_none_arpack(self):
self._fit_model_pca(PCA(n_components=None, svd_solver="arpack"))
    # PCA n_components none and solver randomized
def test_pca_converter_none_randomized(self):
self._fit_model_pca(PCA(n_components=None, svd_solver="randomized"))
# KernelPCA linear kernel
def test_kernel_pca_converter_linear(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="linear"))
# KernelPCA linear kernel with inverse transform
def test_kernel_pca_converter_linear_fit_inverse_transform(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="linear", fit_inverse_transform=True))
# KernelPCA poly kernel
def test_kernel_pca_converter_poly(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=2))
# KernelPCA poly kernel coef0
def test_kernel_pca_converter_poly_coef0(self):
self._fit_model_pca(KernelPCA(n_components=10, kernel="poly", degree=3, coef0=10))
# KernelPCA poly kernel with inverse transform
def test_kernel_pca_converter_poly_fit_inverse_transform(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=3, fit_inverse_transform=True))
# KernelPCA poly kernel
def test_kernel_pca_converter_rbf(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="rbf"))
# KernelPCA sigmoid kernel
def test_kernel_pca_converter_sigmoid(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="sigmoid"))
# KernelPCA cosine kernel
def test_kernel_pca_converter_cosine(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="cosine"))
# KernelPCA precomputed kernel
def test_kernel_pca_converter_precomputed(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="precomputed"), precompute=True)
# TODO: Fails on macos-latest Python 3.8 due to a sklearn bug.
# FastICA converter with n_components none
# def test_fast_ica_converter_none(self):
# self._fit_model_pca(FastICA(n_components=None))
# FastICA converter with n_components 3
def test_fast_ica_converter_3(self):
self._fit_model_pca(FastICA(n_components=3))
# FastICA converter with n_components 3 whiten
def test_fast_ica_converter_3_whiten(self):
self._fit_model_pca(FastICA(n_components=3, whiten=True))
# FastICA converter with n_components 3 deflation algorithm
def test_fast_ica_converter_3_deflation(self):
self._fit_model_pca(FastICA(n_components=3, algorithm="deflation"))
# FastICA converter with n_components 3 fun exp
def test_fast_ica_converter_3_exp(self):
self._fit_model_pca(FastICA(n_components=3, fun="exp"))
# FastICA converter with n_components 3 fun cube
def test_fast_ica_converter_3_cube(self):
self._fit_model_pca(FastICA(n_components=3, fun="cube"))
# FastICA converter with n_components 3 fun custom
def test_fast_ica_converter_3_custom(self):
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
self._fit_model_pca(FastICA(n_components=3, fun=my_g))
# TruncatedSVD converter with n_components 3
def test_truncated_svd_converter_3(self):
self._fit_model_pca(TruncatedSVD(n_components=3))
# TruncatedSVD converter with n_components 3 algorithm arpack
def test_truncated_svd_converter_3_arpack(self):
self._fit_model_pca(TruncatedSVD(n_components=3, algorithm="arpack"))
if __name__ == "__main__":
unittest.main()
| mit |
giorgiop/scikit-learn | examples/mixture/plot_gmm_sin.py | 103 | 6101 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example demonstrates the behavior of Gaussian mixture models fit on data
that was not sampled from a mixture of Gaussian random variables. The dataset
is formed by 100 points loosely spaced following a noisy sine curve. There is
therefore no ground truth value for the number of Gaussian components.
The first model is a classical Gaussian Mixture Model with 10 components fit
with the Expectation-Maximization algorithm.
The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process
prior fit with variational inference. The low value of the concentration prior
makes the model favor a lower number of active components. This models
"decides" to focus its modeling power on the big picture of the structure of
the dataset: groups of points with alternating directions modeled by
non-diagonal covariance matrices. Those alternating directions roughly capture
the alternating nature of the original sine signal.
The third model is also a Bayesian Gaussian mixture model with a Dirichlet
process prior but this time the value of the concentration prior is higher
giving the model more liberty to model the fine-grained structure of the data.
The result is a mixture with a larger number of active components that is
similar to the first model where we arbitrarily decided to fix the number of
components to 10.
Which model is the best is a matter of subjective judgement: do we want to
favor models that only capture the big picture to summarize and explain most of
the structure of the data while ignoring the details or do we prefer models
that closely follow the high density regions of the signal?
The last two panels show how we can sample from the last two models. The
resulting samples distributions do not look exactly like the original data
distribution. The difference primarily stems from the approximation error we
made by using a model that assumes that the data was generated by a finite
number of Gaussian components instead of a continuous noisy sine curve.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y, means, covariances, index, title):
splot = plt.subplot(5, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
def plot_samples(X, Y, n_components, index, title):
plt.subplot(5, 1, 4 + index)
for i, color in zip(range(n_components), color_iter):
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
# Parameters
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4. * np.pi / n_samples
for i in range(X.shape[0]):
x = i * step - 6.
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3. * (np.sin(x) + np.random.normal(0, .2))
plt.figure(figsize=(10, 10))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.2, wspace=.05,
left=.03, right=.97)
# Fit a Gaussian mixture with EM using ten components
gmm = mixture.GaussianMixture(n_components=10, covariance_type='full',
max_iter=100).fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Expectation-maximization')
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e-2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="random", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=0.01$.")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 0,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=0.01$ sampled with $2000$ samples.")
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e+2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="kmeans", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=100$")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 1,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=100$ sampled with $2000$ samples.")
plt.show()
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| unlicense |
iarroyof/distributionalSemanticStabilityThesis | mklObj.py | 2 | 55729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
__author__ = 'Ignacio Arroyo-Fernandez'
from modshogun import *
from tools.load import LoadMatrix
from sklearn.metrics import r2_score
import random
from math import sqrt
import numpy
from os import getcwd
from sys import stderr
from pdb import set_trace as st
def open_configuration_file(fileName):
""" Loads the input data configuration file. Lines which start with '#' are ignored. No lines different from
configuration ones (even blank ones) at top are allowed. The amount of lines at top are exclusively either three or
five (see below for allowed contents).
    The first line may specify the train data file in sparse matrix market format.
    The second line may specify the test data file in sparse matrix market format.
    The third line may specify the train labels file. Each of its lines must contain a scalar used as the label of
    the corresponding vector in the training data.
    The fourth line may specify the test labels file. Each of its lines must contain a scalar used as the label of
    the corresponding vector in the test data.
The fifth line indicates options for the MKL object:
First character : Problem type : valid_options = {r: regression, b: binary, m: multiclass}
Second character: Machine mode : valid_options = {l: learning_mode, p: pattern_recognition_mode}
    Any other characters, or a different number of them, will be ignored or caught as errors.
    No other kind of content is allowed on configuration lines (e.g. trailing comments).
    Training data (and its labels) is optional. Whenever five configuration lines are not detected in this file,
    the first line will be considered the test data file name, the second line the test labels file and the third
    line the MKL options. An error exception will be raised otherwise (i.e. neither three nor five configuration lines).
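    An example of a five-line configuration file for learning mode (the file names below are hypothetical):
        train_data.mtx
        test_data.mtx
        train_labels.txt
        test_labels.txt
        rl
    Here 'rl' requests a regression problem ('r') in learning ('l') mode. Comment lines starting with '#' may follow
    the configuration block.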
"""
with open(fileName) as f:
configuration_lines = f.read().splitlines()
problem_modes = {'r':'regression', 'b':'binary', 'm':'multiclass'}
machine_modes = {'l':'learning', 'p':'pattern_recognition'}
cls = 0 # Counted number of configuration lines from top.
ncls = 5 # Number of configuration lines allowed.
for line in configuration_lines:
if not line.startswith('#'):
cls += 1
else:
break
if cls == ncls:
mode = configuration_lines[4]
configuration = {}
if len(mode) == 2:
try:
configuration['problem_mode'] = problem_modes[mode[0]]
configuration['machine_mode'] = machine_modes[mode[1]]
except KeyError:
sys.stderr.write('\nERROR: Incorrect configuration file. Invalid machine mode. See help for mklObj.open_configuration_file().')
else:
sys.stderr.write('\nERROR: Incorrect configuration file. Invalid number of lines. See help for mklObj.open_configuration_file().')
exit()
Null = ncls # Null index
    if configuration['machine_mode'] == 'learning': # According to availability of training files, indexes are set.
trf = 0; tsf = 1; trlf = 2 # training_file, test_file, training_labels_file, test_labels_file, mode
tslf = 3; mf = Null
configuration_lines[ncls] = None
del(configuration_lines[ncls+1:]) # All from the first '#' onwards is ignored.
elif configuration['machine_mode'] == 'pattern_recognition':
trf = 0; tsf = 1; trlf = Null # training_file, test_file, test_labels_file, mode, model_file
tslf = 2; mf = 3
configuration_lines[ncls] = None
del(configuration_lines[ncls+1:])
configuration['training_file'] = configuration_lines[trf]
configuration['test_file'] = configuration_lines[tsf]
configuration['training_labels_file'] = configuration_lines[trlf]
configuration['test_labels_file'] = configuration_lines[tslf]
configuration['model_file'] = configuration_lines[mf]
return configuration
# Loading toy multiclass data from files
def load_multiclassToy(dataRoute, fileTrain, fileLabels):
""" :returns: [RealFeatures(training_data), RealFeatures(test_data), MulticlassLabels(train_labels),
    MulticlassLabels(test_labels)]. It is a set of Shogun training objects for setting up a 10-class classification
    problem. This function is a modified version of the one at http://www.shogun-toolbox.org/static/notebook/current/MKL.html
    Pay attention to the input parameters because their documentation is valid for acquiring data for any multiclass
    problem with Shogun.
    :param dataRoute: The directory where the plain text file containing the train and test data is located.
:param fileTrain: The name of the text file containing the train and test data. Each row of the file contains a
sample vector and each column is a dimension of such a sample vector.
:param fileLabels: The name of the text file containing the train and test labels. Each row must to correspond to
each sample in fileTrain. It must be at the same directory specified by dataRoute.
"""
lm = LoadMatrix()
dataSet = lm.load_numbers(dataRoute + fileTrain)
labels = lm.load_labels(dataRoute + fileLabels)
return (RealFeatures(dataSet.T[0:3 * len(dataSet.T) / 4].T), # Return the training set, 3/4 * dataSet
RealFeatures(dataSet.T[(3 * len(dataSet.T) / 4):].T), # Return the test set, 1/4 * dataSet
MulticlassLabels(labels[0:3 * len(labels) / 4]), # Return corresponding train and test labels
MulticlassLabels(labels[(3 * len(labels) / 4):]))
# 2D Toy data generator
def generate_binToy(file_data = None, file_labels = None):
""":return: [RealFeatures(train_data),RealFeatures(train_data),BinaryLabels(train_labels),BinaryLabels(test_labels)]
This method generates random 2D training and test data for binary classification. The labels are {-1, 1} vectors.
"""
num = 30
num_components = 4
means = numpy.zeros((num_components, 2))
means[0] = [-1, 1]
means[1] = [2, -1.5]
means[2] = [-1, -3]
means[3] = [2, 1]
covs = numpy.array([[1.0, 0.0], [0.0, 1.0]])
gmm = GMM(num_components)
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs, i) for i in range(num_components)]
gmm.set_coef(numpy.array([1.0, 0.0, 0.0, 0.0]))
xntr = numpy.array([gmm.sample() for i in xrange(num)]).T
xnte = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 1.0, 0.0, 0.0]))
xntr1 = numpy.array([gmm.sample() for i in xrange(num)]).T
xnte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 0.0, 1.0, 0.0]))
xptr = numpy.array([gmm.sample() for i in xrange(num)]).T
xpte = numpy.array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(numpy.array([0.0, 0.0, 0.0, 1.0]))
xptr1 = numpy.array([gmm.sample() for i in xrange(num)]).T
xpte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T
if not file_data:
return (RealFeatures(numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1)), # Train Data
RealFeatures(numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), # Test Data
BinaryLabels(numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num)))), # Train Labels
BinaryLabels(numpy.concatenate((-numpy.ones(10000), numpy.ones(10000))))) # Test Labels
else:
data_set = numpy.concatenate((numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1),
numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), axis = 1).T
        labels = numpy.concatenate((numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num))),
                                    numpy.concatenate((-numpy.ones(10000), numpy.ones(10000))))).astype(int)
indexes = range(len(data_set))
numpy.random.shuffle(indexes)
fd = open(file_data, 'w')
fl = open(file_labels, 'w')
for i in indexes:
fd.write('%f %f\n' % (data_set[i][0],data_set[i][1]))
fl.write(str(labels[i])+'\n')
fd.close()
fl.close()
#numpy.savetxt(file_data, data_set, fmt='%f')
#numpy.savetxt(file_labels, labels, fmt='%d')
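# Hedged usage sketch for generate_binToy: with no arguments the four Shogun objects are returned directly;
# with file names (hypothetical paths) the shuffled sample/label pairs are written to disk instead:
#   feats_tr, feats_ts, labs_tr, labs_ts = generate_binToy()
#   generate_binToy(file_data='binToy.data', file_labels='binToy.labels')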
def load_binData(tr_ts_portion = None, fileTrain = None, fileLabels = None, dataRoute = None):
if not dataRoute:
dataRoute = getcwd()+'/'
assert fileTrain and fileLabels # One (or both) of the input files are not given.
assert (tr_ts_portion > 0.0 and tr_ts_portion <= 1.0) # The proportion of dividing the data set into train and test is in (0, 1]
lm = LoadMatrix()
dataSet = lm.load_numbers(dataRoute + fileTrain)
labels = lm.load_labels(dataRoute + fileLabels)
    n_samples = int(tr_ts_portion * len(dataSet.T))
    n_labels = int(tr_ts_portion * len(labels))
    return (RealFeatures(dataSet.T[0:n_samples].T),  # Return the training portion of the data set
            RealFeatures(dataSet.T[n_samples:].T),   # Return the remaining portion as the test set
            BinaryLabels(labels[0:n_labels]),        # Return corresponding train and test labels
            BinaryLabels(labels[n_labels:]))
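# Hedged usage sketch for load_binData, assuming the toy files written by generate_binToy above exist in the
# current working directory (75% of the samples go to training, the rest to test):
#   feats_tr, feats_ts, labs_tr, labs_ts = load_binData(tr_ts_portion=0.75, fileTrain='binToy.data',
#                                                       fileLabels='binToy.labels')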
def load_regression_data(fileTrain = None, fileTest = None, fileLabelsTr = None, fileLabelsTs = None, sparse=False):
    """ This method loads data from the sparse mtx file format (preferably 'CSR'; see the scipy.sparse matrix
    formats, also referred to as matrix market read and write methods). Label files must contain a single column
    of labels, e.g. the contents of a three-label file:
    1.23
    -102.45
    2.2998438943
    Loading only test labels is allowed (training labels are optional); in pattern_recognition mode no training
    labels are required and None is returned for the corresponding Shogun label object. The returned list is:
    [features_tr, features_ts, labels_tr, labels_ts]
    Returned data is of float type (dtype='float64'). This is the minimum precision allowed by Shogun, given that
    its sparse distance functions do not support shorter types, e.g. float32.
    """
assert fileTrain and fileTest and fileLabelsTs # Necessary test labels as well as test and train data sets specification.
from scipy.io import mmread
lm = LoadMatrix()
if sparse:
sci_data_tr = mmread(fileTrain).asformat('csr').astype('float64').T
features_tr = SparseRealFeatures(sci_data_tr) # Reformated as CSR and 'float64' type for
sci_data_ts = mmread(fileTest).asformat('csr').astype('float64').T # compatibility with SparseRealFeatures
features_ts = SparseRealFeatures(sci_data_ts)
else:
features_tr = RealFeatures(lm.load_numbers(fileTrain).astype('float64'))
features_ts = RealFeatures(lm.load_numbers(fileTest).astype('float64'))
labels_ts = RegressionLabels(lm.load_labels(fileLabelsTs))
if fileTrain and fileLabelsTr: # sci_data_x: Any sparse data type in the file.
labels_tr = RegressionLabels(lm.load_labels(fileLabelsTr))
else:
labels_tr = None
return features_tr, features_ts, labels_tr, labels_ts
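# Hedged usage sketch for load_regression_data; the .mtx and label file names are hypothetical placeholders
# for data stored in matrix market format:
#   X_tr, X_ts, y_tr, y_ts = load_regression_data(fileTrain='train.mtx', fileTest='test.mtx',
#                                                 fileLabelsTr='train.labels', fileLabelsTs='test.labels',
#                                                 sparse=True)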
# Exception handling:
class customException(Exception):
""" This exception prevents training inconsistencies. It could be edited for accepting a complete
dictionary of exceptions if desired.
"""
def __init__(self, message):
self.parameter = message
def __str__(self):
return repr(self.parameter)
# Basis kernel parameter generation:
def sigmaGen(self, hyperDistribution, size, rango, parameters):
""" :return: list of float
This module generates the pseudorandom vector of widths for basis Gaussian kernels according to a distribution, i.e.
hyperDistribution =
{'linear',
'quadratic',
'loggauss'*,
'gaussian'*,
        'triangular', # parameters[0] is the median of the distribution. parameters[1] has no effect.
'pareto',
'beta'*,
'gamma',
'weibull'}.
    Names marked with * require parameters, e.g. for 'gaussian', parameters = [mean, width]. The input 'size' is the
    number of segments into which the distribution domain will be discretized. The 'rango' input holds the minimum
    and maximum values the drawn parameters may take. The 'parameters' of these distributions are set to common
    default values for each distribution, but they can be modified.
    :param hyperDistribution: string
    :param size: The number of basis kernels for the MKL object.
    :param rango: The range within which the basis kernel parameters must lie. For some basis kernel families this
    input parameter has no effect.
    :param parameters: A list of parameters of the distribution of the random widths, e.g. for a gaussian
    distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel families this input
    parameter has no effect: {'linear', 'quadratic', 'triangular', 'pareto', 'gamma', 'weibull'}.
.. seealso: fit_kernel() function documentation.
"""
    # Validating the inputs
assert (isinstance(size, int) and size > 0)
assert (rango[0] < rango[1] and len(rango) == 2)
    # .. todo: Revise the linspaces of the other distributions. They must be as consistent as the
    # .. todo: Gaussian one. Use '==' instead of 'is' when verifying equality between strings (PEP8 recommendation).
sig = []
if hyperDistribution == 'linear':
line = numpy.linspace(rango[0], rango[1], size*2)
sig = random.sample(line, size)
return sig
elif hyperDistribution == 'quadratic':
sig = numpy.square(random.sample(numpy.linspace(int(sqrt(rango[0])), int(sqrt(rango[1]))), size))
return sig
elif hyperDistribution == 'gaussian':
assert parameters[1] > 0 # The width is greater than zero?
i = 0
while i < size:
numero = random.gauss(parameters[0], parameters[1])
if rango[0] <= numero <= rango[1]: # Validate the initial point of
sig.append(numero) # 'range'. If not met, loop does
i += 1 # not end, but resets
# If met, the number is appended
return sig # to 'sig' width list.
elif hyperDistribution == 'triangular':
assert rango[0] <= parameters[0] <= rango[1] # The median is in the range?
sig = numpy.random.triangular(rango[0], parameters[0], rango[1], size)
return sig
elif hyperDistribution == 'beta':
assert (parameters[0] >= 0 and parameters[1] >= 0) # Alpha and Beta parameters are non-negative?
sig = numpy.random.beta(parameters[0], parameters[1], size) * (rango[1] - rango[0]) + rango[0]
return sig
elif hyperDistribution == 'pareto':
return numpy.random.pareto(5, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'gamma':
return numpy.random.gamma(shape=1, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'weibull':
return numpy.random.weibull(2, size=size) * (rango[1] - rango[0]) + rango[0]
elif hyperDistribution == 'loggauss':
assert parameters[1] > 0 # The width is greater than zero?
i = 0
while i < size:
numero = random.lognormvariate(parameters[0], parameters[1])
if numero > rango[0] and numero < rango[1]:
sig.append(numero)
i += 1
return sig
else:
print 'The entered hyperparameter distribution is not allowed: '+hyperDistribution
#pdb.set_trace()
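# Hedged usage sketch for sigmaGen. The function does not use its 'self' argument, so None can be passed when
# it is called outside of a class; the distribution parameters below are illustrative values only:
#   widths = sigmaGen(None, hyperDistribution='gaussian', size=5, rango=[1, 50], parameters=[25.0, 10.0])
#   widths = sigmaGen(None, hyperDistribution='linear', size=5, rango=[1, 50], parameters=None)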
# Combining kernels
def genKer(self, featsL, featsR, basisFam, widths=[5.0, 4.0, 3.0, 2.0, 1.0], sparse = False):
""":return: Shogun CombinedKernel object.
This module generates a list of basis kernels. These kernels are tuned according to the vector ''widths''. Input
parameters ''featsL'' and ''featsR'' are Shogun feature objects. In the case of a learnt RKHS, these both objects
should be derived from the training SLM vectors, by means of the Shogun constructor realFeatures(). This module also
appends basis kernels to a Shogun combinedKernel object.
    The kernels to be appended are collected in the ''combKer'' object (see code), which is returned. We have analyzed some basis
families available in Shogun, so possible string values of 'basisFam' are:
basisFam = ['gaussian',
'inverseQuadratic',
'polynomial',
'power',
'rationalQuadratic',
'spherical',
'tstudent',
'wave',
'wavelet',
'cauchy',
'exponential']
"""
allowed_sparse = ['gaussian', 'polynomial'] # Change this assertion list and function if different kernels are needed.
assert not (featsL.get_feature_class() == featsR.get_feature_class() == 'C_SPARSE') or basisFam in allowed_sparse # Sparse type is not compatible with specified kernel or feature types are different.
kernels = []
if basisFam == 'gaussian':
for w in widths:
k=GaussianKernel()
#k.init(featsL, featsR)
#st()
kernels.append(k)
kernels[-1].set_width(w)
kernels[-1].init(featsL, featsR)
#st()
    elif basisFam == 'inverseQuadratic': # For this kernel (and others below) it is necessary to build the
if not sparse:
            dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # distance object first; here k = 2 selects the l_2 norm
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(InverseMultiQuadricKernel(0, w, dst))
elif basisFam == 'polynomial':
for w in widths:
kernels.append(PolyKernel(0, w, False))
    elif basisFam == 'power': # At least for images, the used norm does not make differences in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(PowerKernel(0, w, dst))
    elif basisFam == 'rationalQuadratic': # At least for images, using the 3-norm makes differences
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # in performance
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(RationalQuadraticKernel(0, w, dst))
    elif basisFam == 'spherical': # At least for images, the used norm does not make differences in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(SphericalKernel(0, w, dst))
    elif basisFam == 'tstudent': # At least for images, the used norm does not make differences in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(TStudentKernel(0, w, dst))
    elif basisFam == 'wave': # At least for images, the used norm does not make differences in performance
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(WaveKernel(0, w, dst))
    elif basisFam == 'wavelet' and not sparse: # At least for images, performance with this kernel is very low.
        for w in widths:                       # Analysing its parameters remains pending for now.
kernels.append(WaveletKernel(0, w, 0))
elif basisFam == 'cauchy':
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(CauchyKernel(0, w, dst))
elif basisFam == 'exponential': # For this kernel it is necessary specifying features at the constructor
if not sparse:
dst = MinkowskiMetric(l=featsL, r=featsR, k=2)
else:
dst = SparseEuclideanDistance(l=featsL, r=featsR)
for w in widths:
kernels.append(ExponentialKernel(featsL, featsR, w, dst, 0))
elif basisFam == 'anova' and not sparse: # This kernel presents a warning in training:
"""RuntimeWarning: [WARN] In file /home/iarroyof/shogun/src/shogun/classifier/mkl/MKLMulticlass.cpp line
198: CMKLMulticlass::evaluatefinishcriterion(...): deltanew<=0.Switching back to weight norsm
difference as criterion.
"""
for w in widths:
kernels.append(ANOVAKernel(0, w))
else:
raise NameError('Unknown Kernel family name!!!')
combKer = CombinedKernel()
#features_tr = CombinedFeatures()
for k in kernels:
combKer.append_kernel(k)
#features_tr.append_feature_obj(featsL)
#combKer.init(features_tr, features_tr)
#combKer.init(featsL,featsR)
return combKer#, features_tr
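# Hedged usage sketch for genKer. Like sigmaGen, the 'self' argument is unused, so None can be passed when the
# function is called on its own; 'feats_train' is assumed to be a RealFeatures object built elsewhere:
#   ker = genKer(None, feats_train, feats_train, basisFam='gaussian', widths=[1.0, 2.0, 4.0])
#   print ker.get_num_subkernels()   # should report 3 subkernels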
# Defining the compounding kernel object
class mklObj(object):
"""Default self definition of the Multiple Kernel Learning object. This object uses previously defined methods for
generating a linear combination of basis kernels that can be constituted from different families. See at
fit_kernel() function documentation for details. This function trains the kernel weights. The object has other
member functions offering utilities. See the next instantiation and using example:
import mklObj as mk
    kernel = mk.mklObj(weightRegNorm = 2,
                       mklC = 2,              # This is the C parameter of the underlying SVM.
                       SVMepsilon = 1e-5,
                       threads = 2,
                       MKLepsilon = 0.001,
                       problem = 'multiclass',
                       verbose = False)       # IMPORTANT: Don't use this feature (True) if you are working in pipe mode.
                                              # The object will print undesired outputs to the stdout.
    The above values are the defaults, so if they are suitable for you the object can be instantiated by simply
    stating: kernel = mk.mklObj(). It is also possible to modify only a subset of the input parameters (keeping the
    others as default): kernel = mk.mklObj(weightRegNorm = 1, mklC = 10, SVMepsilon = 1e-2). See the documentation of
    each setter below for the parameters that can be changed without a new instantiation.
    Now, once the main parameters have been set, fit the kernel:
kernel.fit_kernel(featsTr = feats_train,
targetsTr = labelsTr,
featsTs = feats_test,
targetsTs = labelsTs,
kernelFamily = 'gaussian',
                      randomRange = [50, 200], # For homogeneous poly kernels these two parameter
                      randomParams = [50, 20], # sets have no effect: no basis kernel parameters
                      hyper = 'linear',        # are needed. 'hyper' also has no effect when the kernel
                      pKers = 3)               # family is polynomial or some other powering form.
Once the MKL object has been fitted, you can get what you need from it. See getters documentation listed below.
"""
def __init__(self, weightRegNorm=2.0, mklC=2.0, SVMepsilon=0.01, model_file = None,
threads=4, MKLepsilon=0.01, problem='regression', verbose=False, mode = 'learning', sparse = False):
        """Object initialization. This procedure is independent of the input data, the basis kernels and their
        corresponding hyperparameters (kernel fitting).
"""
mkl_problem_object = {'regression':(MKLRegression, [mklC, mklC]),
'binary': (MKLClassification, [mklC, mklC]),
'multiclass': (MKLMulticlass, mklC)}
self.mode = mode
self.sparse = sparse
        assert (not model_file and mode != 'pattern_recognition') or (
            model_file and mode == 'pattern_recognition')  # A model file must be given exactly when mode is 'pattern_recognition'.
self.__problem = problem
self.verbose = verbose # inner training process verbose flag
self.Matrx = False # Kind of returned learned kernel object. See getter documentation of these
self.expansion = False # object configuration parameters for details. Only modifiable by setter.
self.__testerr = 0
if mode == 'learning':
try:
self.mkl = mkl_problem_object[problem][0]()
self.mklC = mkl_problem_object[problem][1]
except KeyError:
sys.stderr.write('Error: Given problem type is not valid.')
exit()
#################<<<<<<<<<<<>>>>>>>>>>
self.mkl.set_C_mkl(5.0) # This is the regularization parameter for the MKL weights regularizer (NOT the SVM C)
self.weightRegNorm = weightRegNorm # Setting the basis' weight vector norm
self.SVMepsilon = SVMepsilon # setting the transducer stop (convergence) criterion
self.MKLepsilon = MKLepsilon # setting the MKL stop criterion. The value suggested by
# Shogun examples is 0.001. See setter docs for details
elif mode == 'pattern_recognition':
[self.mkl, self.mkl_model] = self.load_mkl_model(file_name = model_file, model_type = problem)
self.sigmas = self.mkl_model['widths']
self.threads = threads # setting number of training threads. Verify functionality!!
def fit_pretrained(self, featsTr, featsTs):
        """ This method sets up an MKL machine by using parameters from the self.mkl_model preloaded dictionary, which
        contains pretrained model parameters, e.g. weights and widths.
"""
self.ker = genKer(self, featsTr, featsTs, sparse = self.sparse,
basisFam = self.family_translation[self.mkl_model['family']], widths = self.sigmas)
self.ker.set_subkernel_weights(self.mkl_model['weights']) # Setting up pretrained weights to the
self.ker.init(featsTr, featsTs) # new kernel
# Self Function for kernel generation
def fit_kernel(self, featsTr, targetsTr, featsTs, targetsTs, randomRange=[1, 50], randomParams=[1, 1],
hyper='linear', kernelFamily='gaussian', pKers=3):
""" :return: CombinedKernel Shogun object.
This method is used for training the desired compound kernel. See documentation of the 'mklObj'
object for using example. 'featsTr' and 'featsTs' are the training and test data respectively.
'targetsTr' and 'targetsTs' are the training and test labels, respectively. All they must be Shogun
'RealFeatures' and 'MulticlassLabels' objects respectively.
The 'randomRange' parameter defines the range of numbers from which the basis kernel parameters will be
drawn, e.g. Gaussian random widths between 1 and 50 (the default). The 'randomParams' input parameter
states the parameters of the pseudorandom distribution of the basis kernel parameters to be drawn, e.g.
Gaussian-pseudorandom-generated weights with std. deviation equal to 1 and mean equal to 1 (the default).
The 'hyper' input parameter defines the distribution of the pseudorandom-generated weights. See
documentation of the sigmaGen() method of the 'mklObj' object to see a list of possible basis kernel
parameter distributions. The 'kernelFamily' input parameter is the basis kernel family to be append to
the desired compound kernel if you select, e.g., the default 'gaussian' family, all elements of the
learned linear combination will be gaussians (each differently weighted and parametrized). See
documentation of the genKer() method of the 'mklObj' object to see a list of allowed basis kernel
families. The 'pKers' input parameter defines the size of the learned kernel linear combination, i.e.
how many basis kernels to be weighted in the training and therefore, how many coefficients will have the
Fourier series of data (the default is 3).
        .. note:: In the cases of kernelFamily = {'polynomial' or 'power' or 'tstudent' or 'anova'}, the input
        parameters {'randomRange', 'randomParams', 'hyper'} have no effect, because these kernel families do not
        require basis kernel parameters.
:param featsTr: RealFeatures Shogun object conflating the training data.
:param targetsTr: MulticlassLabels Shogun object conflating the training labels.
        :param featsTs: RealFeatures Shogun object conflating the test data.
        :param targetsTs: MulticlassLabels Shogun object conflating the test labels.
        :param randomRange: The range within which the basis kernel parameters must lie. For some basis
        kernel families this input parameter has no effect.
        :param randomParams: A list of parameters of the distribution of the random widths, e.g. for a
        gaussian distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel
        families this input parameter has no effect.
:param hyper: string which specifies the name of the basis kernel parameter distribution. See
documentation for sigmaGen() function for viewing allowed strings (names).
:param kernelFamily: string which specifies the name of the basis kernel family. See documentation for
genKer() function for viewing allowed strings (names).
:param pKers: This is the number of basis kernels for the MKL object (linear combination).
"""
# Inner variable copying:
self._featsTr = featsTr
self._targetsTr = targetsTr
self._hyper = hyper
self._pkers = pKers
self.basisFamily = kernelFamily
if self.verbose: # Printing the training progress
print '\nNacho, multiple <' + kernelFamily + '> Kernels have been initialized...'
print "\nInput main parameters: "
print "\nHyperarameter distribution: ", self._hyper, "\nLinear combination size: ", pKers, \
'\nWeight regularization norm: ', self.weightRegNorm, \
'Weight regularization parameter: ',self.mklC
if self.__problem == 'multiclass':
print "Classes: ", targetsTr.get_num_classes()
elif self.__problem == 'binary':
print "Classes: Binary"
elif self.__problem == 'regression':
print 'Regression problem'
# Generating the list of subkernels. Creating the compound kernel. For monomial-nonhomogeneous (polynomial)
# kernels the hyperparameters are uniquely the degree of each monomial, in the form of a sequence. MKL finds the
# coefficient (weight) for each monomial in order to find a compound polynomial.
if kernelFamily == 'polynomial' or kernelFamily == 'power' or \
kernelFamily == 'tstudent' or kernelFamily == 'anova':
self.sigmas = range(1, pKers+1)
self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse)
else:
# We have called 'sigmas' to any basis kernel parameter, regardless if the kernel is Gaussian or not. So
# let's generate the widths:
self.sigmas = sorted(sigmaGen(self, hyperDistribution=hyper, size=pKers,
rango=randomRange, parameters=randomParams))
try:
z = self.sigmas.index(0)
self.sigmas[z] = 0.1
except ValueError:
pass
try: # Verifying if number of kernels is greater or equal to 2
if pKers <= 1 or len(self.sigmas) < 2:
raise customException('Senseless MKLClassification use!!!')
except customException, (instance):
            print 'Caught: ' + instance.parameter
print "-----------------------------------------------------"
print """The multikernel learning object is meaningless for less than 2 basis
kernels, i.e. pKers <= 1, so 'mklObj' couldn't be instantiated."""
print "-----------------------------------------------------"
self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse)
if self.verbose:
print 'Widths: ', self.sigmas
# Initializing the compound kernel
# combf_tr = CombinedFeatures()
# combf_tr.append_feature_obj(self._featsTr)
# self.ker.init(combf_tr, combf_tr)
try: # Verifying if number of kernels was greater or equal to 2 after training
if self.ker.get_num_subkernels() < 2:
raise customException(
'Multikernel coefficients were less than 2 after training. Revise object settings!!!')
except customException, (instance):
            print 'Caught: ' + instance.parameter
# Verbose for learning surveying
if self.verbose:
print '\nKernel fitted...'
# Initializing the transducer for multiclassification
features_tr = CombinedFeatures()
features_ts = CombinedFeatures()
for k in self.sigmas:
features_tr.append_feature_obj(self._featsTr)
features_ts.append_feature_obj(featsTs)
self.ker.init(features_tr, features_tr)
self.mkl.set_kernel(self.ker)
self.mkl.set_labels(self._targetsTr)
# Train to return the learnt kernel
if self.verbose:
print '\nLearning the machine coefficients...'
# ------------------ The most time consuming code segment --------------------------
self.crashed = False
try:
self.mkl.train()
except SystemError:
self.crashed = True
self.mkl_model = self.keep_mkl_model(self.mkl, self.ker, self.sigmas) # Let's keep the trained model
if self.verbose: # for future use.
print 'Kernel trained... Weights: ', self.weights
# Evaluate the learnt Kernel. Here it is assumed 'ker' is learnt, so we only need for initialize it again but
# with the test set object. Then, set the initialized kernel to the mkl object in order to 'apply'.
self.ker.init(features_tr, features_ts) # Now with test examples. The inner product between training
#st()
def pattern_recognition(self, targetsTs):
self.mkl.set_kernel(self.ker) # and test examples generates the corresponding Gram Matrix.
if not self.crashed:
out = self.mkl.apply() # Applying the obtained Gram Matrix
else:
out = RegressionLabels(-1.0*numpy.ones(targetsTs.get_num_labels()))
self.estimated_out = list(out.get_labels())
# ----------------------------------------------------------------------------------
if self.__problem == 'binary': # If the problem is either binary or multiclass, different
evalua = ErrorRateMeasure() # performance measures are computed.
self.__testerr = 100 - evalua.evaluate(out, targetsTs) * 100
elif self.__problem == 'multiclass':
evalua = MulticlassAccuracy()
self.__testerr = evalua.evaluate(out, targetsTs) * 100
elif self.__problem == 'regression': # Determination Coefficient was selected for measuring performance
#evalua = MeanSquaredError()
#self.__testerr = evalua.evaluate(out, targetsTs)
self.__testerr = r2_score(self.estimated_out, list(targetsTs.get_labels()))
# Verbose for learning surveying
if self.verbose:
print 'Kernel evaluation ready. The precision was: ', self.__testerr, '%'
def keep_mkl_model(self, mkl, kernel, widths, file_name = None):
        """ Python reimplemented function for saving a pretrained MKL machine.
        This method saves a trained MKL machine to the file 'file_name'. If no 'file_name' is given, a
        dictionary 'mkl_machine' containing the parameters of the given trained MKL object is returned.
        Here we assume all subkernels of the passed CombinedKernel are of the same family, so only the
        first kernel is queried for the family name to be stored, and the given 'widths' are inserted
        into the model dictionary 'mkl_machine' as provided.
"""
mkl_machine = {}
support=[]
mkl_machine['num_support_vectors'] = mkl.get_num_support_vectors()
mkl_machine['bias']=mkl.get_bias()
for i in xrange(mkl_machine['num_support_vectors']):
support.append((mkl.get_alpha(i), mkl.get_support_vector(i)))
mkl_machine['support'] = support
mkl_machine['weights'] = list(kernel.get_subkernel_weights())
mkl_machine['family'] = kernel.get_first_kernel().get_name()
mkl_machine['widths'] = widths
if file_name:
f = open(file_name,'w')
f.write(str(mkl_machine)+'\n')
f.close()
else:
return mkl_machine
def load_mkl_model(self, file_name, model_type = 'regression'):
        """ This method receives a file name (if it is not in the pwd, the full path must be given) and a model
        type to be loaded, one of {'regression', 'binary', 'multiclass'}. The loaded file must contain at least a
        dictionary at its top level. This dictionary must contain a key called 'learned_model' whose value is in
        turn a dictionary, from which the model parameters will be read. For example:
        {'key_0':value, 'key_1':value,..., 'learned_model':{'family':'PolyKernel', 'bias':1.001,...}, key_n:value}
        Two objects are returned: the MKL machine tuned to the parameters stored in the given file, and the model
        dictionary itself, which holds the learned weights of the CombinedKernel (as a numpy array), the widths
        corresponding to those weights and the kernel family. Be careful with the kernel family you are loading,
        because the 'widths' are not necessarily widths; they may be, e.g., 'degrees' for the PolyKernel family.
        The CombinedKernel must be instantiated outside this method, and the corresponding weights and widths
        loaded into it afterwards.
"""
with open(file_name, 'r') as pointer:
mkl_machine = eval(pointer.read())['learned_model']
if model_type == 'regression':
            mkl = MKLRegression() # A new MKL regression object
elif model_type == 'binary':
mkl = MKLClassification()
elif model_type == 'multiclass':
mkl = MKLMulticlass()
else:
sys.stderr.write('ERROR: Unknown problem type in model loading.')
exit()
mkl.set_bias(mkl_machine['bias'])
mkl.create_new_model(mkl_machine['num_support_vectors']) # Initialize the inner SVM
for i in xrange(mkl_machine['num_support_vectors']):
mkl.set_alpha(i, mkl_machine['support'][i][0])
mkl.set_support_vector(i, mkl_machine['support'][i][1])
mkl_machine['weights'] = numpy.array(mkl_machine['weights'])
return mkl, mkl_machine
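    # Hedged usage sketch for keep_mkl_model/load_mkl_model, assuming 'kernel' is an already fitted mklObj
    # instance and the file path is hypothetical. Note that load_mkl_model expects the stored dictionary to be
    # wrapped under a 'learned_model' key, so the output of keep_mkl_model must be wrapped by the caller:
    #   model = kernel.keep_mkl_model(kernel.mkl, kernel.ker, kernel.sigmas)
    #   open('mkl_model.txt', 'w').write(str({'learned_model': model}) + '\n')
    #   mkl, model = kernel.load_mkl_model(file_name='mkl_model.txt', model_type='regression')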
# Getters (properties):
@property
def family_translation(self):
"""
"""
self.__family_translation = {'PolyKernel':'polynomial', 'GaussianKernel':'gaussian',
'ExponentialKernel':'exponential'}
return self.__family_translation
@property
def mkl_model(self):
        """ This property stores the MKL model parameters learned by the self object. These parameters can be
        stored into a file for future configuration of a new, non-trained MKL object, and can also be passed
        onwards for showing results.
"""
return self.__mkl_model
@property
def estimated_out(self):
""" This property is the mkl result after applying.
"""
return self.__estimated_out
@property
def compoundKernel(self):
        """This method is used for getting the kernel object, i.e. the learned MKL object, which can be unwrapped
        into its matrix form instead of getting a Shogun object. Use the object attributes Matrx = True,
        expansion = False for getting the compound matrix of reals. For instance:
        mklObj.Matrx = True
        mklObj.expansion = False
        kernelMatrix = mklObj.compoundKernel
        Use Matrx = True, expansion = True for getting the expanded linear combination of matrices and weights
        separately, for instance:
        mklObj.Matrx = True
        mklObj.expansion = True
        basis, weights = mklObj.compoundKernel
        Use Matrx = False, expansion = False for getting the learned kernel Shogun object, for instance:
        mklObj.Matrx = False
        mklObj.expansion = False
        kernelObj = mklObj.compoundKernel
        .. warning:: Be careful with the matrix variants of this property because of the large amount of
        physical memory they may require.
"""
if self.Matrx:
kernels = []
size = self.ker.get_num_subkernels()
            for k in xrange(size):  # Collect every subkernel matrix (xrange(size - 1) would skip the last one)
kernels.append(self.ker.get_kernel(k).get_kernel_matrix())
ws = self.weights
if self.expansion:
return kernels, ws # Returning the full expansion of the learned kernel.
else:
return sum(kernels * ws) # Returning the matrix linear combination, in other words,
else: # a projector matrix representation.
return self.ker # If matrix representation is not required, only the Shogun kernel
# object is returned.
@property
def sigmas(self):
"""This method is used for getting the current set of basis kernel parameters, i.e. widths, in the case
of the gaussian basis kernel.
:rtype : list of float
"""
return self.__sigmas
@property
def verbose(self):
"""This is the verbose flag, which is used for monitoring the object training procedure.
IMPORTANT: Don't use this feature (True) if you are working in pipe mode. The object will print undesired
outputs to the stdout.
:rtype : bool
"""
return self._verbose
@property
def Matrx(self):
"""This is a boolean property of the object. Its aim is getting and, mainly, setting the kind of object
we want to obtain as learned kernel, i.e. a Kernel Shogun object or a Kernel Matrix whose entries are
reals. The latter could require large amounts of physical memory. See the mklObj.compoundKernel property
documentation in this object for using details.
:rtype :bool
"""
return self.__Matrx
@property
def expansion(self):
"""This is a boolean property. Its aim is getting and, mainly, setting the mklObj object to return the
complete expansion of the learned kernel, i.e. a list of basis kernel matrices as well as their
corresponding coefficients. This configuration may require large amounts of physical memory. See the
mklObj.compoundKernel property documentation in this object for using details.
:rtype :bool
.. seealso:: the code and examples and documentation about :@property:`compoundKernel`
"""
return self.__expansion
@property
def weightRegNorm(self):
        """ The value of this property is the norm p of the basis' weight vector, e.g. :math:`||\beta||_p`, used as
        regularizer. It controls the smoothing among the basis kernel weights of the learned multiple kernel
        combination. On one hand, if p = 1 (the l_1 norm) the weight values B_i are disproportionate among them,
        i.e. a few of them are >> 0, some others are simply > 0 and many of them are zero or very close to zero
        (the vector B is sparse). On the other hand, if p = 2 the weights B_i are linearly distributed, i.e. their
        distribution shows a uniform tilt, in such a way that the differences between pairs of them are not
        significant, but rather proportional to the tilt of the distribution.
        To our knowledge, such a tilt is not explicitly taken into account as a regularization hyperparameter,
        although the parameter C \in [0, 1] is directly associated with it as a scalar factor. Specifically for
        C \in [0, 1], it operates on the vector B by forcing it towards a certain orientation describing a tilt
        m \in (0, 1)U(1, \infty) (with minima at the extremes of these subsets and maxima at their medians). Given
        that C \in [0, 1], the scaling effect linearly depresses low values of B_i, whilst highlighting their high
        values. The effect of C \in (1, \infty) has not been clearly studied yet; it should differ somewhat from
        the above, while keeping its scalar effect.
        Overall, as p becomes >> 1 (or even p --> \infty) the B_i values tend to be ever more uniformly
        distributed. More specific and complex regularization operators are explained in Schölkopf, B., & Smola, A. J.
        (2002). Learning with kernels: Support vector machines, regularization, optimization, and beyond. MIT press.
        :rtype : float
"""
return self.__weightRegNorm
# function getters
@property
def weights(self):
        """This method is used for getting the learned weights of the MKL object. The kernel weights are copied into
        a list before being returned, because the array returned by 'get_subkernel_weights()' causes errors when it
        is printed to an output file.
:rtype : list of float
"""
self.__weights = list(self.ker.get_subkernel_weights())
return self.__weights
@property
def SVMepsilon(self):
        """This method is used for getting the SVM convergence criterion (the minimum allowed error committed by
        the transducer in training).
:rtype : float
.. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
.. seealso:: @SVMepsilon.setter
"""
        return self.__SVMepsilon
@property
def MKLepsilon(self):
"""This method is used for getting the MKL convergence criterion (the minimum allowed error committed by
the MKL object in test).
:rtype : float
.. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
.. seealso:: @MKLepsilon.setter
"""
return self.__MKLepsilon
@property
def mklC(self):
"""This method is used for setting regularization parameters. 'mklC' is a real value in multiclass problems,
while in binary problems it must be a list of two elements. These must be different when the two classes are
imbalanced, but must be equal for balanced densities in binary classification problems. For multiclass
problems, imbalanced densities are not considered.
:rtype : float
.. seealso:: See at page 4 of Bagchi, (2014) SVM Classifiers Based On Imperfect Training Data.
.. seealso:: @weightRegNorm property documentation for more details about C as regularization parameter.
"""
return self.__mklC
@property
def threads(self):
        """ This property is used for getting and setting the number of threads into which the training procedure
        will be segmented on a single machine processor core.
:rtype : int
.. seealso:: @threads.setter documentation.
"""
return self.__threads
# Readonly properties:
@property
def problem(self):
        """This method is used for getting the kind of problem the mklObj object will be trained for, one of
        'regression', 'binary' or 'multiclass'. This property can't be modified once the object has been
        instantiated.
        :rtype : str
"""
return self.__problem
@property
def testerr(self):
"""This method is used for getting the test accuracy after training the MKL object. 'testerr' is a readonly
object property.
:rtype : float
"""
return self.__testerr
@property
def sparse(self):
        """This method is used for getting the sparse/dense mode of the MKL object.
        :rtype : bool
"""
return self.__sparse
@property
def crashed(self):
        """This method is used for getting the flag that tells whether the MKL training crashed.
        :rtype : bool
"""
return self.__crashed
# mklObj (decorated) Setters: Binary configuration of the classifier cant be changed. It is needed to instantiate
# a new mklObj object.
@crashed.setter
def crashed(self, value):
        assert isinstance(value, bool) # The crashed flag must be a boolean value
self.__crashed = value
@mkl_model.setter
def mkl_model(self, value):
assert isinstance(value, dict) # The model is not stored as a dictionary
self.__mkl_model = value
@estimated_out.setter
def estimated_out(self, value):
self.__estimated_out = value
@sparse.setter
def sparse(self, value):
self.__sparse = value
@Matrx.setter
def Matrx(self, value):
"""
:type value: bool
.. seealso:: @Matrx property documentation.
"""
assert isinstance(value, bool)
self.__Matrx = value
@expansion.setter
def expansion(self, value):
"""
.. seealso:: @expansion property documentation
:type value: bool
"""
assert isinstance(value, bool)
self.__expansion = value
@sigmas.setter
def sigmas(self, value):
""" This method is used for setting desired basis kernel parameters for the MKL object. 'value' is a list of
        real values of length 'pKers'. In 'learning' mode, be careful to avoid a mismatch between the number of basis
        kernels of the current compound kernel and the one you have in mind, otherwise a mismatch error is raised. In
        'pattern_recognition' mode, this quantity is taken from the learned model, which is stored on disk.
@type value: list of float
.. seealso:: @sigmas property documentation
"""
try:
if self.mode == 'learning':
if len(value) == self._pkers:
self.__sigmas = value
else:
raise customException('Size of basis kernel parameter list mismatches the size of the combined\
kernel. You can use len(CMKLobj.sigmas) to revise the mismatching.')
elif self.mode == 'pattern_recognition':
self.__sigmas = value
except customException, (instance):
print "Caught: " + instance.parameter
@verbose.setter
def verbose(self, value):
        """This method sets to True or False the verbose flag, which is used in turn for monitoring the object
        training procedure.
@type value: bool
"""
assert isinstance(value, bool)
self._verbose = value
@weightRegNorm.setter
def weightRegNorm(self, value):
        """This method is used for changing the norm of the weight regularizer of the MKL object. Typically this
        change is useful for retraining the model with another regularizer.
@type value: float
..seealso:: @weightRegNorm property documentation.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_mkl_norm(value)
self.__weightRegNorm = value
@SVMepsilon.setter
def SVMepsilon(self, value):
        """This method is used for setting the SVM convergence criterion (the minimum allowed error committed by
the transducer in training). In other words, the low level of the learning process. The current basis
kernel combination is tested as the SVM kernel. Regardless of each basis' weights.
@type value: float
.. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_epsilon(value)
        self.__SVMepsilon = value
@MKLepsilon.setter
def MKLepsilon(self, value):
"""This method is used for setting the MKL convergence criterion (the minimum allowed error committed by
the MKL object in test). In other words, the high level of the learning process. The current basis
kernel combination is tested as the SVM kernel. The basis' weights are tuned until 'MKLeps' is reached.
@type value: float
.. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning.
"""
assert (isinstance(value, float) and value >= 0.0)
self.mkl.set_mkl_epsilon(value)
self.__MKLepsilon = value
@mklC.setter
def mklC(self, value):
        """This method is used for setting the regularization parameters. In binary classification problems these
        are two values, which differ when the two classes are imbalanced and are equal for balanced densities. For
        multiclass problems imbalanced densities are not considered, so a single value is used by the method.
        If one or both arguments are misplaced, the default values are used for both of them.
@type value: float (for multiclass problems), [float, float] for binary and regression problems.
.. seealso:: Page 4 of Bagchi,(2014) SVM Classifiers Based On Imperfect Training Data.
"""
if self.__problem == 'binary' or self.__problem == 'regression':
assert len(value) == 2
            assert (isinstance(value, list) and value[0] > 0.0 and value[1] > 0.0)
self.mkl.set_C(value[0], value[1])
elif self.__problem == 'multiclass':
assert (isinstance(value, float) and value > 0.0)
self.mkl.set_C(value)
self.__mklC = value
@threads.setter
def threads(self, value):
        """This method is used for changing the number of threads we want to be running on a single machine core.
        These threads are not separate parallel processes running on different machine cores.
"""
assert (isinstance(value, int) and value > 0)
self.mkl.parallel.set_num_threads(value) # setting number of training threads
self.__threads = value
| gpl-2.0 |
0x0all/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 10 | 9541 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
"""Test stopping conditions of gradient descent."""
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
"""Test if the binary search finds Gaussians with desired perplexity."""
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
"""Test gradient of Kullback-Leibler divergence."""
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
"""Test trustworthiness score."""
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
"""X can be a sparse matrix."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
"""Early exaggeration factor must be >= 1."""
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
"""Number of gradient descent iterations must be at least 200."""
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
"""'init' must be 'pca' or 'random'."""
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
"""'metric' must be valid."""
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
"""t-SNE should allow metrics that cannot be squared (issue #3526)."""
random_state = check_random_state(0)
tsne = TSNE(verbose=2, metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
| bsd-3-clause |
turian/batchtrain | hyperparameters.py | 1 | 5497 | from locals import *
from collections import OrderedDict
import itertools
import sklearn.linear_model
import sklearn.svm
import sklearn.ensemble
import sklearn.neighbors
import sklearn.semi_supervised
import sklearn.naive_bayes
# Code from http://rosettacode.org/wiki/Power_set#Python
def list_powerset2(lst):
return reduce(lambda result, x: result + [subset + [x] for subset in result],
lst, [[]])
def powerset(s):
return frozenset(map(frozenset, list_powerset2(list(s))))
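# A small illustrative check of the power set helpers (not part of the original module, shown only as a
# commented example):
#   list_powerset2([1, 2]) == [[], [1], [2], [1, 2]]
#   powerset([1, 2]) == frozenset([frozenset([]), frozenset([1]), frozenset([2]), frozenset([1, 2])])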
def all_hyperparameters(odict):
hyperparams = list(itertools.product(*odict.values()))
for h in hyperparams:
yield dict(zip(odict.keys(), h))
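# Hedged usage sketch: iterating over the full hyperparameter grid of one model family defined below. Each
# yielded item is a plain dict ready to be passed as **kwargs to the corresponding estimator class:
#   for params in all_hyperparameters(MODEL_HYPERPARAMETERS["MultinomialNB"]):
#       model = MODEL_NAME_TO_CLASS["MultinomialNB"](**params)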
MODEL_HYPERPARAMETERS = {
"MultinomialNB": OrderedDict({
"alpha": [0.01, 0.032, 0.1, 0.32, 1.0, 10.]
}),
"SGDClassifier": OrderedDict({
"loss": ['hinge', 'log', 'modified_huber'],
"penalty": ['l2', 'l1', 'elasticnet'],
"alpha": [0.001, 0.0001, 0.00001, 0.000001],
"rho": [0.15, 0.30, 0.55, 0.85, 0.95],
# "l1_ratio": [0.05, 0.15, 0.45],
"fit_intercept": [True],
"n_iter": [1, 5, 25, 100],
"shuffle": [True, False],
# "epsilon": [
"learning_rate": ["constant", "optimal", "invscaling"],
"eta0": [0.001, 0.01, 0.1],
"power_t": [0.05, 0.1, 0.25, 0.5, 1.],
"warm_start": [True, False],
}),
"BayesianRidge": OrderedDict({
"n_iter": [100, 300, 1000],
"tol": [1e-2, 1e-3, 1e-4],
"alpha_1": [1e-5, 1e-6, 1e-7],
"alpha_2": [1e-5, 1e-6, 1e-7],
"lambda_1": [1e-5, 1e-6, 1e-7],
"lambda_2": [1e-5, 1e-6, 1e-7],
"normalize": [True, False],
}),
"Perceptron": OrderedDict({
"penalty": ["l2", "l1", "elasticnet"],
"alpha": [1e-2, 1e-3, 1e-4, 1e-5, 1e-6],
"n_iter": [1, 5, 25],
"shuffle": [True, False],
"eta0": [0.1, 1., 10.],
"warm_start": [True, False],
}),
"SVC": OrderedDict({
"C": [0.1, 1, 10, 100],
"kernel": ["rbf", "sigmoid", "linear", "poly"],
"degree": [1,2,3,4,5],
"gamma": [1e-3, 1e-5, 0.],
"probability": [False, True],
"cache_size": [CACHESIZE],
"shrinking": [False, True],
}),
"SVR": OrderedDict({
"C": [0.1, 1, 10, 100],
"epsilon": [0.001, 0.01, 0.1, 1.0],
"kernel": ["rbf", "sigmoid", "linear", "poly"],
"degree": [1,2,3,4,5],
"gamma": [1e-3, 1e-5, 0.],
"cache_size": [CACHESIZE],
"shrinking": [False, True],
}),
"GradientBoostingClassifier": OrderedDict({
'loss': ['deviance'],
#'learn_rate': [1., 0.1, 0.01],
'learn_rate': [1., 0.1],
#'n_estimators': [10, 32, 100, 320],
'n_estimators': [10, 32, 100],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
#'subsample': [0.032, 0.1, 0.32, 1],
'subsample': [0.1, 0.32, 1],
# 'alpha': [0.5, 0.9],
}),
"GradientBoostingRegressor": OrderedDict({
'loss': ['ls', 'lad', 'huber', 'quantile'],
'learn_rate': [1., 0.1, 0.01],
'n_estimators': [10, 32, 100, 320],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'subsample': [0.032, 0.1, 0.32, 1],
}),
"RandomForestClassifier": OrderedDict({
'n_estimators': [10, 32, 100, 320],
'criterion': ['gini', 'entropy'],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'min_density': [0.032, 0.1, 0.32],
'max_features': ["sqrt", "log2", None],
# 'bootstrap': [True, False],
'bootstrap': [True],
'oob_score': [True, False],
# 'verbose': [True],
}),
"RandomForestRegressor": OrderedDict({
'n_estimators': [10, 32, 100, 320],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'min_density': [0.032, 0.1, 0.32],
'max_features': ["sqrt", "log2", None],
# 'bootstrap': [True, False],
'bootstrap': [True],
'oob_score': [True, False],
# 'verbose': [True],
}),
"KNeighborsClassifier": OrderedDict({
'n_neighbors': [3, 5, 7],
'weights': ['uniform', 'distance'],
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'leaf_size': [10, 30, 100],
'p': [1, 2],
}),
"LabelSpreading": OrderedDict({
'kernel': ['knn', 'rbf'],
'gamma': [10, 20, 100, 200],
'n_neighbors': [3, 5, 7, 9],
'alpha': [0, 0.02, 0.2, 1.0],
'max_iters': [3, 10, 30, 100],
'tol': [1e-5, 1e-3, 1e-1, 1.],
})
}
MODEL_NAME_TO_CLASS = {
"MultinomialNB": sklearn.naive_bayes.MultinomialNB,
"SGDClassifier": sklearn.linear_model.SGDClassifier,
"BayesianRidge": sklearn.linear_model.BayesianRidge,
"Perceptron": sklearn.linear_model.Perceptron,
"SVC": sklearn.svm.SVC,
"SVR": sklearn.svm.SVR,
"GradientBoostingClassifier": sklearn.ensemble.GradientBoostingClassifier,
"GradientBoostingRegressor": sklearn.ensemble.GradientBoostingRegressor,
"RandomForestClassifier": sklearn.ensemble.RandomForestClassifier,
"RandomForestRegressor": sklearn.ensemble.RandomForestRegressor,
"KNeighborsClassifier": sklearn.neighbors.KNeighborsClassifier,
"LabelSpreading": sklearn.semi_supervised.LabelSpreading,
}
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
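# Minimal illustrative sketch (hypothetical array), showing that scale_face maps any face array onto [0, 1]:
#   face = np.array([[10., 20.], [30., 40.]])
#   scale_face(face).min(), scale_face(face).max()  # -> (0.0, 1.0)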
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
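# Minimal sketch of the deterministic shuffling scheme used above: a
# fixed-seed RandomState yields the same permutation on every call, so
# memoized loads stay reproducible. The helper below is illustrative only.
def _example_deterministic_shuffle(n_faces=5):
    """Return the permutation applied to ``n_faces`` items with seed 42."""
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    return indices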
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid picking up statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
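# Illustration of the two metadata line formats parsed above: three
# tab-separated fields encode a "same person" pair, four fields encode a
# "different persons" pair. The names and indices below are placeholders.
def _example_pair_spec_lines():
    """Return example pair-spec lines in the pairs.txt format."""
    same_person = b"Person_A\t1\t2"  # name, image 1, image 2
    different_persons = b"Person_A\t1\tPerson_B\t1"  # name1, image, name2, image
    return same_person, different_persons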
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid picking up statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images: 1 for "Same person" pairs
and 0 for "Different persons" pairs.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
ky822/scikit-learn | sklearn/utils/estimator_checks.py | 9 | 51912 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
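# Usage sketch (illustrative only): running the common test suite against an
# estimator class that follows the scikit-learn conventions. Any conforming
# estimator could be passed instead of the one chosen here.
def _example_check_estimator_usage():
    """Run the common checks on a standard scikit-learn classifier."""
    from sklearn.linear_model import LogisticRegression
    check_estimator(LogisticRegression)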
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the Johnson-Lindenstrauss lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
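# Small illustration: ``NotAnArray`` relies on the ``__array__`` protocol, so
# ``np.asarray`` can still turn it back into a regular ndarray.
def _example_not_an_array_roundtrip():
    """Show that NotAnArray converts back to an ndarray via np.asarray."""
    data = np.arange(6).reshape(3, 2)
    wrapped = NotAnArray(data)
    return np.asarray(wrapped)  # same values as ``data``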
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't handle features with values less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes
# between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# the one-vs-one scheme of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# check that regressors warn with a DeprecationWarning if they expose
# decision_function, predict_proba or predict_log_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
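# Minimal illustration of the conversion above: for MultiTask* estimators the
# 1-D target is promoted to a column vector, otherwise it is returned as-is.
def _example_multioutput_conversion():
    """Show the shape change applied for MultiTask* estimators."""
    y = np.arange(4)
    y_multi = multioutput_estimator_convert_y_2d('MultiTaskLasso', y)
    y_plain = multioutput_estimator_convert_y_2d('Ridge', y)
    return y_multi.shape, y_plain.shape  # (4, 1) and (4,)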
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
Achuth17/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our data set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
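# model() is the logistic (sigmoid) function; applied to the linear score
# X_test * clf.coef_ + clf.intercept_ it gives the fitted class-1 probability
# curve plotted below.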
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/datasets/plot_iris_dataset.py | 1 | 2738 |
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The plot below uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# nodebox section
if __name__ == '__builtin__':
    # we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1,
edgecolor='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Set1, edgecolor='k', s=40)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
#plt.show()
pltshow(plt)
| mit |
heli522/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh().
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
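#
# Illustrative usage of the wrappers defined below (a sketch; `A` is assumed
# to be a real symmetric scipy.sparse matrix):
#
#     vals, vecs = eigsh(A, k=6, sigma=0.0, which='LM')
#
# computes the six eigenvalues of A closest to sigma via the shift-invert
# (*seupd) path described above.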
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
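    Examples
    --------
    Illustrative handling (a sketch; ``A`` stands for any suitable matrix)::
        try:
            w, v = eigsh(A, k=6, maxiter=2)
        except ArpackNoConvergence as err:
            w, v = err.eigenvalues, err.eigenvectors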
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
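        # For example (an illustrative sketch), a plain standard problem is
        # set up as
        #     _SymmetricArpackParams(n, k, tp, matvec=lambda x: A.dot(x))
        # i.e. mode=1 with M_matvec and Minv_matvec left as None; the other
        # modes below only differ in which of these callables they require.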
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
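        # For instance (sketch): a real A with a complex sigma is driven with
        # mode=3 or mode=4 (real or imaginary part of the shift-invert
        # operator); the true eigenvalues are then rebuilt in extract() below
        # from products of the returned Ritz vectors with matvec, as described
        # in remark 3 of <s,d>neupd.f.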
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
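# Illustrative check (a sketch): for a dense, well-conditioned M,
#     LuInv(M).matvec(b)
# agrees with np.linalg.solve(M, b) up to floating-point round-off.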
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
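# For example (a sketch): with a dense A, M=None and sigma=2.0, get_OPinv_matvec
# above returns LuInv(A - 2.0*I).matvec; a sparse A goes through SpLuInv on the
# shifted matrix, and a bare LinearOperator falls back to the iterative
# IterOpInv path.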
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    and ZNEUPD functions, which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
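    Examples
    --------
    Shape check on a small random matrix (an illustrative sketch):
    >>> rng = np.random.RandomState(0)
    >>> u, s, vh = _svds(rng.rand(8, 5), k=3)
    >>> u.shape, s.shape, vh.shape # doctest: +SKIP
    ((8, 3), (3,), (3, 5))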
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
nlproc/splunkml | bin/multiclassify.py | 1 | 2142 | import sys, os, itertools
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from gensim.corpora import TextCorpus
from gensim.models import LsiModel, TfidfModel, LdaModel
from gensim.matutils import corpus2csc
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
def is_number(str):
try:
n = float(str)
return True
except (ValueError, TypeError):
return False
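# process_records: split each record's fields into numeric values and free text,
# vectorize the text with the requested gensim model ('lsi' or 'tfidf') or with
# feature hashing, and return the combined feature matrix, the target labels and
# the (possibly newly trained) text model.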
def process_records(records, fields, target, textmodel=None):
tokenize = CountVectorizer().build_analyzer()
input = None
X = None
y_labels = []
for i, record in enumerate(records):
nums = []
strs = []
y_labels.append(record.get(target))
for field in fields:
if is_number(record.get(field)):
nums.append(record[field])
else:
strs.append(str(record.get(field) or "").lower())
if strs:
if input is None:
input = StringIO.StringIO()
print >> input, " ".join(tokenize(" ".join(strs)))
if nums:
if X is None:
X = sp.lil_matrix((len(records),len(nums)))
X[i] = np.array(nums, dtype=np.float64)
if input is not None:
if X is not None:
X_2 = X.tocsr()
else:
X_2 = None
if isinstance(textmodel,basestring):
if textmodel == 'lsi':
corpus = TextCorpus(input)
textmodel = LsiModel(corpus, chunksize=1000)
elif textmodel == 'tfidf':
corpus = TextCorpus(input)
textmodel = TfidfModel(corpus)
elif textmodel == 'hashing':
textmodel = None
hasher = FeatureHasher(n_features=2 ** 18, input_type="string")
input.seek(0)
X = hasher.transform(tokenize(line.strip()) for line in input)
if textmodel:
num_terms = len(textmodel.id2word or getattr(textmodel, 'dfs',[]))
X = corpus2csc(textmodel[corpus], num_terms).transpose()
if X_2 is not None:
# print >> sys.stderr, "X SHAPE:", X.shape
# print >> sys.stderr, "X_2 SHAPE:", X_2.shape
X = sp.hstack([X, X_2], format='csr')
elif X is not None:
textmodel = None
X = X.tocsr()
print >> sys.stderr, "X SHAPE:", X.shape
return X, y_labels, textmodel
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
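    Examples
    --------
    A minimal usage sketch; the toy data, labels and the resulting score range
    below are chosen purely for illustration:
    >>> import numpy as np
    >>> from sklearn.metrics import silhouette_score
    >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> score = silhouette_score(X, labels)
    >>> 0.5 < score <= 1.0
    True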
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
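    Examples
    --------
    A minimal usage sketch; the toy data and labels are chosen purely for
    illustration, and one coefficient in [-1, 1] is returned per sample:
    >>> import numpy as np
    >>> from sklearn.metrics import silhouette_samples
    >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> silhouette_samples(X, labels).shape
    (4,)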
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
tomlof/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
        k(X, Y) = X . M . Y.T,   where M = [[2, 0], [0, 1]]
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
vybstat/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
IssamLaradji/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
ma-compbio/PEP | genVecs.py | 1 | 7271 | #encoding:utf-8
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import pandas as pd
import numpy as np
import os
import sys
import math
import random
import processSeq
import warnings
import threading
from multiprocessing.dummy import Pool as ThreadPool
from sklearn import preprocessing
import sklearn.preprocessing
from gensim import corpora, models, similarities
class mycorpuse(object):
def __iter__(self):
for line in open("./Data/Learning/unlabeled_train_enhancer_GM12878"):
yield line.split()
class mycorpusp(object):
def __iter__(self):
for line in open("./Data/Learning/unlabeled_train_promoter_GM12878"):
yield line.split()
# Load training data
def getData(type,cell):
data = pd.read_table('./Data/Learning/supervised_'+str(cell)+"_"+str(type))
return data
# Load trained Word2Vec model or train a new model
def getWord_model(word,num_features,min_count,type,cell):
word_model1 = ""
model_name = str(cell)+"_enhancer"
if not os.path.isfile("./" + model_name):
sentence = LineSentence("./Data/Learning/unlabeled_train_enhancer_"+str(cell),max_sentence_length=15000)
print "Start Training Word2Vec model..."
# Set values for various parameters
num_features = int(num_features) # Word vector dimensionality
min_word_count = int(min_count) # Minimum word count
num_workers = 20 # Number of threads to run in parallel
context = 20 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model
print "Training Word2Vec model..."
word_model1 = Word2Vec(sentence, workers=num_workers,\
size=num_features, min_count=min_word_count, \
window =context, sample=downsampling, seed=1)
word_model1.init_sims(replace=False)
word_model1.save(model_name)
print word_model1.most_similar("CATAGT")
else:
print "Loading Word2Vec model..."
word_model1 = Word2Vec.load(model_name)
word_model2 = ""
model_name = str(cell)+"_promoter"
if not os.path.isfile("./" + model_name):
sentence = LineSentence("./Data/Learning/unlabeled_train_promoter_"+str(cell),max_sentence_length=15000)
print "Start Training Word2Vec model..."
# Set values for various parameters
num_features = int(num_features) # Word vector dimensionality
min_word_count = int(min_count) # Minimum word count
num_workers = 20 # Number of threads to run in parallel
context = 20 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model
print "Training Word2Vec model..."
word_model2 = Word2Vec(sentence, workers=num_workers,\
size=num_features, min_count=min_word_count, \
window=context, sample=downsampling, seed=1)
word_model2.init_sims(replace=False)
word_model2.save(model_name)
print word_model2.most_similar("CATAGT")
else:
print "Loading Word2Vec model..."
word_model2 = Word2Vec.load(model_name)
return word_model1,word_model2
# Split sequences into words
def getCleanDNA_split(DNAdata,word):
dnalist = []
counter = 0
for dna in DNAdata:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata)),
sys.stdout.flush()
dna = str(dna).upper()
dnalist.append(processSeq.DNA2Sentence(dna,word).split(" "))
counter += 1
print
return dnalist
def makeFeatureVecs(words, model, num_features,word,k,temp):
featureVec = np.zeros((k,num_features), dtype="float32")
nwords = 0
index2word_set = set(model.index2word)
length = len(words)
for word in words:
if word in index2word_set:
# divide the words into k parts, add up in each part
featureVec[math.floor((nwords * k) / length)] += (model[word]) * temp[nwords]
nwords =nwords + 1
featureVec = featureVec.reshape(k * num_features)
#featureVec = featureVec/nwords
return featureVec
def mean2max(vec):
length = len(vec)
mean1 = np.max(vec[0:int(length*0.5)],axis = 0)
mean2 = np.max(vec[int(length*0.5):int(length)],axis = 0)
maxvec = np.mean([mean1,mean2],axis = 0)
return maxvec
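# getAvgFeatureVecs: build one fixed-length vector per enhancer-promoter pair by
# accumulating TF-IDF-weighted word vectors for each sequence (enhancer words
# through model1, promoter words through model2) and concatenating the two parts.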
def getAvgFeatureVecs(data,model1,model2, num_features, word,k,type,cell):
dnaFeatureVecs = np.zeros((len(data),2*k*num_features), dtype="float32")
if not os.path.isfile("./Data/enhancertfidf"+str(cell)):
print "Getting dictionary"
Corp = mycorpuse()
dictionary = corpora.Dictionary(Corp)
dictionary.save("./Data/enhancerdic"+str(cell))
corpus = [dictionary.doc2bow(text) for text in Corp]
print "Calculating TFIDF"
tfidf = models.TfidfModel(corpus)
tfidf.save("./Data/enhancertfidf"+str(cell))
else:
tfidf = models.TfidfModel.load("./Data/enhancertfidf"+str(cell))
dictionary = corpora.Dictionary.load("./Data/enhancerdic"+str(cell))
dict1 = {k:v for k, v in dictionary.items()}
DNAdata1 = getCleanDNA_split(data["seq1"],word)
counter = 0
for dna in DNAdata1:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata1)),
sys.stdout.flush()
vec_bow = dictionary.doc2bow(dna)
vec_tfidf = tfidf[vec_bow]
for i in xrange(len(vec_tfidf)):
dnaFeatureVecs[counter][0:k*num_features] += model1[dict1[vec_tfidf[i][0]]] * vec_tfidf[i][1]
counter += 1
print
del DNAdata1
counter = 0
if not os.path.isfile("./Data/promotertfidf"+str(cell)):
print "Getting dictionary"
Corp = mycorpusp()
dictionary = corpora.Dictionary(Corp)
dictionary.save("./Data/promoterdic"+str(cell))
corpus = [dictionary.doc2bow(text) for text in Corp]
print "Calculating TFIDF"
tfidf = models.TfidfModel(corpus)
tfidf.save("./Data/promotertfidf"+str(cell))
else:
tfidf = models.TfidfModel.load("./Data/promotertfidf"+str(cell))
dictionary = corpora.Dictionary.load("./Data/promoterdic"+str(cell))
dict2 = {k:v for k, v in dictionary.items()}
DNAdata2 = []
counter = 0
for dna in data["seq2"]:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(data)),
sys.stdout.flush()
dna = str(dna).upper()
DNAdata2.append(processSeq.DNA2Sentence(dna,word).split(" "))
counter += 1
counter = 0
print
for dna in DNAdata2:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata2)),
sys.stdout.flush()
vec_bow = dictionary.doc2bow(dna)
vec_tfidf = tfidf[vec_bow]
for i in xrange(len(vec_tfidf)):
dnaFeatureVecs[counter][k*num_features:2*k*num_features] += model2[dict2[vec_tfidf[i][0]]] * vec_tfidf[i][1]
counter += 1
print
np.save("./Datavecs/datavecs_"+str(cell)+"_"+str(type)+".npy",dnaFeatureVecs)
return dnaFeatureVecs
def run(word, num_features,K,type,cell):
warnings.filterwarnings("ignore")
global word_model,data,k
word = int(word)
num_features = int(num_features)
k=int(K)
word_model=""
min_count=10
word_model1,word_model2 = getWord_model(word,num_features,min_count,type,cell)
# Read data
data = getData(type,cell)
length = data.shape[0]
print length
print "Generating Training and Testing Vector"
dataDataVecs = getAvgFeatureVecs(data,word_model1,word_model2,num_features,word,k,type,cell)
if __name__ == "__main__":
run(6,300,1,'new','GM12878')
| mit |
Vimos/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
jqug/microscopy-object-detection | readdata.py | 1 | 10627 | import skimage
from lxml import etree
import os
import glob
from sklearn.cross_validation import train_test_split
import numpy as np
from progress_bar import ProgressBar
from skimage import io
from scipy import misc
def create_sets(img_dir, train_set_proportion=.6, test_set_proportion=.2, val_set_proportion=.2):
'''Split a list of image files up into training, testing and validation sets.'''
imgfilenames = glob.glob(img_dir + '*.jpg')
baseimgfilenames = [os.path.basename(f) for f in imgfilenames]
if train_set_proportion + test_set_proportion < 1:
train,val = train_test_split(np.arange(len(baseimgfilenames)),
train_size=train_set_proportion+test_set_proportion,
test_size=val_set_proportion,
random_state=1)
else:
train = np.arange(len(baseimgfilenames))
val = []
train_test_prop = train_set_proportion + test_set_proportion
train,test = train_test_split(train,
train_size=train_set_proportion/train_test_prop,
test_size=test_set_proportion/train_test_prop,
random_state=1)
trainfiles = [baseimgfilenames[i] for i in train]
testfiles = [baseimgfilenames[i] for i in test]
valfiles = [baseimgfilenames[i] for i in val]
return trainfiles, valfiles,testfiles
def get_patch_labels_for_single_image(img_filename, image_dir,annotation_dir, size, step,width, height, objectclass=None):
'''
Read the XML annotation files to get the labels of each patch for a
given image. The labels are 0 if there is no object in the corresponding
patch, and 1 if an object is present.
'''
annotation_filename = annotation_dir + img_filename[:-3] + 'xml'
boundingboxes = get_bounding_boxes_for_single_image(annotation_filename, objectclass=objectclass)
# Scan through patch locations in the image
labels = []
y = (height-(height/step)*step)/2
while y+(size) < height:
#rows
x = (width-(width/step)*step)/2
while (x+(size) < width):
objecthere=0
for bb in boundingboxes:
margin = 0
xmin = bb[0] + margin
xmax = bb[1] - margin
ymin = bb[2] + margin
ymax = bb[3] - margin
cx = x + size/2
cy = y + size/2
if (cx>xmin and cx<xmax and cy>ymin and cy<ymax):
objecthere = 1
break
# Output the details for this patch
labels.append(objecthere)
x+=step
y += step
return np.array(labels)
#http://codereview.stackexchange.com/questions/31352/overlapping-rectangles
def range_overlap(a_min, a_max, b_min, b_max):
'''Neither range is completely greater than the other
'''
return (a_min <= b_max) and (b_min <= a_max)
def overlap(r1, r2):
'''Overlapping rectangles overlap both horizontally & vertically
'''
return range_overlap(r1[0], r1[1], r2[0], r2[1]) and range_overlap(r1[2], r1[3], r2[2], r2[3])
def get_image_negatives(img, boundingboxes, size, step, grayscale=False, downsample=1, discard_rate=0.9):
'''Negative-labelled patches, taken at random from any part of the image
not overlapping an annotated bounding box.
    Since there are typically many potential negative patches in each image, only
    a proportion 1-discard_rate of the negative patches is stored.'''
c,height, width = img.shape
patches_per_img = 0
#lazy way to count how many patches we can take
max_y=0
while max_y+(size) < height:
max_x = 0
while max_x+(size) < width:
patches_per_img += 1
max_x += step
max_y += step
max_x /= step
max_y /= step
neg = []
y = (height-(max_y * step))/2
while y+(size) < height:
#rows
x = (width-(max_x * step))/2
while (x+(size) < width):
if np.random.rand()>discard_rate:
left = x
right = x+(size)
top = y
bottom = y+(size)
is_pos=False
for bb in boundingboxes:
if overlap([left,right,top,bottom], bb):
is_pos=True
break
if not is_pos:
patch = img[:, top:bottom:downsample, left:right:downsample]
neg.append(patch.copy()) # without copy seems to leak memory
x += step
y += step
return neg
def get_image_positives(img, boundingboxes, size, downsample=1):
'''Positive-labelled patches, centred on annotated bounding boxes.'''
pos = []
for bb in boundingboxes:
cy = (bb[0] + (bb[1]-bb[0])/2)
cx = (bb[2] + (bb[3]-bb[2])/2)
patch = img[..., cx-size/2:cx+size/2,cy-size/2:cy+size/2]
s= patch.shape
if s[1]<size or s[2]<size:
continue
patch = patch[:,::downsample,::downsample]
pos.append(patch.copy())
return pos
def create_patches(img_basenames, annotation_dir, image_dir, size, step, grayscale=True, progressbar=True, downsample=1, objectclass=None, negative_discard_rate=.9):
'''Extract a set of image patches with labels, from the supplied list of
annotated images. Positive-labelled patches are extracted centered on the
annotated bounding box; negative-labelled patches are extracted at random
from any part of the image which does not overlap an annotated bounding box.'''
if progressbar:
pb = ProgressBar(len(img_basenames))
if not annotation_dir[-1] == os.path.sep:
annotation_dir = annotation_dir + os.path.sep
if not image_dir[-1] == os.path.sep:
image_dir = image_dir + os.path.sep
color_type = 0
if grayscale:
channels=1
else:
channels=3
pos = []
neg = []
s = 1
for img_filename in img_basenames:
if progressbar:
pb.step(s)
s +=1
annotation_filename = annotation_dir + img_filename[:-3] + 'xml'
boundingboxes = get_bounding_boxes_for_single_image(annotation_filename, objectclass)
#colortype = cv2.IMREAD_COLOR
#img = cv2.imread(image_dir + img_filename, colortype)
img = misc.imread(image_dir + img_filename)
height,width,channels=img.shape
img = img.reshape((height, width,channels))
img = np.rollaxis(img,2)
image_pos = get_image_positives(img,boundingboxes,size,downsample=downsample)
pos.append(image_pos)
image_neg = get_image_negatives(img,boundingboxes,size,step,downsample=downsample,discard_rate=negative_discard_rate)
neg.append(image_neg)
pos = [item for sublist in pos for item in sublist]
neg = [item for sublist in neg for item in sublist]
patches = pos+neg
index = np.arange(len(patches))
np.random.seed(0)
np.random.shuffle(index)
np_patches = np.empty((len(patches),channels,size/downsample,size/downsample),dtype=np.uint8)
np_labels = np.empty(len(patches),dtype=int)
max_pos=len(pos)
for i,j in zip(index,xrange(len(index))):
if i < max_pos:
np_patches[j,] = pos[i]
np_labels[j] = 1
else:
np_patches[j,] = neg[i-max_pos]
np_labels[j] = 0
np_labels = np_labels.astype(np.uint8)
return np_labels,np_patches
def balance(X,y,mult_neg=10):
'''Returns an array with all the positive samples and as many negatives as
mult_neg*npos'''
np.random.seed(0)
neg = np.where(y==0)[0]
neg_count = len(neg)
pos = np.where(y==1)[0]
pos_count = len(pos)
np.random.shuffle(neg,)
neg = neg[0:pos_count*mult_neg]
index = np.concatenate((pos, neg))
np.random.shuffle(index)
y = y.take(index)
X = X.take(index,axis=0)
return X,y
def augment(X,y):
'''Create rotated and flipped versions of all patches.'''
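    # Each augmented index i encodes one of 8 variants of an original patch:
    # source patch i/8, a rotation by (i%4)*90 degrees, and a vertical flip
    # whenever i%8 > 3.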
shape = X.shape
num_org=shape[0]
shape = (shape[0]*8, shape[1], shape[2], shape[3])
aug_X = np.empty(shape,dtype=np.uint8)
aug_y = np.empty(shape[0],dtype=int)
new_patch_order = np.arange(shape[0])
np.random.shuffle(new_patch_order)
for i,j in zip(new_patch_order,xrange(shape[0])):
orig_patch = i/8
rot_n = i%4
do_flip = i%8>3
x = np.rollaxis(X[orig_patch],0,3 )
if do_flip:
x = np.flipud(x)
x = np.rot90(x,rot_n)
rot_X = np.rollaxis(x,2)
aug_X[j,] = (rot_X)
aug_y[j]=(y[orig_patch])
aug_y = aug_y.astype('uint8')
return aug_X,aug_y
def augment_positives(X,y):
'''Create rotated and flipped versions of only the positive-labelled
patches.'''
pos_indices = np.where(y)[0]
neg_indices = np.where(y==0)[0]
aug_X_pos, aug_y_pos = augment(X[pos_indices,], y[pos_indices])
aug_X = np.vstack((aug_X_pos, X[neg_indices,]))
aug_y = np.hstack((aug_y_pos, y[neg_indices]))
new_order = np.random.permutation(aug_y.shape[0])
aug_X = aug_X[new_order,]
aug_y = aug_y[new_order]
aug_y = aug_y.astype('uint8')
return aug_X, aug_y
def get_bounding_boxes_for_single_image(filename, objectclass=None):
'''
Given an annotation XML filename, get a list of the bounding boxes around
each object (the ground truth object locations).
'''
annofile = filename[:-3] + 'xml'
file_exists = os.path.exists(filename)
boundingboxes = []
if (file_exists):
# Read the bounding boxes from xml annotation
tree = etree.parse(filename)
r = tree.xpath('//bndbox')
if (len(r) != 0):
for i in range(len(r)):
if (objectclass==None) or (objectclass in r[i].getparent().xpath('label')[0].text.lower()):
xmin = round(float(r[i].xpath('xmin')[0].text))
xmin = max(xmin,1)
xmax = round(float(r[i].xpath('xmax')[0].text))
ymin = round(float(r[i].xpath('ymin')[0].text))
ymin = max(ymin,1)
ymax = round(float(r[i].xpath('ymax')[0].text))
xmin, xmax, ymin, ymax = int(xmin),int(xmax),int(ymin),int(ymax)
boundingboxes.append((xmin,xmax,ymin,ymax))
if len(boundingboxes) == 0:
return np.array([])
return np.vstack(boundingboxes)
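# Minimal usage sketch; 'images/' and 'annotations/' are placeholder directories
# that must hold JPEG images and matching PASCAL-style XML annotations, and the
# patch size/step values are arbitrary.
if __name__ == '__main__':
    train_files, val_files, test_files = create_sets('images/')
    labels, patches = create_patches(train_files, 'annotations/', 'images/',
                                     size=50, step=25, progressbar=False)
    print labels.shape, patches.shape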
| mit |
nhejazi/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 38 | 16445 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = n_components * np.ones((3, 3))
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_components = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
assert_raises_regexp(ValueError, r'Number of samples',
lda._perplexity_precomp_distr, X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
assert_raises_regexp(ValueError, r'Number of topics',
lda._perplexity_precomp_distr, X,
invalid_n_components)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_doc_topic_distr_deprecation():
# Test that the appropriate warning message is displayed when a user
# attempts to pass the doc_topic_distr argument to the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr1 = lda.fit_transform(X)
distr2 = None
assert_warns(DeprecationWarning, lda.perplexity, X, distr1)
assert_warns(DeprecationWarning, lda.perplexity, X, distr2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert_equal(expected_lines, n_lines)
assert_equal(expected_perplexities, n_perplexity)
def test_verbosity():
for verbose, evaluate_every, expected_lines, expected_perplexities in [
(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1),
]:
yield (check_verbosity, verbose, evaluate_every, expected_lines,
expected_perplexities)
def test_lda_n_topics_deprecation():
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=10, learning_method='batch')
assert_warns(DeprecationWarning, lda.fit, X)
| bsd-3-clause |
kazemakase/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
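# With soft voting and weights [2, 1, 2], the ensemble predicts, for each
# sample, the class maximizing the weighted average probability
# (2*p_tree + 1*p_knn + 2*p_svc) / 5 over the three fitted classifiers.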
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
ratschlab/RGAN | eICU_tstr_evaluation.py | 1 | 8268 | import data_utils
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
import math
import data_utils
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve
import copy
from scipy.stats import sem
print ("Starting TSTR experiment.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
# iterate over all dataset versions generated after running the GAN for 5 times
aurocs_all_runs = []
auprcs_all_runs = []
for oo in range(5):
print (oo)
# find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
all_aurocs_exp = []
all_auprcs_exp = []
for nn in np.arange(50,1050,50):
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
all_aurocs = []
all_auprcs = []
# in case we want to train each random forest multiple times with each dataset
for exp_num in range(1):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(vali_seqs_r, vali_targets[:,col_num]))
preds = estimator.predict(vali_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=vali_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=vali_targets[:,col_num]))
preds = estimator.predict_proba(vali_seqs_r)
fpr, tpr, thresholds = roc_curve(vali_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(vali_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
all_aurocs.append(aurocs)
all_auprcs.append(auprcs)
all_aurocs_exp.append(all_aurocs)
all_auprcs_exp.append(all_auprcs)
#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
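    # Select the GAN epoch whose synthetic dataset gives the best validation
    # performance, summing AUROC and AUPRC over the three validated tasks
    # (label columns 0, 2 and 4).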
best_idx = np.argmax(np.array(all_aurocs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1) + np.array(all_auprcs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1))
best = np.arange(50,1050,50)[best_idx]
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
print ("----------------------------")
aurocs_all_runs.append(aurocs)
auprcs_all_runs.append(auprcs)
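# Aggregate the TSTR results over the 5 GAN runs: per-task mean and standard
# error of the test-set AUROC and AUPRC.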
allr = np.vstack(aurocs_all_runs)
allp = np.vstack(auprcs_all_runs)
tstr_aurocs_mean = allr.mean(axis=0)
tstr_aurocs_sem = sem(allr, axis=0)
tstr_auprcs_mean = allp.mean(axis=0)
tstr_auprcs_sem = sem(allp, axis=0)
# get AUROC/AUPRC for real, random data
print ("Experiment with real data.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
aurocs_all = []
auprcs_all = []
for i in range(5):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
aurocs_all.append(aurocs)
auprcs_all.append(auprcs)
real_aurocs_mean = np.array(aurocs_all).mean(axis=0)
real_aurocs_sem = sem(aurocs_all, axis=0)
real_auprcs_mean = np.array(auprcs_all).mean(axis=0)
real_auprcs_sem = sem(auprcs_all, axis=0)
print ("Experiment with random predictions.")
#random score
test_targets_random = copy.deepcopy(test_targets)
random.shuffle(test_targets_random)
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
accuracies.append(accuracy_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
precisions.append(precision_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
preds = np.random.rand(len(test_targets[:,col_num]))
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds)
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds)
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
random_aurocs = aurocs
random_auprcs = auprcs
print("Results")
print("------------")
print("------------")
print("TSTR")
print(tstr_aurocs_mean)
print(tstr_aurocs_sem)
print(tstr_auprcs_mean)
print(tstr_auprcs_sem)
print("------------")
print("Real")
print(real_aurocs_mean)
print(real_aurocs_sem)
print(real_auprcs_mean)
print(real_auprcs_sem)
print("------------")
print("Random")
print(random_aurocs)
print(random_auprcs) | mit |
robin-lai/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and by an SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create instances of the models and fit our data. We do not scale our
# data since we want to plot the points in the original feature units
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
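# Optional follow-up (not part of the original example): a quick numeric check
# of how well LabelSpreading recovers the labels that were hidden in the 30%
# and 50% runs, using the ``transduction_`` attribute set during fit.
for name, (model, y_masked) in zip(["30%", "50%"], [ls30, ls50]):
    hidden = y_masked == -1
    acc = np.mean(model.transduction_[hidden] == y[hidden])
    print("Transductive accuracy on points hidden in the %s run: %.2f"
          % (name, acc))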
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
A :ref:`decision tree <tree>` is used
to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
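# Optional follow-up (not part of the original example): quantify the
# overfitting discussed in the docstring by comparing the training error of
# the three trees; the deepest tree fits the noisy targets most closely.
for depth, regr in zip([2, 5, 8], [regr_1, regr_2, regr_3]):
    train_mse = np.mean((regr.predict(X) - y) ** 2)
    print("max_depth=%d: training MSE = %.4f" % (depth, train_mse))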
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/naive_bayes.py | 3 | 20231 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
import warnings
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import array2d, atleast2d_or_csr, column_or_1d, check_arrays
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class
in the model, where classes are ordered arithmetically.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
Attributes
----------
`class_prior_` : array, shape = [n_classes]
probability of each class.
`theta_` : array, shape = [n_classes, n_features]
mean of each feature per class
`sigma_` : array, shape = [n_classes, n_features]
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='dense')
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
self.classes_ = unique_y = np.unique(y)
n_classes = unique_y.shape[0]
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
epsilon = 1e-9
for i, y_i in enumerate(unique_y):
Xi = X[y == y_i, :]
self.theta_[i, :] = np.mean(Xi, axis=0)
self.sigma_[i, :] = np.var(Xi, axis=0) + epsilon
self.class_prior_[i] = np.float(Xi.shape[0]) / n_samples
return self
def _joint_log_likelihood(self, X):
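        # Per-class Gaussian log-likelihood, summed over the features:
        #   log P(x | c) = sum_i [ -0.5 * log(2 * pi * sigma_{c,i})
        #                          - (x_i - theta_{c,i}) ** 2 / (2 * sigma_{c,i}) ]
        # to which the log prior stored in class_prior_ is added.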
X = array2d(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
        This method has some performance overhead, hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = atleast2d_or_csr(X, dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior()
return self
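    # A hedged usage sketch for partial_fit above (not part of the original
    # class): out-of-core training by streaming chunks through partial_fit.
    # The ``iter_chunks`` generator is hypothetical; any iterable yielding
    # (X_chunk, y_chunk) pairs works.
    #
    #     clf = MultinomialNB()
    #     all_classes = np.array([0, 1, 2])
    #     for X_chunk, y_chunk in iter_chunks():
    #         clf.partial_fit(X_chunk, y_chunk, classes=all_classes)
    #     y_pred = clf.predict(X_test)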
def fit(self, X, y, sample_weight=None, class_prior=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='csr')
X = X.astype(np.float)
y = column_or_1d(y, warn=True)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
if class_prior is not None:
warnings.warn('class_prior has been made an ``__init__`` parameter'
' and will be removed from fit in version 0.15.',
DeprecationWarning)
else:
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape (n_classes, )
Smoothed empirical log probability for each class.
`intercept_` : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
`feature_log_prob_`: array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
`coef_` : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
`class_count_` : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/
naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape = [n_classes]
Log probability of each class (smoothed).
`feature_log_prob_` : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
`class_count_` : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
n_classes = len(self.classes_)
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * n_classes
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
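        # For binary features the per-class log-likelihood is
        #   sum_i [ x_i * log P(x_i=1|c) + (1 - x_i) * log(1 - P(x_i=1|c)) ]
        # the lines below compute it without materialising (1 - X).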
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
fspaolo/scikit-learn | examples/cluster/plot_kmeans_digits.py | 8 | 4495 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
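# Optional follow-up (not part of the original example): report how many
# samples fall into each cluster of the k-means model fitted on the
# PCA-reduced data above.
cluster_sizes = np.bincount(kmeans.labels_, minlength=n_digits)
for cluster_id, size in enumerate(cluster_sizes):
    print("cluster %d: %d samples" % (cluster_id, size))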
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
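# Optional follow-up (not part of the original example): after the loop, `clf`
# is the tree fitted on the last feature pair (petal length and width); print
# its training accuracy and per-feature importances as a quick sanity check.
print("Training accuracy on the last feature pair: %.3f" % clf.score(X, y))
print("Feature importances:", clf.feature_importances_)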
| bsd-3-clause |
anntzer/scikit-learn | sklearn/tests/test_multiclass.py | 5 | 32749 | import numpy as np
import scipy.sparse as sp
import pytest
from re import escape
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import (check_classification_targets,
type_of_target)
from sklearn.utils import (
check_array,
shuffle,
)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
from sklearn import svm
from sklearn.exceptions import NotFittedError
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# test predicting without fitting
with pytest.raises(NotFittedError):
ovr.predict([])
# Fail on multioutput data
msg = "Multioutput target data is not supported with label binarization"
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1, 2], [3, 1]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1.5, 2.4], [3.1, 0.8]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
with pytest.raises(ValueError, match=msg):
check_classification_targets(y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) > 0.65
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert len(ovr.estimators_) == len(np.unique(y))
assert np.mean(y == pred) > 0.65
# Test when mini batches doesn't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert np.mean(pred == y) == np.mean(pred1 == y)
# test partial_fit only exists if estimator has it:
ovr = OneVsRestClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# If a new class that was not in the first call of partial fit is seen
# it should raise ValueError
y1 = [5] + y[7:-1]
msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]"
with pytest.raises(ValueError, match=msg):
ovr.partial_fit(X=X[7:], y=y1)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_
# function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert clf.multilabel_
assert sp.issparse(Y_pred_sprs)
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf = svm.SVC()
clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label .+ is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert np.unique(y_pred[:, -2:]) == 1
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label not 1 is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
if hasattr(base_clf, 'decision_function'):
dec = clf.decision_function(X)
assert dec.shape == (5,)
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert 2 == len(probabilities[0])
assert (clf.classes_[np.argmax(probabilities, axis=1)] ==
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert y_pred == 1
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert clf.multilabel_
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert len(ovr.estimators_) == 3
assert ovr.score(iris.data, iris.target) > .9
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert clf.multilabel_
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert not hasattr(decision_only, 'predict_proba')
decision_only.fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
assert hasattr(decision_only, 'decision_function')
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert not hasattr(proba_after_fit, 'predict_proba')
proba_after_fit.fit(X_train, Y_train)
assert hasattr(proba_after_fit, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns to each sample the class with the
    # greatest predictive probability.
pred = Y_proba.argmax(axis=1)
assert not (pred - Y_pred).any()
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0),
LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert shape[0] == n_classes
assert shape[1] == iris.data.shape[1]
# don't densify sparse coefficients
assert (sp.issparse(ovr.estimators_[0].coef_) ==
sp.issparse(ovr.coef_))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovr.coef_
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
msg = "Base estimator doesn't have a coef_ attribute"
with pytest.raises(AttributeError, match=msg):
ovr.coef_
# TODO: Remove this test in version 1.1 when
# the coef_ and intercept_ attributes are removed
def test_ovr_deprecated_coef_intercept():
ovr = OneVsRestClassifier(SVC(kernel="linear"))
ovr = ovr.fit(iris.data, iris.target)
msg = (r"Attribute {0} was deprecated in version 0.24 "
r"and will be removed in 1.1 \(renaming of 0.26\). If you observe "
r"this warning while using RFE or SelectFromModel, "
r"use the importance_getter parameter instead.")
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(ovr, att)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovo.predict([])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
def test_ovo_partial_fit_predict():
temp = datasets.load_iris()
X, y = temp.data, temp.target
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2
assert np.mean(y == pred1) > 0.65
assert_almost_equal(pred1, pred2)
# Test when mini-batches have binary target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:60], y[:60], np.unique(y))
ovo1.partial_fit(X[60:], y[60:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred1, pred2)
assert len(ovo1.estimators_) == len(np.unique(y))
assert np.mean(y == pred1) > 0.65
ovo = OneVsOneClassifier(MultinomialNB())
X = np.random.rand(14, 2)
y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]
ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])
ovo.partial_fit(X[7:], y[7:])
pred = ovo.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
# raises error when mini-batch does not have classes from all_classes
ovo = OneVsOneClassifier(MultinomialNB())
error_y = [0, 1, 2, 3, 4, 5, 2]
message_re = escape("Mini-batch contains {0} while "
"it must be subset of {1}".format(np.unique(error_y),
np.unique(y)))
with pytest.raises(ValueError, match=message_re):
ovo.partial_fit(X[:7], error_y, np.unique(y))
# test partial_fit only exists if estimator has it:
ovr = OneVsOneClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
# first binary
ovo_clf.fit(iris.data, iris.target == 0)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples,)
# then multi-class
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples, n_classes)
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert len(np.unique(decisions[:, class_idx])) > 146
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert ovo_prediction[0] == normalized_confidences[0].argmax()
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert ovo_prediction[0] == i % 3
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ovo_one_class():
# Test error for OvO with one class
X = np.eye(4)
y = np.array(['a'] * 4)
ovo = OneVsOneClassifier(LinearSVC())
msg = "when only one class"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ovo_float_y():
# Test that the OvO errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OneVsOneClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ecoc.predict([])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ecoc_float_y():
# Test that the OCC errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OutputCodeClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
ovo = OutputCodeClassifier(LinearSVC(), code_size=-1)
msg = "code_size should be greater than 0, got -1"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_delegate_sparse_base_estimator():
# Non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/17218
X, y = iris.data, iris.target
X_sp = sp.csc_matrix(X)
# create an estimator that does not support sparse input
base_estimator = CheckingClassifier(
check_X=check_array,
check_X_params={"ensure_2d": True, "accept_sparse": False},
)
ecoc = OutputCodeClassifier(base_estimator, random_state=0)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.fit(X_sp, y)
ecoc.fit(X, y)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.predict(X_sp)
# smoke test to check when sparse input should be supported
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
ecoc.fit(X_sp, y).predict(X_sp)
assert len(ecoc.estimators_) == 4
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert (idx.shape[0] * n_estimators / (n_estimators - 1) ==
linear_kernel.shape[0])
@ignore_warnings(category=FutureWarning)
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._pairwise
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._pairwise
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_tag(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._get_tags()["pairwise"]
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._get_tags()["pairwise"]
# TODO: Remove in 1.1
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_deprecated(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
ov_clf = MultiClassClassifier(clf_precomputed)
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
ov_clf._pairwise
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
@pytest.mark.parametrize("MultiClassClassifier",
[OneVsRestClassifier, OneVsOneClassifier])
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
def test_support_missing_values(MultiClassClassifier):
# smoke test to check that pipeline OvR and OvO classifiers are letting
# the validation of missing values to
# the underlying pipeline or classifiers
rng = np.random.RandomState(42)
X, y = iris.data, iris.target
X = np.copy(X) # Copy to avoid that the original data is modified
mask = rng.choice([1, 0], X.shape, p=[.1, .9]).astype(bool)
X[mask] = np.nan
lr = make_pipeline(SimpleImputer(),
LogisticRegression(random_state=rng))
MultiClassClassifier(lr).fit(X, y).score(X, y)
| bsd-3-clause |
AIML/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
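# --- Hedged note for newer scikit-learn versions (illustrative; assumes >= 0.20
# with joblib installed separately). Several modules imported above were later
# moved or removed (sklearn.grid_search, sklearn.cross_validation,
# sklearn.externals.joblib); an equivalent setup with the relocated APIs would
# look roughly like this:
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectPercentile, f_regression
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.pipeline import Pipeline

cv = KFold(n_splits=2)
ridge = BayesianRidge()
ward_clf = GridSearchCV(
    Pipeline([('ward', FeatureAgglomeration(n_clusters=10)), ('ridge', ridge)]),
    {'ward__n_clusters': [10, 20, 30]}, cv=cv)
anova_clf = GridSearchCV(
    Pipeline([('anova', SelectPercentile(f_regression)), ('ridge', ridge)]),
    {'anova__percentile': [5, 10, 20]}, cv=cv)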
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares solution, the
coefficients exhibit large oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
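# --- Illustrative check (not part of the original example) ---
# Hedged side note: with fit_intercept=False the ridge solution has the closed
# form w(alpha) = (X^T X + alpha * I)^{-1} X^T y, which makes the limit
# alpha -> 0 (ordinary least squares) explicit. A minimal numerical check,
# reusing the Hilbert-matrix setup of this example:
import numpy as np
from sklearn import linear_model

X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
alpha = 1e-3
w_closed = np.linalg.solve(X.T.dot(X) + alpha * np.eye(X.shape[1]), X.T.dot(y))
w_ridge = linear_model.Ridge(alpha=alpha, fit_intercept=False).fit(X, y).coef_
print(np.allclose(w_closed, w_ridge))        # expected: True (up to solver tolerance)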
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed for the fit to converge
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
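# --- Illustrative usage sketch (not part of the original test module) ---
# Hedged example of the typical way BernoulliRBM is used in practice: as an
# unsupervised feature extractor in front of a linear classifier. The
# hyperparameter values below are arbitrary and only for illustration.
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline

X, y = load_digits(return_X_y=True)
X = (X - X.min()) / (X.max() - X.min())      # scale to [0, 1] as the RBM expects
model = Pipeline([('rbm', BernoulliRBM(n_components=64, learning_rate=0.05,
                                       n_iter=10, random_state=0)),
                  ('logistic', LogisticRegression(max_iter=1000))])
model.fit(X, y)
print(model.score(X, y))                     # training accuracy, illustration only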
| bsd-3-clause |
mlyundin/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
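# --- Illustrative sketch (not part of the original example) ---
# Hedged, plot-free version of the same reduction; the explained variance
# ratio is printed as a quick sanity check.
from sklearn import datasets, decomposition

iris = datasets.load_iris()
pca = decomposition.PCA(n_components=3).fit(iris.data)
X_reduced = pca.transform(iris.data)
print(X_reduced.shape)                 # (150, 3)
print(pca.explained_variance_ratio_)   # fraction of variance per component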
| bsd-3-clause |
Jeff20/sklearn_pycon2015 | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
        corresponding data points and add the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
joshloyal/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and the normalized
values of the univariate F-test statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
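# --- Illustrative follow-up (not part of the original example) ---
# Hedged sketch: the same scores can drive automatic feature selection, for
# instance through SelectKBest (k=2 is arbitrary here).
import numpy as np
from sklearn.feature_selection import SelectKBest, mutual_info_regression

np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
selector = SelectKBest(mutual_info_regression, k=2).fit(X, y)
print(selector.get_support())          # expected: [ True  True False ]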
| bsd-3-clause |
evgchz/scikit-learn | sklearn/ensemble/gradient_boosting.py | 6 | 63474 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from warnings import warn
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils.extmath import logsumexp
from ..utils.stats import _weighted_percentile
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float)
class_counts = np.bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : np.ndarray, shape=(n,):
The predictions.
        sample_weight : np.ndarray, shape=(n,):
The weight of each sample.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.abs(y - pred.ravel()))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
See
---
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression makes it possible to estimate the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage of the fact that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
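# --- Illustrative numerical check (not part of the module source) ---
# Hedged sketch, using numpy only: for raw scores f and labels y in {0, 1},
# the deviance computed above equals twice the Bernoulli negative
# log-likelihood with p = sigmoid(f), since y*f - log(1 + exp(f)) is the
# per-sample log-likelihood.
import numpy as np

y = np.array([0., 1., 1., 0.])
f = np.array([-1.2, 0.3, 2.0, 0.1])          # raw scores (log-odds)
deviance = -2.0 * np.mean(y * f - np.logaddexp(0.0, f))
p = 1.0 / (1.0 + np.exp(-f))
nll = -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
print(np.allclose(deviance, 2.0 * nll))      # expected: True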
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
See
---
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose == 1``, output is printed once in a while (when the iteration
    is a multiple of ``verbose_mod``); if larger than 1, output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
        # self.n_estimators is the total number of estimators after resizing
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting; a standalone usage sketch is given after this
            class.
Returns
-------
self : object
Returns self.
"""
        # if not warm_start, clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
if y.shape[0] != n_samples:
raise ValueError('Shape mismatch of X and y: %d != %d' %
(n_samples, y.shape[0]))
if n_samples != sample_weight.shape[0]:
raise ValueError('Shape mismatch of sample_weight: %d != %d' %
(sample_weight.shape[0], n_samples))
self.n_features = n_features
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
self.min_weight_fraction_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score -
loss_(y[~sample_mask], y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, call `fit` "
"before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test
        set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
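# --- Illustrative sketch of the ``monitor`` callback documented in
# ``BaseGradientBoosting.fit`` above (standalone; not part of the module
# source). Hedged example: stop boosting once the out-of-bag improvement
# turns negative, which requires ``subsample < 1`` so that
# ``oob_improvement_`` is tracked. Dataset and values are arbitrary.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

def stop_on_oob_decline(i, est, locals_):
    """Return True (= stop early) once the OOB improvement is negative."""
    return i > 0 and est.oob_improvement_[i] < 0.0

X_m, y_m = make_classification(n_samples=300, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=500, subsample=0.5, random_state=0)
gbc.fit(X_m, y_m, monitor=stop_on_oob_decline)
print(gbc.estimators_.shape[0])              # number of stages actually fitted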
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes,
warm_start=warm_start)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return super(GradientBoostingClassifier, self).fit(X, y, sample_weight,
monitor)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        p : generator of array of shape = [n_samples, n_classes]
            The class probabilities of the input samples, one array per
            boosting stage.
"""
try:
for score in self.staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict classes at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted classes of the input samples, one array per
            boosting stage.
"""
for score in self.staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
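# A minimal, doctest-style usage sketch for the classifier above (a sketch on a
# toy dataset, assuming ``sklearn.datasets.make_classification`` is available;
# the numbers are illustrative, not part of this module's API):
#
# >>> from sklearn.datasets import make_classification
# >>> X, y = make_classification(n_samples=200, random_state=0)
# >>> clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
# ...                                  max_depth=3, random_state=0).fit(X, y)
# >>> clf.predict_proba(X[:2]).shape
# (2, 2)
# >>> # staged_* methods yield one array per boosting stage, which is useful
# >>> # for choosing ``n_estimators`` on a held-out set.
# >>> sum(1 for _ in clf.staged_predict_proba(X[:2]))
# 50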
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha, verbose, max_leaf_nodes=max_leaf_nodes,
warm_start=warm_start)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self.
"""
self.n_classes_ = 1
return super(GradientBoostingRegressor, self).fit(X, y, sample_weight,
monitor)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted values of the input samples, one array per
            boosting stage.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
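# A minimal, doctest-style usage sketch for the regressor above (a sketch,
# assuming ``sklearn.datasets.make_friedman1``; numbers are illustrative):
#
# >>> from sklearn.datasets import make_friedman1
# >>> X, y = make_friedman1(n_samples=200, random_state=0)
# >>> est = GradientBoostingRegressor(n_estimators=100, loss='ls',
# ...                                 max_depth=3, random_state=0)
# >>> est.fit(X[:150], y[:150]).predict(X[150:]).shape
# (50,)
# >>> # ``train_score_[i]`` holds the in-bag deviance after stage ``i``.
# >>> len(est.train_score_)
# 100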
| bsd-3-clause |
cristiandima/highlights | highlights/extractive/erank.py | 1 | 3576 | """
This is in many ways identical to the textrank algorithm. The only difference
is that we expand the sentence graph to also include the title of the text,
the topics associated with the text, and the named entities present in it.
The output is still an importance score for each sentence in the original text,
but these new nodes offer extra information and increase the weights of those
sentences which are most closely related to the topics/title/named entities
associated with the text.
"""
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from highlights.extractive.textrank import _textrank_scores
from highlights.internals.helpers import summary_length, NLP
_word_tokenize = TfidfVectorizer(stop_words='english').build_analyzer()
def _get_named_entities(nlp_doc):
""" Given a spacy document return the top ten most frequent name entities
present in the text. Name entities appearing only once are skipped.
Args:
nlp_doc (spacy document): document to extract named entities from
Returns:
a list of words, the most frequent named entities present in the document
"""
ignored_ents = {'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'}
ne = [n.text for n in nlp_doc.ents if n.label_ not in ignored_ents]
ne = [n.replace('the', '').strip() for n in ne]
ne = set(ne)
counter = CountVectorizer(ngram_range=(1,2))
counts = counter.fit_transform([nlp_doc.text])
ne_scores = []
for entity in ne:
entity = entity.lower()
if entity in counter.vocabulary_:
ne_scores.append((counts[0, counter.vocabulary_.get(entity)], entity))
ne_scores = sorted([n for n in ne_scores if n[0] != 1], reverse=True)[:10]
return [n[1] for n in ne_scores]
def _get_topics(nlp_doc, lda, word_dict, topic_terms):
""" Given a spacy document, as well as an lda model, this function returns
a list of lists where each list holds the string words associated with each
topic associated with the document
"""
doc_bow = word_dict.doc2bow(_word_tokenize(nlp_doc.text))
topics = lda.get_document_topics(doc_bow)
topics_as_words = []
for topic_tuple in topics:
topic_words = []
for word_tuple in topic_terms[topic_tuple[0]]:
topic_words.append(word_dict[word_tuple[0]])
topics_as_words.append(topic_words)
return topics_as_words
def _erank_scores(nlp_doc, topics, named_entities, title=None):
sentences = [sent.text for sent in nlp_doc.sents]
original_len = len(sentences)
for topic_words in topics:
sentences.append(' '.join(topic_words))
if len(named_entities) >= 1:
sentences.append(' '.join(named_entities))
if title is not None:
sentences.append(' '.join(_word_tokenize(title)))
scores = _textrank_scores(sentences)
scores = {i: scores.get(i, 0) for i in range(original_len)}
return scores
def erank(text, lda, word_dict, topic_terms, title=None, len_func=summary_length):
nlp_doc = NLP(text)
sentences = [sent.text for sent in nlp_doc.sents]
topics = _get_topics(nlp_doc, lda, word_dict, topic_terms)
named_entities = _get_named_entities(nlp_doc)
scores = _erank_scores(nlp_doc, topics, named_entities, title)
sum_len = len_func(len(scores))
sent_scores = [(scores[i], s) for i, s in enumerate(sentences)]
top_sentences = sorted(sent_scores, reverse=True)[:sum_len]
return [s[1] for s in top_sentences]
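# A rough usage sketch. ``lda``, ``word_dict`` and ``article_text``/``article_title``
# below are placeholders: ``lda`` is assumed to be a trained gensim LdaModel and
# ``word_dict`` the gensim Dictionary it was trained with; ``topic_terms`` maps
# each topic id to its (word_id, weight) pairs, e.g. built via ``lda.get_topic_terms``.
#
# >>> topic_terms = [lda.get_topic_terms(i) for i in range(lda.num_topics)]
# >>> summary = erank(article_text, lda, word_dict, topic_terms, title=article_title)
# >>> print(' '.join(summary))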
| mit |
glemaitre/UnbalancedDataset | imblearn/ensemble/tests/test_classifier.py | 2 | 17981 | """Test the module ensemble classifiers."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.datasets import load_iris, make_hastie_10_2
from sklearn.model_selection import (GridSearchCV, ParameterGrid,
train_test_split)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest
from sklearn.utils.testing import (assert_array_equal,
assert_array_almost_equal,
assert_raises,
assert_warns,
assert_warns_message)
from imblearn.datasets import make_imbalance
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
iris = load_iris()
def test_balanced_bagging_classifier():
# Check classification for various parameter settings.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BalancedBaggingClassifier(
base_estimator=base_estimator,
random_state=0,
**params).fit(X_train, y_train).predict(X_test)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
base_estimator = DecisionTreeClassifier().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
# disable the resampling by passing an empty dictionary.
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=False,
n_estimators=10,
ratio={},
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) ==
base_estimator.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=True,
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) <
base_estimator.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert np.unique(features).shape[0] == X.shape[1]
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=True,
random_state=0).fit(X_train, y_train)
unique_features = [np.unique(features).shape[0]
for features in ensemble.estimators_features_]
assert np.median(unique_features) < X.shape[1]
def test_probability():
# Predict probabilities.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
random_state=0).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BalancedBaggingClassifier(
base_estimator=LogisticRegression(),
random_state=0,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=0).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert abs(test_score - clf.oob_score_) < 0.1
# Test with few estimators
assert_warns(UserWarning,
BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=0).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
clf1 = BalancedBaggingClassifier(
base_estimator=KNeighborsClassifier(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
clf2 = make_pipeline(RandomUnderSampler(
random_state=clf1.estimators_[0].steps[0][1].random_state),
KNeighborsClassifier()).fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50})
base = DecisionTreeClassifier()
# Test n_estimators
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=1.5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=-1).fit, X, y)
# Test max_samples
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples="foobar").fit,
X, y)
# Test max_features
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features="foobar").fit,
X, y)
# Test support of decision_function
assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y),
'decision_function'))
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target.copy()
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BalancedBaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
Perceptron)
def test_bagging_with_pipeline():
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
estimator = BalancedBaggingClassifier(
make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(X, y).predict(X)
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BalancedBaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert len(clf_ws) == n_estimators
clf_no_ws = BalancedBaggingClassifier(n_estimators=10,
random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert (set([pipe.steps[-1][1].random_state for pipe in clf_ws]) ==
set([pipe.steps[-1][1].random_state for pipe in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators"
" does not", clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BalancedBaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
# FIXME: uncomment when #9723 is merged in scikit-learn
# def test_estimators_samples():
# # Check that format of estimators_samples_ is correct and that results
# # generated at fit time can be identically reproduced at a later time
# # using data saved in object attributes.
# X, y = make_hastie_10_2(n_samples=200, random_state=1)
# # remap the y outside of the BalancedBaggingclassifier
# # _, y = np.unique(y, return_inverse=True)
# bagging = BalancedBaggingClassifier(LogisticRegression(),
# max_samples=0.5,
# max_features=0.5, random_state=1,
# bootstrap=False)
# bagging.fit(X, y)
# # Get relevant attributes
# estimators_samples = bagging.estimators_samples_
# estimators_features = bagging.estimators_features_
# estimators = bagging.estimators_
# # Test for correct formatting
# assert len(estimators_samples) == len(estimators)
# assert len(estimators_samples[0]) == len(X)
# assert estimators_samples[0].dtype.kind == 'b'
# # Re-fit single estimator to test for consistent sampling
# estimator_index = 0
# estimator_samples = estimators_samples[estimator_index]
# estimator_features = estimators_features[estimator_index]
# estimator = estimators[estimator_index]
# X_train = (X[estimator_samples])[:, estimator_features]
# y_train = y[estimator_samples]
# orig_coefs = estimator.steps[-1][1].coef_
# estimator.fit(X_train, y_train)
# new_coefs = estimator.steps[-1][1].coef_
# assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert bagging._max_samples == max_samples
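if __name__ == "__main__":
    # A small smoke-test sketch (not a substitute for running this module with
    # pytest): fit one BalancedBaggingClassifier on the imbalanced iris data
    # used throughout the tests and report its held-out accuracy.
    X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = BalancedBaggingClassifier(DecisionTreeClassifier(), n_estimators=10,
                                    random_state=0)
    clf.fit(X_train, y_train)
    print("held-out accuracy: %.3f" % clf.score(X_test, y_test))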
| mit |
markpudd/logistic_regression | logisticReg.py | 1 | 1904 | # Helper functions to do logistic regression
# To use the logRegCost function needs to be minimised, use the logRegGrad method to provide derivative
#
import cv2
import numpy as np
import scipy.io as sio
import csv as csv
from sklearn.preprocessing import normalize
def featureNormalize(data):
    # Standardise each column: subtract the mean and divide by the sample std.
    mu = data.mean(0)
    data_norm = data - mu
    sigma = np.std(data_norm, axis=0, ddof=1)
    data_norm = data_norm / sigma
    return data_norm
def addFirstOnes(data):
return np.concatenate((np.ones((np.size(data,0),1)),data),1)
def sigmoid(z):
return 1/(1+np.exp(-z))
def logRegGrad(theta, data_x, data_y, lamb):
m = float(np.size(data_y))
theta=np.array([theta]).T
temp = np.array(theta);
temp[0] = 0;
ha = data_x.dot(theta)
h=sigmoid(ha);
grad = 1/m * ((h-data_y).T.dot(data_x)).T;
grad = grad + ((lamb/m)*temp);
return grad.T[0]
def logRegCost(theta, data_x, data_y, lamb):
m = float(np.size(data_y))
theta=np.array([theta]).T
ha = data_x.dot(theta)
h=sigmoid(ha);
J = 1/m *((-data_y.T.dot(np.log(h))-(1-data_y.T).dot(np.log(1-h))));
temp = np.array(theta);
temp[0] = 0; # because we don't add anything for j = 0
J = J + (lamb/(2*m))*sum(np.power(temp,2));
return J[0,0]
def predict(theta, data_x):
n = np.size(data_x,1)
theta=np.array([theta]).T
ha = data_x.dot(theta)
p=sigmoid(ha);
for i in range(0,np.size(data_x,0)):
if p[i]>=0.5:
p[i]=1
else:
p[i]=0
return p
def testError(theta, data_x,data_y):
m = float(np.size(data_y))
sum =0
p=predict(theta, data_x);
for i in range(0,np.size(data_x,0)):
if p[i,0]==1 and data_y[0,i]==0:
sum = sum+1;
elif p[i,0]==0 and data_y[0,i]==1:
sum = sum+1;
return 1/m * sum
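if __name__ == '__main__':
    # A minimal, hedged usage sketch on synthetic data: logRegCost is the
    # objective to minimise and logRegGrad supplies its gradient. The data,
    # lambda value and optimiser choice below are illustrative only.
    from scipy.optimize import minimize

    rng = np.random.RandomState(0)
    X_raw = rng.randn(100, 2)
    # Column vector of 0/1 labels, as expected by logRegCost/logRegGrad.
    y = (X_raw[:, 0] + X_raw[:, 1] > 0).astype(float).reshape(-1, 1)

    X = addFirstOnes(featureNormalize(X_raw))
    theta0 = np.zeros(np.size(X, 1))
    lamb = 1.0

    res = minimize(logRegCost, theta0, args=(X, y, lamb),
                   jac=logRegGrad, method='BFGS')
    print('optimised theta:', res.x)
    # testError indexes labels as a row vector, hence the transpose.
    print('training error rate:', testError(res.x, X, y.T))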
| mit |
zhenv5/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
Selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/neighbors/approximate.py | 3 | 22554 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=32,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
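# An illustrative, doctest-style sketch of the hashing step (not used by the
# estimator code): data is projected onto 32 random Gaussian directions, the
# signs are kept as bits and packed into one big-endian 32-bit integer per row.
#
# >>> rng = np.random.RandomState(0)
# >>> hasher = GaussianRandomProjectionHash(n_components=32, random_state=0)
# >>> hashes = hasher.fit_transform(rng.rand(5, 40))
# >>> hashes.shape, hashes.dtype
# ((5, 1), dtype('>u4'))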
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
warnings.warn("LSHForest has poor performance and has been deprecated "
"in 0.19. It will be removed in version 0.21.",
DeprecationWarning)
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
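# A brief, doctest-style sketch of the radius and incremental-indexing APIs
# (the class docstring above already demonstrates ``kneighbors``); the data and
# shapes below are illustrative only:
#
# >>> rng = np.random.RandomState(42)
# >>> lshf = LSHForest(random_state=42).fit(rng.rand(50, 10))
# >>> distances, indices = lshf.radius_neighbors(rng.rand(3, 10), radius=0.3)
# >>> distances.shape   # one (possibly empty) array of distances per query
# (3,)
# >>> # New points can be appended to the index without refitting from scratch.
# >>> lshf = lshf.partial_fit(rng.rand(10, 10))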
| bsd-3-clause |
olafhauk/mne-python | mne/utils/numerics.py | 4 | 36095 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import hashlib
from io import BytesIO, StringIO
from math import sqrt
import numbers
import operator
import os
import os.path as op
from math import ceil
import shutil
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
from scipy import sparse
from ._logging import logger, warn, verbose
from .check import check_random_state, _ensure_int, _validate_type
from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd
from .docs import fill_doc
def split_list(v, n, idx=False):
"""Split list in n (approx) equal pieces, possibly giving indices."""
n = int(n)
tot = len(v)
sz = tot // n
start = stop = 0
for i in range(n - 1):
stop += sz
yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop]
start += sz
yield (np.arange(start, tot), v[start:]) if idx else v[start]
def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):
"""Do what numpy.array_split does, but add indices."""
# this only works for indices_or_sections as int
indices_or_sections = _ensure_int(indices_or_sections)
ary_split = np.array_split(ary, indices_or_sections, axis=axis)
idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)
idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)
for sp in idx_split)
return zip(idx_split, ary_split)
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found.
Returns
-------
value : float
Sum of squares of the input array X.
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
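# Doctest-style illustrations of the helpers above (plain NumPy inputs assumed):
#
# >>> list(create_chunks([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]
# >>> sum_squared(np.array([[1., 2.], [3., 4.]]))  # 1 + 4 + 9 + 16
# 30.0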
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reg_pinv(x, reg=0, rank='full', rcond=1e-15):
"""Compute a regularized pseudoinverse of Hermitian matrices.
Regularization is performed by adding a constant value to each diagonal
element of the matrix before inversion. This is known as "diagonal
loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``.
The pseudo-inverse is computed through SVD decomposition and inverting the
singular values. When the matrix is rank deficient, some singular values
will be close to zero and will not be used during the inversion. The number
of singular values to use can either be manually specified or automatically
estimated.
Parameters
----------
x : ndarray, shape (..., n, n)
Square, Hermitian matrices to invert.
reg : float
Regularization parameter. Defaults to 0.
rank : int | None | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
Defaults to 'full'.
rcond : float | 'auto'
Cutoff for detecting small singular values when attempting to estimate
the rank of the matrix (``rank=None``). Singular values smaller than
the cutoff are set to zero. When set to 'auto', a cutoff based on
floating point precision will be used. Defaults to 1e-15.
Returns
-------
x_inv : ndarray, shape (..., n, n)
The inverted matrix.
loading_factor : float
Value added to the diagonal of the matrix during regularization.
rank : int
If ``rank`` was set to an integer value, this value is returned,
else the estimated rank of the matrix, before regularization, is
returned.
"""
from ..rank import _estimate_rank_from_s
if rank is not None and rank != 'full':
rank = int(operator.index(rank))
if x.ndim < 2 or x.shape[-2] != x.shape[-1]:
raise ValueError('Input matrix must be square.')
if not np.allclose(x, x.conj().swapaxes(-2, -1)):
raise ValueError('Input matrix must be Hermitian (symmetric)')
assert x.ndim >= 2 and x.shape[-2] == x.shape[-1]
n = x.shape[-1]
# Decompose the matrix, not necessarily positive semidefinite
from mne.fixes import svd
U, s, Vh = svd(x, hermitian=True)
# Estimate the rank before regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_before = _estimate_rank_from_s(s, tol)
# Decompose the matrix again after regularization
loading_factor = reg * np.mean(s, axis=-1)
if reg:
U, s, Vh = svd(
x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n),
hermitian=True)
# Estimate the rank after regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_after = _estimate_rank_from_s(s, tol)
# Warn the user if both all parameters were kept at their defaults and the
# matrix is rank deficient.
if (rank_after < n).any() and reg == 0 and \
rank == 'full' and rcond == 1e-15:
warn('Covariance matrix is rank-deficient and no regularization is '
'done.')
elif isinstance(rank, int) and rank > n:
raise ValueError('Invalid value for the rank parameter (%d) given '
'the shape of the input matrix (%d x %d).' %
(rank, x.shape[0], x.shape[1]))
# Pick the requested number of singular values
mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,))
if rank is None:
cmp = ret = rank_before
elif rank == 'full':
cmp = rank_after
ret = rank_before
else:
cmp = ret = rank
mask = mask < np.asarray(cmp)[..., np.newaxis]
mask &= s > 0
# Invert only non-zero singular values
s_inv = np.zeros(s.shape)
s_inv[mask] = 1. / s[mask]
# Compute the pseudo inverse
x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh)
return x_inv, loading_factor, ret
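# Hedged usage sketch (added for illustration, not part of the original
# module): invert a rank-deficient Hermitian matrix with a small diagonal
# loading and an automatically estimated rank. The helper name is
# hypothetical.
def _example_reg_pinv():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 3)
    cov = A @ A.T  # 5 x 5 Hermitian matrix of rank 3
    cov_inv, loading, rank = _reg_pinv(cov, reg=0.1, rank=None)
    return cov_inv, loading, rank  # rank is typically estimated as 3 here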
def _gen_events(n_epochs):
"""Generate event structure from number of epochs."""
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
return events
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from ..epochs import _is_good
from ..io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from .. import Evoked
from ..time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
# if covariance matrix is fully expanded, Y needs a
# transpose / broadcasting else Y is correct
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
@fill_doc
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
as the randperm matlab function whenever the random_state is the same
as matlab's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
%(random_state)s
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
# This can't just be rng.permutation(n_samples) because it's not identical
# to what MATLAB produces
idx = rng.uniform(size=n_samples)
randperm = np.argsort(idx)
return randperm
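# Hedged usage sketch (added for illustration, not part of the original
# module): with a fixed seed the result is reproducible and corresponds to
# MATLAB's randperm(n) - 1. The helper name is hypothetical.
def _example_random_permutation():
    perm = random_permutation(5, random_state=0)
    assert sorted(perm.tolist()) == [0, 1, 2, 3, 4]
    return perm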
@verbose
def _apply_scaling_array(data, picks_list, scalings, verbose=None):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
logger.debug(' Scaling using mapping %s.' % (scalings,))
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
logger.debug(' Scaling using computed norms.')
data *= scalings[:, np.newaxis] # F - order
def _invert_scalings(scalings):
if isinstance(scalings, dict):
scalings = {k: 1. / v for k, v in scalings.items()}
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return scalings
def _undo_scaling_array(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_array(data, picks_list, scalings, verbose=False)
@contextmanager
def _scaled_array(data, picks_list, scalings):
"""Scale, use, unscale array."""
_apply_scaling_array(data, picks_list=picks_list, scalings=scalings)
try:
yield
finally:
_undo_scaling_array(data, picks_list=picks_list, scalings=scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
raise RuntimeError('Arff...')
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, str) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20
"""Calculate the hash for a file.
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
hash_type : str
Hash type to use ('md5' or 'sha1'). Defaults to 'md5'.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
if hash_type == "md5":
hasher = hashlib.md5()
elif hash_type == "sha1":
hasher = hashlib.sha1()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
def _replace_md5(fname):
"""Replace a file based on MD5sum."""
# adapted from sphinx-gallery
assert fname.endswith('.new')
fname_old = fname[:-4]
if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):
os.remove(fname)
else:
shutil.move(fname, fname_old)
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
length : int
Number of time sample included in a given slice.
step : int | None
Number of time samples separating two slices.
If step = None, step = length.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
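# Hedged usage sketch (added for illustration, not part of the original
# module): three non-overlapping windows of length 3 covering indices 0..8.
# The helper name is hypothetical.
def _example_create_slices():
    slices = create_slices(0, 9, length=3)
    assert slices == [slice(0, 3, 1), slice(3, 6, 1), slice(6, 9, 1)]
    return slices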
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,
include_tmax=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
extra = '' if include_tmax else 'when include_tmax=False '
raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
return mask
def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):
"""Safely find frequency boundaries."""
orig_fmin = fmin
orig_fmax = fmax
fmin = -np.inf if fmin is None else fmin
fmax = np.inf if fmax is None else fmax
if not np.isfinite(fmin):
fmin = freqs[0]
if not np.isfinite(fmax):
fmax = freqs[-1]
if sfreq is None:
raise ValueError('sfreq can not be None')
# Push 0.5/sfreq past the nearest frequency boundary first
sfreq = float(sfreq)
fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq
fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and fmin > fmax:
raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'
% (orig_fmin, orig_fmax))
mask = (freqs >= fmin)
mask &= (freqs <= fmax)
if raise_error and not mask.any():
raise ValueError('No frequencies remain when using fmin=%s and '
'fmax=%s (original frequency bounds are [%s, %s])'
% (orig_fmin, orig_fmax, freqs[0], freqs[-1]))
return mask
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list of Evoked or AverageTFR data.
For :class:`mne.Evoked` data, the function interpolates bad channels based
on the ``interpolate_bads`` parameter. If ``interpolate_bads`` is True,
the grand average file will contain good channels and the bad channels
interpolated from the good MEG/EEG channels.
For :class:`mne.time_frequency.AverageTFR` data, the function takes the
subset of channels not marked as bad in any of the instances.
The ``grand_average.nave`` attribute will be equal to the number
of evoked datasets used to calculate the grand average.
.. note:: A grand average evoked should not be used for source
localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
If neither interpolate_bads nor drop_bads is True, in the output file,
every channel marked as bad in at least one of the input files will be
marked as bad, but no interpolation or dropping will be performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from ..evoked import Evoked
from ..time_frequency import AverageTFR
from ..channels.channels import equalize_channels
if not all_inst:
raise ValueError('Please pass a list of Evoked or AverageTFR objects.')
elif len(all_inst) == 1:
warn('Only a single dataset was passed to mne.grand_average().')
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
from ..evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from ..time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list({b for inst in all_inst for b in inst.info['bads']})
if bads:
for inst in all_inst:
inst.drop_channels(bads)
equalize_channels(all_inst, copy=False)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (str, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tobytes())
elif isinstance(x, datetime):
object_hash(_dt_to_stamp(x), h)
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
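# Hedged usage sketch (added for illustration, not part of the original
# module): equal nested structures hash to the same integer digest. The
# helper name is hypothetical.
def _example_object_hash():
    a = dict(x=np.arange(3), y=['a', 1, None])
    b = dict(x=np.arange(3), y=['a', 1, None])
    assert object_hash(a) == object_hash(b)
    return object_hash(a)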
def object_size(x, memo=None):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
memo : dict | None
The memodict.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
# Note: this will not process object arrays properly (since those only
# hold references)
if memo is None:
memo = dict()
id_ = id(x)
if id_ in memo:
return 0 # do not add already existing ones
if isinstance(x, (bytes, str, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([]))
if x.base is None or id(x.base) not in memo:
size += x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key, memo)
size += object_size(value, memo)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x)
elif isinstance(x, datetime):
size = object_size(_dt_to_stamp(x), memo)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
memo[id_] = size
return size
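# Hedged usage sketch (added for illustration, not part of the original
# module): containers are sized recursively, so a dict holding a larger
# array reports a larger size. The helper name is hypothetical.
def _example_object_size():
    small = object_size(dict(a=np.zeros(10)))
    big = object_size(dict(a=np.zeros(1000)))
    assert big > small
    return small, big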
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def _array_equal_nan(a, b):
try:
np.testing.assert_array_equal(a, b)
except AssertionError:
return False
return True
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
# Deal with NamedInt and NamedFloat
for sub in (int, float):
if isinstance(a, sub) and isinstance(b, sub):
break
else:
return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
if isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key],
pre=(pre + '[%s]' % repr(key)))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, float):
if not _array_equal_nan(a, b):
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif isinstance(a, (str, int, bytes, np.generic)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not _array_equal_nan(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif isinstance(a, datetime):
if (a - b).total_seconds() != 0:
out += pre + ' datetime mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif hasattr(a, '__getstate__'):
out += object_diff(a.__getstate__(), b.__getstate__(), pre)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
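# Hedged usage sketch (added for illustration, not part of the original
# module): the returned string is empty when the objects match and describes
# each mismatch otherwise. The helper name is hypothetical.
def _example_object_diff():
    assert object_diff(dict(a=1), dict(a=1)) == ''
    diff = object_diff(dict(a=1), dict(a=2))
    assert 'value mismatch' in diff
    return diff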
class _PCA(object):
"""Principal component analysis (PCA)."""
# Adapted from sklearn and stripped down to just use linalg.svd
# and make it easier to later provide a "center" option if we want
def __init__(self, n_components=None, whiten=False):
self.n_components = n_components
self.whiten = whiten
def fit_transform(self, X, y=None):
X = X.copy()
U, S, _ = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = _safe_svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
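# Hedged usage sketch (added for illustration, not part of the original
# module): project 20 three-dimensional samples onto their first two
# principal components. The helper name is hypothetical.
def _example_pca():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    pca = _PCA(n_components=2)
    X_red = pca.fit_transform(X)
    assert X_red.shape == (20, 2)
    return pca.explained_variance_ratio_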
def _mask_to_onsets_offsets(mask):
"""Group boolean mask into contiguous onset:offset pairs."""
assert mask.dtype == bool and mask.ndim == 1
mask = mask.astype(int)
diff = np.diff(mask)
onsets = np.where(diff > 0)[0] + 1
if mask[0]:
onsets = np.concatenate([[0], onsets])
offsets = np.where(diff < 0)[0] + 1
if mask[-1]:
offsets = np.concatenate([offsets, [len(mask)]])
assert len(onsets) == len(offsets)
return onsets, offsets
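# Hedged usage sketch (added for illustration, not part of the original
# module): contiguous True runs come back as half-open [onset, offset)
# index pairs. The helper name is hypothetical.
def _example_mask_to_onsets_offsets():
    mask = np.array([True, True, False, False, True, False])
    onsets, offsets = _mask_to_onsets_offsets(mask)
    assert onsets.tolist() == [0, 4] and offsets.tolist() == [2, 5]
    return onsets, offsets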
def _julian_to_dt(jd):
"""Convert Julian integer to a datetime object.
Parameters
----------
jd : int
Julian date - number of days since julian day 0
Julian day number 0 assigned to the day starting at
noon on January 1, 4713 BC, proleptic Julian calendar
November 24, 4714 BC, in the proleptic Gregorian calendar
Returns
-------
jd_date : datetime
Datetime representation of jd
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = timedelta(days=(jd - jd_t0))
return datetime_t0 + dt
def _dt_to_julian(jd_date):
"""Convert datetime object to a Julian integer.
Parameters
----------
jd_date : datetime
Returns
-------
jd : float
Julian date corresponding to jd_date
- number of days since julian day 0
Julian day number 0 assigned to the day starting at
noon on January 1, 4713 BC, proleptic Julian calendar
November 24, 4714 BC, in the proleptic Gregorian calendar
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = jd_date - datetime_t0
return jd_t0 + dt.days
def _cal_to_julian(year, month, day):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd: int
Julian date.
"""
return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0,
tzinfo=timezone.utc)))
def _julian_to_cal(jd):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
jd: int, float
Julian date.
Returns
-------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
"""
tmp_date = _julian_to_dt(jd)
return tmp_date.year, tmp_date.month, tmp_date.day
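# Hedged usage sketch (added for illustration, not part of the original
# module): the calendar <-> Julian helpers round-trip, and 1970-01-01 maps
# to Julian day 2440588 as used above. The helper name is hypothetical.
def _example_julian_round_trip():
    jd = _cal_to_julian(1970, 1, 1)
    assert jd == 2440588
    assert _julian_to_cal(jd) == (1970, 1, 1)
    return jd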
def _check_dt(dt):
if not isinstance(dt, datetime) or dt.tzinfo is None or \
dt.tzinfo is not timezone.utc:
raise ValueError('Date must be datetime object in UTC: %r' % (dt,))
def _dt_to_stamp(inp_date):
"""Convert a datetime object to a timestamp."""
_check_dt(inp_date)
return int(inp_date.timestamp() // 1), inp_date.microsecond
def _stamp_to_dt(utc_stamp):
"""Convert timestamp to datetime object in Windows-friendly way."""
# The min on windows is 86400
stamp = [int(s) for s in utc_stamp]
if len(stamp) == 1: # In case there is no microseconds information
stamp.append(0)
return (datetime.fromtimestamp(0, tz=timezone.utc) +
timedelta(0, stamp[0], stamp[1])) # day, sec, µs
class _ReuseCycle(object):
"""Cycle over a variable, preferring to reuse earlier indices.
Requires the values in ``x`` to be hashable and unique. This holds
nicely for matplotlib's color cycle, which gives HTML hex color strings.
"""
def __init__(self, x):
self.indices = list()
self.popped = dict()
assert len(x) > 0
self.x = x
def __iter__(self):
while True:
yield self.__next__()
def __next__(self):
if not len(self.indices):
self.indices = list(range(len(self.x)))
self.popped = dict()
idx = self.indices.pop(0)
val = self.x[idx]
assert val not in self.popped
self.popped[val] = idx
return val
def restore(self, val):
try:
idx = self.popped.pop(val)
except KeyError:
warn('Could not find value: %s' % (val,))
else:
loc = np.searchsorted(self.indices, idx)
self.indices.insert(loc, idx)
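# Hedged usage sketch (added for illustration, not part of the original
# module): a restored value is handed out again before the cycle wraps
# around. The helper name is hypothetical.
def _example_reuse_cycle():
    cycle = _ReuseCycle(['a', 'b', 'c'])
    assert next(cycle) == 'a' and next(cycle) == 'b'
    cycle.restore('a')  # 'a' becomes available again ...
    assert next(cycle) == 'a'  # ... and is reused before 'c'
    return cycle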
| bsd-3-clause |
costypetrisor/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
dssg/cincinnati2015-public | evaluation/webapp/evaluation.py | 1 | 5838 | from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from webapp import config
def weighted_f1(scores):
f1_0 = scores["f1"][0] * scores["support"][0]
f1_1 = scores["f1"][1] * scores["support"][1]
return (f1_0 + f1_1) / (scores["support"][0] + scores["support"][1])
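# Hedged usage sketch (added for illustration, not part of the original
# module): weighted_f1 is the support-weighted average of the per-class F1
# scores. The helper name is hypothetical.
def _example_weighted_f1():
    scores = {"f1": [0.8, 0.5], "support": [90, 10]}
    return weighted_f1(scores)  # (0.8 * 90 + 0.5 * 10) / 100 == 0.77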
def plot_normalized_confusion_matrix(labels, predictions):
cutoff = 0.5
predictions_binary = np.copy(predictions)
predictions_binary[predictions_binary >= cutoff] = 1
predictions_binary[predictions_binary < cutoff] = 0
cm = metrics.confusion_matrix(labels, predictions_binary)
np.set_printoptions(precision=2)
fig = plt.figure()
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
target_names = ["No violation", "Violation"]
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Normalized Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return fig
def plot_feature_importances(feature_names, feature_importances):
importances = list(zip(feature_names, list(feature_importances)))
importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
importances = importances.set_index("Feature")
importances = importances.sort(columns="Importance", ascending=False)
importances = importances[0:20]
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
importances.plot(kind="barh", legend=False, ax=ax)
plt.tight_layout()
plt.title("Feature importances (Top 20)")
return fig
def plot_growth(results):
results = pd.DataFrame(results, columns=["date", "score"])
results = results.set_index("date")
results["score"] = results["score"].astype(float)
results = results.reindex(pd.date_range(datetime(2015, 7, 28), datetime(2015, 8, 27)))
results["random"] = pd.Series(3409/float(6124), index=results.index)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(8, 3))
results["score"].plot(legend=False, ax=ax, marker="x")
results["random"].plot(legend=False, ax=ax, style='--')
ax.set_ylabel(config.score_name)
plt.tight_layout()
ax.set_ylim(0.5, 1.0)
return fig
def precision_at_x_percent(test_labels, test_predictions, x_percent=0.01, return_cutoff=False):
cutoff_index = int(len(test_predictions) * x_percent)
cutoff_index = min(cutoff_index, len(test_predictions) -1)
sorted_by_probability = np.sort(test_predictions)[::-1]
cutoff_probability = sorted_by_probability[cutoff_index]
test_predictions_binary = np.copy(test_predictions)
test_predictions_binary[test_predictions_binary >= cutoff_probability] = 1
test_predictions_binary[test_predictions_binary < cutoff_probability] = 0
precision, _, _, _ = metrics.precision_recall_fscore_support(test_labels, test_predictions_binary)
precision = precision[1] # only interested in precision for label 1
if return_cutoff:
return precision, cutoff_probability
else:
return precision
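# Hedged usage sketch (added for illustration, not part of the original
# module): precision among the predictions scoring at or above the cutoff
# for the top 50% of the population. The helper name is hypothetical.
def _example_precision_at_x_percent():
    labels = np.array([1, 0, 1, 0])
    predictions = np.array([0.9, 0.8, 0.3, 0.1])
    return precision_at_x_percent(labels, predictions, x_percent=0.5)  # 2/3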
def plot_precision_recall_n(test_labels, test_predictions):
y_score = test_predictions
precision_curve, recall_curve, pr_thresholds = metrics.precision_recall_curve(test_labels, y_score)
precision_curve = precision_curve[:-1]
recall_curve = recall_curve[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in pr_thresholds:
num_above_thresh = len(y_score[y_score>=value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
pct_above_per_thresh = np.array(pct_above_per_thresh)
with plt.style.context(('ggplot')):
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, precision_curve, "#000099")
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, "#CC0000")
ax2.set_ylabel('recall', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision-recall for top x%")
return fig
def plot_precision_cutoff(test_labels, test_predictions):
percent_range = [0.001* i for i in range(1, 10)] + [0.01 * i for i in range(1, 101)]
precisions_and_cutoffs = [precision_at_x_percent(test_labels, test_predictions, x_percent=p, return_cutoff=True)
for p in percent_range]
precisions, cutoffs = zip(*precisions_and_cutoffs)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(percent_range, precisions, "#000099")
ax.set_xlabel('percent of population')
ax.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax.twinx()
ax2.plot(percent_range, cutoffs, "#CC0000")
ax2.set_ylabel('cutoff at', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision at x%")
return fig
def plot_ROC(test_labels, test_predictions):
fpr, tpr, thresholds = metrics.roc_curve(test_labels, test_predictions, pos_label=1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(fpr[2], tpr[2])
#ax.plot([0, 1], [0, 1], 'k--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
return fig
| mit |
mfjb/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=bool)
ranking_ = np.ones(n_features, dtype=int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the a-priori not known 5
informative features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier_gurki.py | 1 | 9088 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn import naive_bayes
from random import shuffle
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from prettyprint import pp
import os, re, pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.grid_search import GridSearchCV
from datetime import datetime as dt
from ipy_table import *
def testClassifier(x_train, y_train, x_test, y_test, clf, name):
"""
this method will first train the classifier on the training data
and will then test the trained classifier on test data.
Finally it will report some metrics on the classifier performance.
Parameters
----------
x_train: np.ndarray
train data matrix
y_train: list
train data label
x_test: np.ndarray
test data matrix
y_test: list
test data label
clf: sklearn classifier object implementing fit() and predict() methods
name: str
file name prefix used to pickle the trained classifier to '<name>.p'
Returns
-------
metrics: list
[training time, testing time, recall and precision for every class, macro-averaged F1 score]
"""
print(name)
metrics = []
start = dt.now()
clf.fit(x_train, y_train)
end = dt.now()
print 'training time: ', (end - start)
pickle.dump(clf, open(name + ".p", "wb"))
# add training time to metrics
metrics.append(end-start)
start = dt.now()
yhat = clf.predict(x_test)
end = dt.now()
print 'testing time: ', (end - start)
# add testing time to metrics
metrics.append(end-start)
print 'classification report: '
# print classification_report(y_test, yhat)
pp(classification_report(y_test, yhat))
print 'f1 score'
print f1_score(y_test, yhat, average='macro')
print 'accuracy score'
print accuracy_score(y_test, yhat)
precision = precision_score(y_test, yhat, average=None)
recall = recall_score(y_test, yhat, average=None)
# add precision and recall values to metrics
for p, r in zip(precision, recall):
metrics.append(p)
metrics.append(r)
#add macro-averaged F1 score to metrics
metrics.append(f1_score(y_test, yhat, average='macro'))
print 'confusion matrix:'
print confusion_matrix(y_test, yhat)
# plotting the confusion matrix
plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')
# plt.show()
return metrics
stop_words = set(stopwords.words('english'))
clfrNB = naive_bayes.MultinomialNB()
train = []
test = []
rootDir = './data_label'
one_label = 0
zero_label = 0
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
# print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
lines = fo.readlines()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
if splitsent[1] != '1' and splitsent[1] != '0' :
print(splitsent)
elif splitsent[1] == "1":
one_label += 1
else:
zero_label += 1
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
stemmed = filter(lambda x: not (len(x) < 3 or re.findall(r"[0-9]+", x)), stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
shuffle(train)
# testindex = int(len(train)*4/5)
# test = train[testindex:]
# train = train[:testindex]
train_arr = []
# test_arr = []
train_lbl = []
# test_lbl = []
for x in train:
train_arr.append(x[0])
train_lbl.append(x[1])
# for x in test:
# test_arr.append(x[0])
# test_lbl.append(x[1])
vectorizer = CountVectorizer()
vectorizer.fit(train_arr)
pickle.dump(vectorizer, open("vectorizer.p", "wb"))
train_mat = vectorizer.transform(train_arr)
print train_mat
# print train_mat.shape
# test_mat = vectorizer.transform(test_arr)
# print test_mat.shape
tfidf = TfidfTransformer()
tfidf.fit(train_mat)
pickle.dump(tfidf, open("tfidf.p", "wb"))
train_tfmat = tfidf.transform(train_mat)
print train_tfmat.shape
print train_tfmat[0]
# test_tfmat = tfidf.transform(test_mat)
# print test_tfmat.shape
testindex = int(len(train)*4/5)
test_tfmat = train_tfmat[testindex:]
test_lbl = train_lbl[testindex:]
train_tfmat = train_tfmat[:testindex]
train_lbl = train_lbl[:testindex]
metrics_dict = []
bnb = BernoulliNB()
bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb, "bernoulliNB")
metrics_dict.append({'name':'BernoulliNB', 'metrics':bnb_me})
gnb = GaussianNB()
gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, gnb, "guassianNB")
metrics_dict.append({'name':'GaussianNB', 'metrics':gnb_me})
mnb = MultinomialNB()
mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb, "MultinomialNB")
metrics_dict.append({'name':'MultinomialNB', 'metrics':mnb_me})
for nn in [5]:
print 'knn with ', nn, ' neighbors'
knn = KNeighborsClassifier(n_neighbors=nn)
knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, knn, "knn"+str(nn))
metrics_dict.append({'name':'5NN', 'metrics':knn_me})
print ' '
print("linear SVM starts:")
lsvm = LinearSVC( class_weight={'1': 1, '0' : 1})
lsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, lsvm, "linearSVM")
metrics_dict.append({'name':'LinearSVM', 'metrics':lsvm_me})
rbfsvm = SVC(kernel = 'poly',degree=2,coef0=1 ,class_weight={'1': zero_label, '0' : one_label})
rbfsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, rbfsvm, "rbfSVM")
metrics_dict.append({'name':'SVM with RBF kernel', 'metrics':rbfsvm_me})
bnb_params = {'alpha': [a*0.1 for a in range(0,11)]}
bnb_clf = GridSearchCV(BernoulliNB(), bnb_params, cv=10)
bnb_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print bnb_clf.best_params_
best_bnb = BernoulliNB(alpha=bnb_clf.best_params_['alpha'])
best_bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_bnb,"bernoulliNB")
metrics_dict.append({'name':'Best BernoulliNB', 'metrics':best_bnb_me})
best_gnb = GaussianNB()
best_gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, best_gnb, "guassianNB")
metrics_dict.append({'name':'Best GaussianNB', 'metrics':best_gnb_me})
mbn_params = {'alpha': [a*0.1 for a in range(0,11)]}
mbn_clf = GridSearchCV(MultinomialNB(), mbn_params, cv=10)
mbn_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print mbn_clf.best_params_
best_mbn = MultinomialNB(alpha=mbn_clf.best_params_['alpha'])
best_mbn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_mbn, "MultinomialNB")
metrics_dict.append({'name':'Best MultinomialNB', 'metrics':best_mbn_me})
print metrics_dict
# knn_params = {'n_neighbors': range(1,21), 'weights': ['uniform', 'distance'], 'algorithm': ['ball_tree', 'kd_tree'],
# 'leaf_size': [15, 30, 50, 100], 'p': [1,2]}
# knn_clf = GridSearchCV(KNeighborsClassifier(), knn_params, cv=10)
# knn_clf.fit(train_tfmat, train_lbl)
# print 'best parameters'
# print knn_clf.best_params_
# best_knn = KNeighborsClassifier(n_neighbors=knn_clf.best_params_['n_neighbors'], weights=knn_clf.best_params_['weights'],
# algorithm=knn_clf.best_params_['algorithm'], leaf_size=knn_clf.best_params_['leaf_size'])
# best_knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_knn)
# metrics_dict.append({'name':'Best KNN', 'metrics':best_knn_me})
# nusvm = NuSVC()
# nusvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, nusvm)
# metrics_dict.append({'name':'nuSVM', 'metrics':nusvm_me})
# traindata = [data[0] for data in train]
# trainlabel = [data[1] for data in train]
# clfrNB.fit(traindata, trainlabel)
# print(test)
# cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
# for item in test:
# if(cl.classify(item[0]) == '1'):
# print(item, cl.classify(item[0]))
# print(cl.accuracy(test))
# print(cl.show_informative_features(100))
# print(train)
| mit |
weaver-viii/h2o-3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
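# typical use (mirrors ParseObj below): numRows, numCols, parse_key = infoFromParse(parseResult)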
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col identified by label. the column identifier could be a string or an index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("%s not in labelList: %s" % (column, labelList))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
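# worked example with made-up numbers: if expected spans 0..100 then
# expectedRange = 100 and expectedBin = 100 / (1000 - 2) ~= 0.1002, i.e. the
# quantile estimates are only trusted to about one histogram bin width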
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...big e308 numbers if unpopulated due to not enough unique values in the dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
# compare the summary attributes against the expected values, when given
if expectedLabel is not None:
assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
if expectedType is not None:
assert self.type == expectedType, "%s %s" % (self.type, expectedType)
if expectedMissing is not None:
assert self.missing == expectedMissing, "%s %s" % (self.missing, expectedMissing)
if expectedDomain is not None:
assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
if expectedBinsSum is not None:
assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum != 0 and checksum is not None
assert rows != 0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at these attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 |
kabrapratik28/Stanford_courses | cs224n/assignment1/q4_sentiment.py | 1 | 8150 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import itertools
from utils.treebank import StanfordSentiment
import utils.glove as glove
from q3_sgd import load_saved_params, sgd
# We will use sklearn here because it will run faster than implementing
# ourselves. However, for other parts of this assignment you must implement
# the functions yourself!
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
def getArguments():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pretrained", dest="pretrained", action="store_true",
help="Use pretrained GloVe vectors.")
group.add_argument("--yourvectors", dest="yourvectors", action="store_true",
help="Use your vectors from q3.")
return parser.parse_args()
def getSentenceFeatures(tokens, wordVectors, sentence):
"""
Obtain the sentence feature for sentiment analysis by averaging its
word vectors
"""
# Implement computation for the sentence features given a sentence.
# Inputs:
# tokens -- a dictionary that maps words to their indices in
# the word vector list
# wordVectors -- word vectors (each row) for all tokens
# sentence -- a list of words in the sentence of interest
# Output:
# - sentVector: feature vector for the sentence
sentVector = np.zeros((wordVectors.shape[1],))
### YOUR CODE HERE
for word in sentence:
index = tokens[word]
sentVector += wordVectors[index]
sentVector /= len(sentence)
### END YOUR CODE
assert sentVector.shape == (wordVectors.shape[1],)
return sentVector
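# tiny worked example with made-up 2-d vectors: if the rows of wordVectors for
# 'good' and 'movie' were [1, 0] and [0, 1], the sentence ['good', 'movie']
# averages to [0.5, 0.5]; a word missing from `tokens` would raise a KeyError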
def getRegularizationValues():
"""Try different regularizations
Return a sorted list of values to try.
"""
values = None # Assign a list of floats in the block below
### YOUR CODE HERE
values = [100, 10, 1, 0, 1e-1, 5e-1, 1e-2, 5e-2,
1e-3, 5e-3, 1e-4, 5e-4, 1e-5, 5e-5, 1e-6]
### END YOUR CODE
return sorted(values)
def chooseBestModel(results):
"""Choose the best model based on parameter tuning on the dev set
Arguments:
results -- A list of python dictionaries of the following format:
{
"reg": regularization,
"clf": classifier,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy
}
Returns:
Your chosen result dictionary.
"""
bestResult = None
### YOUR CODE HERE
currBestValue = -1.0
for each_result in results:
if each_result["dev"] > currBestValue:
currBestValue = each_result["dev"]
bestResult = each_result
### END YOUR CODE
return bestResult
def accuracy(y, yhat):
""" Accuracy for classifier """
assert(y.shape == yhat.shape)
return np.sum(y == yhat) * 100.0 / y.size
def plotRegVsAccuracy(regValues, results, filename):
""" Make a plot of regularization vs accuracy """
plt.plot(regValues, [x["train"] for x in results])
plt.plot(regValues, [x["dev"] for x in results])
plt.xscale('log')
plt.xlabel("regularization")
plt.ylabel("accuracy")
plt.legend(['train', 'dev'], loc='upper left')
plt.savefig(filename)
def outputConfusionMatrix(features, labels, clf, filename):
""" Generate a confusion matrix """
pred = clf.predict(features)
cm = confusion_matrix(labels, pred, labels=range(5))
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Reds)
plt.colorbar()
classes = ["- -", "-", "neut", "+", "+ +"]
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(filename)
def outputPredictions(dataset, features, labels, clf, filename):
""" Write the predictions to file """
pred = clf.predict(features)
with open(filename, "w") as f:
print >> f, "True\tPredicted\tText"
for i in xrange(len(dataset)):
print >> f, "%d\t%d\t%s" % (
labels[i], pred[i], " ".join(dataset[i][0]))
def main(args):
""" Train a model to do sentiment analysis"""
# Load the dataset
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
if args.yourvectors:
_, wordVectors, _ = load_saved_params()
wordVectors = np.concatenate(
(wordVectors[:nWords,:], wordVectors[nWords:,:]),
axis=1)
elif args.pretrained:
wordVectors = glove.loadWordVectors(tokens)
dimVectors = wordVectors.shape[1]
# Load the train set
trainset = dataset.getTrainSentences()
nTrain = len(trainset)
trainFeatures = np.zeros((nTrain, dimVectors))
trainLabels = np.zeros((nTrain,), dtype=np.int32)
for i in xrange(nTrain):
words, trainLabels[i] = trainset[i]
trainFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare dev set features
devset = dataset.getDevSentences()
nDev = len(devset)
devFeatures = np.zeros((nDev, dimVectors))
devLabels = np.zeros((nDev,), dtype=np.int32)
for i in xrange(nDev):
words, devLabels[i] = devset[i]
devFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare test set features
testset = dataset.getTestSentences()
nTest = len(testset)
testFeatures = np.zeros((nTest, dimVectors))
testLabels = np.zeros((nTest,), dtype=np.int32)
for i in xrange(nTest):
words, testLabels[i] = testset[i]
testFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# We will save our results from each run
results = []
regValues = getRegularizationValues()
for reg in regValues:
print "Training for reg=%f" % reg
# Note: add a very small number to regularization to please the library
clf = LogisticRegression(C=1.0/(reg + 1e-12))
clf.fit(trainFeatures, trainLabels)
# Test on train set
pred = clf.predict(trainFeatures)
trainAccuracy = accuracy(trainLabels, pred)
print "Train accuracy (%%): %f" % trainAccuracy
# Test on dev set
pred = clf.predict(devFeatures)
devAccuracy = accuracy(devLabels, pred)
print "Dev accuracy (%%): %f" % devAccuracy
# Test on test set
# Note: always running on test is poor style. Typically, you should
# do this only after validation.
pred = clf.predict(testFeatures)
testAccuracy = accuracy(testLabels, pred)
print "Test accuracy (%%): %f" % testAccuracy
results.append({
"reg": reg,
"clf": clf,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy})
# Print the accuracies
print ""
print "=== Recap ==="
print "Reg\t\tTrain\tDev\tTest"
for result in results:
print "%.2E\t%.3f\t%.3f\t%.3f" % (
result["reg"],
result["train"],
result["dev"],
result["test"])
print ""
bestResult = chooseBestModel(results)
print "Best regularization value: %0.2E" % bestResult["reg"]
print "Test accuracy (%%): %f" % bestResult["test"]
# do some error analysis
if args.pretrained:
plotRegVsAccuracy(regValues, results, "q4_reg_v_acc.png")
outputConfusionMatrix(devFeatures, devLabels, bestResult["clf"],
"q4_dev_conf.png")
outputPredictions(devset, devFeatures, devLabels, bestResult["clf"],
"q4_dev_pred.txt")
if __name__ == "__main__":
main(getArguments())
| apache-2.0 |
abhishekgahlot/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
heli522/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
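# note: ravel() flattens the (n_samples, n_classes) arrays so that every
# (sample, class) pair is scored as one binary decision (micro-averaging)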
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
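# small worked example (hypothetical 3-node graph): with weights w(0,1)=1,
# w(1,2)=2 and no direct 0-2 edge, the triple loop relaxes graph[0, 2] to
# min(inf, graph[0, 1] + graph[1, 2]) = 3 when k == 1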
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
pv/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
creyesp/RF_Estimation | Clustering/clustering/gmm.py | 2 | 6063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gmm.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs Gaussian Mixture Model (GMM) clustering using scikit-learn
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
import argparse #argument parsing
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import mixture
from sklearn import metrics
clustersColours = ['#fcfa00', '#ff0000', '#820c2c', '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff','#0c820e','#28ea04','#ea8404','#c8628f','#6283ff','#5b6756','#0c8248','k','#820cff','#932c11','#002c11','#829ca7']
def main():
parser = argparse.ArgumentParser(prog='gmm.py',
description='Performs Gaussian Mixture Model clustering using scikit-learn',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='5', choices=[3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--blockSize',
help='Size of each block in micrometres',
type=int, default='50', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exist ' + sourceFolder
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#Size of each block in micrometres
blockSize = args.blockSize
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
#plus 7 as we are incorporating the 2 dimensions of the ellipse,
#2 dimensions of the ellipse on micrometres,
#x position, y position and angle
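#as assembled by the loop below, each row is therefore laid out as:
#[0:framesNumber] temporal STA profile, then the A/B radii in micrometres,
#the unscaled A/B radii, the ellipse angle, and the x/y position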
dataCluster = np.zeros((1,framesNumber+7))
units=[]
dato=np.zeros((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#should we use the not-gaussian-fitted data for clustering?
dataUnitGauss = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#A radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitGauss,dato),1)
#B radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#A radius of the RF ellipse
dato[0] = fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#B radius of the RF ellipse
dato[0] = fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
dato[0] = fitResult[0][1]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
dato[0] = fitResult[0][4]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
dato[0] = fitResult[0][5]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
dataCluster = np.append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:framesNumber+2]
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='full')
gmix.fit(data)
labels = gmix.predict(data)
fit = metrics.silhouette_score(data, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'gmm.png', clustersColours, fit)
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,framesNumber+7))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,framesNumber+7))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
## remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla, outputFolder+'Grilla_'+str(clusterId)+'.png', clustersColours[clusterId], framesNumber, xSize, ySize)
rfe.graficaCluster(labels, dataGrilla[:,0:framesNumber-1], outputFolder+'cluster_'+str(clusterId)+'.png', clustersColours[clusterId])
rfe.guardaClustersIDs(outputFolder, units, labels, outputFolder+'clusterings.csv')
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
aminert/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
AnasGhrab/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
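# A minimal sketch of the four TASKs above (one possible solution; the
# min_df / max_df / C values below are illustrative guesses, not required ones):
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# cross-validated mean score for each parameter setting explored
for params, mean_score, scores in grid_search.grid_scores_:
print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
y_predicted = grid_search.predict(docs_test)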
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
aetilley/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
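# the 'anova__percentile' name follows the Pipeline convention
# <step name>__<parameter>, i.e. it sets 'percentile' on the 'anova' step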
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
meduz/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, heavy tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
h-mayorquin/competitive_and_selective_learning | play.py | 1 | 1250 | """
Demo: run competitive and selective learning (CSL) clustering on synthetic blobs and plot the resulting neurons.
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
from csl import CSL
plot = True
verbose = False
tracking = True
selection = False
# Generate the data
n_samples = 1500
random_state = 20 # Does not converge
random_state = 41
random_state = 105 # Does not converge
random_state = 325325
random_state = 1111
n_features = 2
centers = 7
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)
# The algorithm
N = centers
s = 2 # Number of neurons to change per round
eta = 0.1
T = 100
csl = CSL(n_clusters=N, n_iter=T, tol=0.001, eta=eta, s0=s, random_state=np.random)
csl.fit(X)
neurons = csl.centers_
if False:
kmeans = KMeans(n_clusters=N)
kmeans.fit(X)
neurons = kmeans.cluster_centers_
if plot:
# Visualize X
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
ax.hold(True)
if True:
for n in range(N):
ax.plot(neurons[n, 0], neurons[n, 1], 'o', markersize=12, label='neuron ' + str(n))
ax.legend()
# fig.show()
plt.show()
| mit |
kagayakidan/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |